CombinedText stringlengths 4 3.42M |
|---|
#!/usr/bin/env python
import httplib, urllib
class XenForo(object):
    """Minimal client for a XenForo forum over plain HTTP.

    NOTE(review): Python 2 code (httplib/urllib, print statements).
    Credentials travel unencrypted; suitable only for throwaway accounts.
    """

    # NOTE(review): class-level attributes -- the mutable `cookies` dict
    # is shared by every instance of XenForo.
    host = None
    username = None
    password = None
    cookies = {}
    # Default request headers mimicking a Chrome 10 browser.
    headers = {
        'Cache-Control': 'max-age=0',
        'Accept-Language': 'en-US,en;q=0.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept': 'application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
        'User-Agent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) '+\
            'AppleWebKit/534.16 (KHTML, like Gecko) '+\
            'Chrome/10.0.648.205 Safari/534.16',
    }

    def __init__(self, username, password, host):
        # `host` is a bare hostname, e.g. 'forums.bukkit.org'.
        self.username = username
        self.password = password
        self.host = host

    def _update_cookies(self, cookies):
        # Parse a raw Set-Cookie header string, keeping only the session
        # cookies this forum uses.
        print cookies
        if cookies is not None:
            for cookie in cookies.split(';'):
                dset = cookie.split('=')
                if len(dset) > 1:
                    if dset[0] in ['f_user', 'f_session', 'IDstack']:
                        self.cookies[dset[0]] = dset[1]

    def _logged_in(self, page):
        # Sniff the <html class="..."> marker near the top of the page.
        # Returns True/False, or 'UNKNOWN' when neither marker is found.
        if page[:100].find('class="Public LoggedOut"') > -1:
            return False
        elif page[:100].find('class="Public LoggedIn"') > -1:
            return True
        else:
            return 'UNKNOWN'

    def _get_cookies(self):
        # Serialize stored cookies into a Cookie header value.
        # NOTE(review): each item already ends with '; ' and join() adds
        # another '; ', so the header carries doubled delimiters.
        cookies = []
        for cookie in self.cookies:
            cookies.append('%s=%s; ' % (cookie, self.cookies[cookie]))
        print '; '.join(cookies)
        return '; '.join(cookies)

    def _get(self, loc):
        # GET `loc`; updates cookies and recursively follows Location
        # redirects. Returns the final page body.
        con = httplib.HTTPConnection(self.host)
        cookies = self._get_cookies()
        # NOTE(review): aliases (and, below, mutates) the shared class
        # dict; `is not ''` is an identity test, not an equality test.
        headers = self.headers
        if cookies is not '':
            headers['Cookie'] = cookies
        con.request('GET', loc, headers=headers)
        resp = con.getresponse()
        print resp.getheaders()
        self._update_cookies(resp.getheader('set-cookie'))
        newloc = resp.getheader('location')
        if newloc is not None:
            print 'Chasing Referral: %s' % newloc
            page = self._get(newloc)
        else:
            page = resp.read()
        return page

    def _post(self, loc, formdata):
        # POST urlencoded `formdata` to `loc`; redirects are chased with
        # a GET. Returns the final page body.
        con = httplib.HTTPConnection(self.host)
        cookies = self._get_cookies()
        headers = self.headers
        payload = urllib.urlencode(formdata)
        if cookies is not '':
            headers['Cookie'] = cookies
        # NOTE(review): hard-coded to forums.bukkit.org even though `host`
        # is configurable -- confirm before pointing at another forum.
        headers['Host'] = 'forums.bukkit.org'
        headers['Origin'] = 'http://forums.bukkit.org/'
        headers['Referer'] = 'http://forums.bukkit.org/'
        headers['Content-Type'] = 'application/x-www-form-urlencoded'
        headers['Content-Length'] = len(payload)
        print headers
        con.request('POST', loc, headers=headers, body=payload)
        resp = con.getresponse()
        print resp.getheaders()
        self._update_cookies(resp.getheader('set-cookie'))
        newloc = resp.getheader('location')
        if newloc is not None:
            print 'Chasing Referral: %s' % newloc
            page = self._get(newloc)
        else:
            page = resp.read()
        return page

    def login(self):
        """Log in with the stored credentials.

        Returns True/False, or 'UNKNOWN' if the result page cannot be
        classified.
        """
        # Initial GET collects session cookies before posting the form.
        self._get('/')
        formdata = {
            'login': self.username,
            'register': 0,
            'password': self.password,
            'remember': 0,
            'cookie_check': 1,
            'redirect': '/',
            '_xfToken': '',
        }
        page = self._post('/login/login', formdata)
        return self._logged_in(page)

    def private_message(self, user, message, locked=False):
        # TODO: not implemented.
        pass
if __name__ == '__main__':
    # Smoke test: replace the XXX placeholders with real credentials.
    forum = XenForo('XXX', 'XXX', 'forums.bukkit.org')
    print forum.login()
    print forum.cookies
Minor modifications and cleanup
#!/usr/bin/env python
import httplib, urllib
class XenForo(object):
host = None
username = None
password = None
cookies = {}
headers = {
'Cache-Control': 'max-age=0',
'Accept-Language': 'en-US,en;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept': 'application/xml,application/xhtml+xml,' +\
'text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
'User-Agent': 'Mozilla/5.0 '+\
'(Macintosh; U; Intel Mac OS X 10_6_7; en-US) '+\
'AppleWebKit/534.16 (KHTML, like Gecko) '+\
'Chrome/10.0.648.205 Safari/534.16',
}
def __init__(self, username, password, host):
self.username = username
self.password = password
self.host = host
def _update_cookies(self, cookies):
if cookies is not None:
for cookie in cookies.split(';'):
dset = cookie.split('=')
if len(dset) > 1:
if dset[0] in ['f_user', 'f_session', 'IDstack']:
self.cookies[dset[0]] = dset[1]
def _logged_in(self, page):
if page[:100].find('class="Public LoggedOut"') > -1:
return False
elif page[:100].find('class="Public LoggedIn"') > -1:
return True
else:
return 'UNKNOWN'
def _get_cookies(self):
cookies = []
for cookie in self.cookies:
cookies.append('%s=%s; ' % (cookie, self.cookies[cookie]))
print '; '.join(cookies)
return '; '.join(cookies)
def _get(self, loc):
con = httplib.HTTPConnection(self.host)
cookies = self._get_cookies()
headers = self.headers
if cookies is not '':
headers['Cookie'] = cookies
con.request('GET', loc, headers=headers)
resp = con.getresponse()
print resp.getheaders()
self._update_cookies(resp.getheader('set-cookie'))
newloc = resp.getheader('location')
if newloc is not None:
print 'Chasing Referral: %s' % newloc
page = self._get(newloc)
else:
page = resp.read()
return page
def _post(self, loc, formdata):
con = httplib.HTTPConnection(self.host)
cookies = self._get_cookies()
headers = self.headers
payload = urllib.urlencode(formdata)
if cookies is not '':
headers['Cookie'] = cookies
headers['Content-Type'] = 'application/x-www-form-urlencoded'
headers['Content-Length'] = len(payload)
print headers
con.request('POST', loc, headers=headers, body=payload)
resp = con.getresponse()
print resp.getheaders()
self._update_cookies(resp.getheader('set-cookie'))
newloc = resp.getheader('location')
if newloc is not None:
print 'Chasing Referral: %s' % newloc
page = self._get(newloc)
else:
page = resp.read()
return page
def login(self):
self._get('/')
formdata = {
'login': self.username,
'register': 0,
'password': self.password,
'remember': 0,
'cookie_check': 1,
'redirect': '/',
'_xfToken': '',
}
token = self._post('/login/csrf-token-refresh', {'_xfToken': ''})
print token
page = self._post('/login/login', formdata)
return self._logged_in(page)
def private_message(self, user, message, locked=False):
pass
if __name__ == '__main__':
    # Smoke test: replace the XXX placeholders with real credentials.
    forum = XenForo('XXX', 'XXX', 'forums.bukkit.org')
    print forum.login()
    print forum.cookies
import bottleneck as bn
import numpy as np
from scipy import special
from scipy.stats import distributions
# Public API: low-level ndarray implementations of the deterministic
# metrics (intended to be wrapped for labelled arrays elsewhere).
__all__ = [
    "_pearson_r",
    "_pearson_r_p_value",
    "_rmse",
    "_mse",
    "_mae",
    "_mad",
    "_smape",
    "_mape",
    "_spearman_r",
    "_spearman_r_p_value",
]
def _match_nans(a, b, weights):
"""
Considers missing values pairwise. If a value is missing
in a, the corresponding value in b is turned to nan, and
vice versa.
Returns
-------
a, b, weights : ndarray
a, b, and weights (if not None) with nans placed at
pairwise locations.
"""
if np.isnan(a).any() or np.isnan(b).any():
# Find pairwise indices in a and b that have nans.
idx = np.logical_or(np.isnan(a), np.isnan(b))
a[idx], b[idx] = np.nan, np.nan
if weights is not None:
weights[idx] = np.nan
return a, b, weights
def _get_numpy_funcs(skipna):
"""
Returns nansum and nanmean if skipna is True;
Returns sum and mean if skipna is False.
"""
if skipna:
return np.nansum, np.nanmean
else:
return np.sum, np.mean
def _check_weights(weights):
"""
Quick check if weights are all NaN. If so,
return None to guide weighting scheme.
"""
if weights is None:
return weights
elif np.all(np.isnan(weights)):
return None
else:
return weights
def _pearson_r(a, b, weights, axis, skipna):
    """ndarray implementation of scipy.stats.pearsonr.

    Parameters
    ----------
    a, b : ndarray
        Input arrays.
    weights : ndarray or None
        Optional weights for a and b.
    axis : int
        The axis to apply the correlation along.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Pearson's correlation coefficient, clipped to [-1, 1].

    See Also
    --------
    scipy.stats.pearsonr
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    weights = _check_weights(weights)
    # Bring the reduction axis to the front so every reduction is axis=0.
    a = np.rollaxis(a, axis)
    b = np.rollaxis(b, axis)
    # Weighted and unweighted paths are kept separate: a weights array of
    # ones would still inflate the denominator over masked regions.
    if weights is not None:
        weights = np.rollaxis(weights, axis)
        mean_a = sumfunc(a * weights, axis=0) / sumfunc(weights, axis=0)
        mean_b = sumfunc(b * weights, axis=0) / sumfunc(weights, axis=0)
        dev_a = a - mean_a
        dev_b = b - mean_b
        numer = sumfunc(weights * dev_a * dev_b, axis=0)
        denom = np.sqrt(
            sumfunc(weights * dev_a * dev_a, axis=0)
            * sumfunc(weights * dev_b * dev_b, axis=0)
        )
    else:
        dev_a = a - meanfunc(a, axis=0)
        dev_b = b - meanfunc(b, axis=0)
        numer = sumfunc(dev_a * dev_b, axis=0)
        denom = np.sqrt(sumfunc(dev_a * dev_a, axis=0) * sumfunc(dev_b * dev_b, axis=0))
    # Clip to protect against tiny floating-point excursions past +/-1.
    return np.clip(numer / denom, -1.0, 1.0)
def _pearson_r_p_value(a, b, weights, axis, skipna):
    """
    ndarray implementation of scipy.stats.pearsonr (p-value).

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the correlation along.
    weights : ndarray
        Input array of weights for a and b.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        2-tailed p-value.

    See Also
    --------
    scipy.stats.pearsonr
    """
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    r = _pearson_r(a, b, weights, axis, skipna)
    if np.isnan(r).all():
        return r
    else:
        # no nans or some nans
        a = np.rollaxis(a, axis)
        b = np.rollaxis(b, axis)
        # Fix: degrees of freedom must count the pairwise *valid*
        # (non-NaN) samples; the previous `np.isnan(a * b)` counted the
        # missing ones instead. Matches the spearman implementation.
        dof = np.apply_over_axes(np.sum, ~np.isnan(a * b), 0).squeeze() - 2
        # Guard: with too few valid samples fall back to n - 2.
        dof = np.where(dof > 1.0, dof, a.shape[0] - 2)
        t_squared = r ** 2 * (dof / ((1.0 - r) * (1.0 + r)))
        _x = dof / (dof + t_squared)
        _x = np.asarray(_x)
        _x = np.where(_x < 1.0, _x, 1.0)
        _a = 0.5 * dof
        _b = 0.5
        # Two-tailed p via the regularized incomplete beta function.
        res = special.betainc(_a, _b, _x)
        # reset masked values to nan
        nan_locs = np.where(np.isnan(r))
        if len(nan_locs[0]) > 0:
            res[nan_locs] = np.nan
        return res
def _spearman_r(a, b, weights, axis, skipna):
    """ndarray implementation of scipy.stats.spearmanr.

    Ranks ``a`` and ``b`` along ``axis`` and computes the Pearson
    correlation of the ranks.

    Parameters
    ----------
    a, b : ndarray
        Input arrays.
    weights : ndarray or None
        Optional weights, forwarded to ``_pearson_r``.
    axis : int
        The axis to apply the correlation along.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Spearman's correlation coefficient.

    See Also
    --------
    scipy.stats.spearmanr
    """
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    # bottleneck's nanrankdata ranks along the axis, leaving NaNs as NaN.
    ranked_a = bn.nanrankdata(a, axis=axis)
    ranked_b = bn.nanrankdata(b, axis=axis)
    return _pearson_r(ranked_a, ranked_b, weights, axis, skipna)
def _spearman_r_p_value(a, b, weights, axis, skipna):
    """
    ndarray implementation of scipy.stats.spearmanr.

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the correlation along.
    weights : ndarray
        Input array.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        2-tailed p-value.

    See Also
    --------
    scipy.stats.spearmanr

    Reference
    ---------
    https://github.com/scipy/scipy/blob/v1.3.1/scipy/stats/stats.py#L3613-L3764
    """
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    rs = _spearman_r(a, b, weights, axis, skipna)
    a = np.rollaxis(a, axis)
    b = np.rollaxis(b, axis)
    # dof = number of pairwise-valid (non-NaN) samples minus 2.
    dof = np.apply_over_axes(np.sum, ~np.isnan(a * b), 0).squeeze() - 2
    # Guard: with too few valid samples fall back to n - 2.
    dof = np.where(dof > 1.0, dof, a.shape[0] - 2)
    # t statistic; .clip(0) keeps the sqrt argument non-negative when
    # |rs| rounds to exactly 1.
    t = rs * np.sqrt((dof / ((rs + 1.0) * (1.0 - rs))).clip(0))
    # Two-tailed p from Student's t survival function.
    p = 2 * distributions.t.sf(np.abs(t), dof)
    return p
def _rmse(a, b, weights, axis, skipna):
    """
    Root Mean Squared Error.

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the rmse along.
    weights : ndarray
        Input array of weights for a and b.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Root Mean Squared Error.

    See Also
    --------
    sklearn.metrics.mean_squared_error
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    weights = _check_weights(weights)
    squared_error = (a - b) ** 2
    if weights is not None:
        mean_squared_error = sumfunc(squared_error * weights, axis=axis) / sumfunc(
            weights, axis=axis
        )
    else:
        # Fix: reuse squared_error rather than recomputing (a - b) ** 2.
        mean_squared_error = meanfunc(squared_error, axis=axis)
    res = np.sqrt(mean_squared_error)
    return res
def _mse(a, b, weights, axis, skipna):
    """Mean Squared Error along ``axis``.

    Parameters
    ----------
    a, b : ndarray
        Input arrays.
    weights : ndarray or None
        Optional weights for a and b.
    axis : int
        The axis to apply the mse along.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Mean Squared Error.

    See Also
    --------
    sklearn.metrics.mean_squared_error
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    weights = _check_weights(weights)
    err2 = (a - b) ** 2
    if weights is None:
        return meanfunc(err2, axis=axis)
    return sumfunc(err2 * weights, axis=axis) / sumfunc(weights, axis=axis)
def _mae(a, b, weights, axis, skipna):
    """Mean Absolute Error along ``axis``.

    Parameters
    ----------
    a, b : ndarray
        Input arrays.
    weights : ndarray or None
        Optional weights for a and b.
    axis : int
        The axis to apply the mae along.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Mean Absolute Error.

    See Also
    --------
    sklearn.metrics.mean_absolute_error
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    weights = _check_weights(weights)
    abs_error = np.absolute(a - b)
    if weights is None:
        return meanfunc(abs_error, axis=axis)
    return sumfunc(abs_error * weights, axis=axis) / sumfunc(weights, axis=axis)
def _mad(a, b, axis, skipna):
"""
Median Absolute Deviation.
Parameters
----------
a : ndarray
Input array.
b : ndarray
Input array.
axis : int
The axis to apply the mad along.
skipna : bool
If True, skip NaNs when computing function.
Returns
-------
res : ndarray
Median Absolute Deviation.
See Also
--------
scipy.stats.median_absolute_deviation
"""
medianfunc = np.nanmedian if skipna else np.median
if skipna:
a, b, _ = _match_nans(a, b, None)
absolute_error = np.absolute(a - b)
return medianfunc(absolute_error, axis=axis)
def _mape(a, b, weights, axis, skipna):
    r"""Mean Absolute Percentage Error.

    :: math MAPE = 1/n \sum \frac{|F_t-A_t|}{|A_t|}

    Parameters
    ----------
    a : ndarray
        Input array (truth to be divided by).
    b : ndarray
        Input array.
    axis : int
        The axis to apply the mape along.
    weights : ndarray or None
        Optional weights.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Mean Absolute Percentage Error.

    Reference
    ---------
    https://en.wikipedia.org/wiki/Mean_absolute_percentage_error

    Notes
    -----
    The percent error is calculated in reference to ``a`` and reported as
    a decimal fraction (a value of 1 is 100%).
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    weights = _check_weights(weights)
    # Zeros in the reference would divide by zero; map them to NaN.
    reference = np.absolute(np.where(a != 0, a, np.nan))
    pct_error = np.absolute(a - b) / reference
    if weights is None:
        return meanfunc(pct_error, axis=axis)
    return sumfunc(pct_error * weights, axis=axis) / sumfunc(weights, axis=axis)
def _smape(a, b, weights, axis, skipna):
    r"""Symmetric Mean Absolute Percentage Error.

    :: math SMAPE = 1/n \sum \frac{|F_t-A_t|}{(|A_t|+|F_t|)}

    Parameters
    ----------
    a : ndarray
        Input array (truth to be divided by).
    b : ndarray
        Input array.
    axis : int
        The axis to apply the smape along.
    weights : ndarray or None
        Optional weights.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Symmetric Mean Absolute Percentage Error.

    Reference
    ---------
    https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error

    Notes
    -----
    Reported as a decimal fraction (a value of 1 is 100%).
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    weights = _check_weights(weights)
    sym_pct_error = np.absolute(a - b) / (np.absolute(a) + np.absolute(b))
    if weights is None:
        return meanfunc(sym_pct_error, axis=axis)
    return sumfunc(sym_pct_error * weights, axis=axis) / sumfunc(weights, axis=axis)
remove edit to spearman
import bottleneck as bn
import numpy as np
from scipy import special
from scipy.stats import distributions
# Public API: low-level ndarray implementations of the deterministic
# metrics (intended to be wrapped for labelled arrays elsewhere).
__all__ = [
    "_pearson_r",
    "_pearson_r_p_value",
    "_rmse",
    "_mse",
    "_mae",
    "_mad",
    "_smape",
    "_mape",
    "_spearman_r",
    "_spearman_r_p_value",
]
def _match_nans(a, b, weights):
    """
    Considers missing values pairwise. If a value is missing
    in a, the corresponding value in b is turned to nan, and
    vice versa.

    Returns
    -------
    a, b, weights : ndarray
        a, b, and weights (if not None) with nans placed at
        pairwise locations.
    """
    if np.isnan(a).any() or np.isnan(b).any():
        # Find pairwise indices in a and b that have nans.
        idx = np.logical_or(np.isnan(a), np.isnan(b))
        # NOTE: mutates the input arrays in place.
        a[idx], b[idx] = np.nan, np.nan
        if weights is not None:
            weights[idx] = np.nan
    return a, b, weights
def _get_numpy_funcs(skipna):
    """
    Returns nansum and nanmean if skipna is True;
    Returns sum and mean if skipna is False.
    """
    if skipna:
        return np.nansum, np.nanmean
    else:
        return np.sum, np.mean
def _check_weights(weights):
    """
    Quick check if weights are all NaN. If so,
    return None to guide weighting scheme.
    """
    if weights is None:
        return weights
    elif np.all(np.isnan(weights)):
        # All-NaN weights behave as "no weights": callers fall through to
        # their unweighted code path.
        return None
    else:
        return weights
def _pearson_r(a, b, weights, axis, skipna):
    """
    ndarray implementation of scipy.stats.pearsonr.

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the correlation along.
    weights : ndarray
        Input array of weights for a and b.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Pearson's correlation coefficient, clipped to [-1, 1].

    See Also
    --------
    scipy.stats.pearsonr
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    weights = _check_weights(weights)
    # Bring the reduction axis to the front so all reductions use axis=0.
    a = np.rollaxis(a, axis)
    b = np.rollaxis(b, axis)
    # Only do weighted sums if there are weights. Cannot have a
    # single generic function with weights of all ones, because
    # the denominator gets inflated when there are masked regions.
    if weights is not None:
        weights = np.rollaxis(weights, axis)
        ma = sumfunc(a * weights, axis=0) / sumfunc(weights, axis=0)
        mb = sumfunc(b * weights, axis=0) / sumfunc(weights, axis=0)
    else:
        ma = meanfunc(a, axis=0)
        mb = meanfunc(b, axis=0)
    am, bm = a - ma, b - mb
    if weights is not None:
        r_num = sumfunc(weights * am * bm, axis=0)
        r_den = np.sqrt(
            sumfunc(weights * am * am, axis=0) * sumfunc(weights * bm * bm, axis=0)
        )
    else:
        r_num = sumfunc(am * bm, axis=0)
        r_den = np.sqrt(sumfunc(am * am, axis=0) * sumfunc(bm * bm, axis=0))
    r = r_num / r_den
    # Clip to protect against tiny floating-point excursions past +/-1.
    res = np.clip(r, -1.0, 1.0)
    return res
def _pearson_r_p_value(a, b, weights, axis, skipna):
    """
    ndarray implementation of scipy.stats.pearsonr (p-value).

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the correlation along.
    weights : ndarray
        Input array of weights for a and b.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        2-tailed p-value.

    See Also
    --------
    scipy.stats.pearsonr
    """
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    r = _pearson_r(a, b, weights, axis, skipna)
    if np.isnan(r).all():
        return r
    else:
        # no nans or some nans
        a = np.rollaxis(a, axis)
        b = np.rollaxis(b, axis)
        # Fix: degrees of freedom must count the pairwise *valid*
        # (non-NaN) samples; the previous `np.isnan(a * b)` counted the
        # missing ones instead.
        dof = np.apply_over_axes(np.sum, ~np.isnan(a * b), 0).squeeze() - 2
        # Guard: with too few valid samples fall back to n - 2.
        dof = np.where(dof > 1.0, dof, a.shape[0] - 2)
        t_squared = r ** 2 * (dof / ((1.0 - r) * (1.0 + r)))
        _x = dof / (dof + t_squared)
        _x = np.asarray(_x)
        _x = np.where(_x < 1.0, _x, 1.0)
        _a = 0.5 * dof
        _b = 0.5
        # Two-tailed p via the regularized incomplete beta function.
        res = special.betainc(_a, _b, _x)
        # reset masked values to nan
        nan_locs = np.where(np.isnan(r))
        if len(nan_locs[0]) > 0:
            res[nan_locs] = np.nan
        return res
def _spearman_r(a, b, weights, axis, skipna):
    """
    ndarray implementation of scipy.stats.spearmanr.

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the correlation along.
    weights : ndarray
        Input array.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Spearmanr's correlation coefficient.

    See Also
    --------
    scipy.stats.spearmanr
    """
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    # bottleneck's nanrankdata ranks along the axis, leaving NaNs as NaN;
    # Spearman's rho is then Pearson's r computed on the ranks.
    rankfunc = bn.nanrankdata
    _a = rankfunc(a, axis=axis)
    _b = rankfunc(b, axis=axis)
    return _pearson_r(_a, _b, weights, axis, skipna)
def _spearman_r_p_value(a, b, weights, axis, skipna):
    """
    ndarray implementation of scipy.stats.spearmanr (p-value).

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the correlation along.
    weights : ndarray
        Input array.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        2-tailed p-value.

    See Also
    --------
    scipy.stats.spearmanr

    Reference
    ---------
    https://github.com/scipy/scipy/blob/v1.3.1/scipy/stats/stats.py#L3613-L3764
    """
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    rs = _spearman_r(a, b, weights, axis, skipna)
    a = np.rollaxis(a, axis)
    b = np.rollaxis(b, axis)
    # Fix: degrees of freedom must count the pairwise *valid* (non-NaN)
    # samples; `np.isnan(a * b)` counted the missing ones instead.
    dof = np.apply_over_axes(np.sum, ~np.isnan(a * b), 0).squeeze() - 2
    # Guard: with too few valid samples fall back to n - 2.
    dof = np.where(dof > 1.0, dof, a.shape[0] - 2)
    # t statistic; .clip(0) keeps the sqrt argument non-negative when
    # |rs| rounds to exactly 1.
    t = rs * np.sqrt((dof / ((rs + 1.0) * (1.0 - rs))).clip(0))
    p = 2 * distributions.t.sf(np.abs(t), dof)
    return p
def _rmse(a, b, weights, axis, skipna):
    """
    Root Mean Squared Error.

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the rmse along.
    weights : ndarray
        Input array of weights for a and b.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Root Mean Squared Error.

    See Also
    --------
    sklearn.metrics.mean_squared_error
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    weights = _check_weights(weights)
    squared_error = (a - b) ** 2
    if weights is not None:
        mean_squared_error = sumfunc(squared_error * weights, axis=axis) / sumfunc(
            weights, axis=axis
        )
    else:
        # Fix: reuse squared_error rather than recomputing (a - b) ** 2.
        mean_squared_error = meanfunc(squared_error, axis=axis)
    res = np.sqrt(mean_squared_error)
    return res
def _mse(a, b, weights, axis, skipna):
    """
    Mean Squared Error.

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the mse along.
    weights : ndarray
        Input array of weights for a and b.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Mean Squared Error.

    See Also
    --------
    sklearn.metrics.mean_squared_error
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    # _check_weights maps an all-NaN weights array to None.
    weights = _check_weights(weights)
    squared_error = (a - b) ** 2
    if weights is not None:
        return sumfunc(squared_error * weights, axis=axis) / sumfunc(weights, axis=axis)
    else:
        return meanfunc(squared_error, axis=axis)
def _mae(a, b, weights, axis, skipna):
    """
    Mean Absolute Error.

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the mae along.
    weights : ndarray
        Input array of weights for a and b.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Mean Absolute Error.

    See Also
    --------
    sklearn.metrics.mean_absolute_error
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    # _check_weights maps an all-NaN weights array to None.
    weights = _check_weights(weights)
    absolute_error = np.absolute(a - b)
    if weights is not None:
        return sumfunc(absolute_error * weights, axis=axis) / sumfunc(
            weights, axis=axis
        )
    else:
        return meanfunc(absolute_error, axis=axis)
def _mad(a, b, axis, skipna):
    """
    Median Absolute Deviation.

    Parameters
    ----------
    a : ndarray
        Input array.
    b : ndarray
        Input array.
    axis : int
        The axis to apply the mad along.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Median Absolute Deviation.

    See Also
    --------
    scipy.stats.median_absolute_deviation
    """
    medianfunc = np.nanmedian if skipna else np.median
    if skipna:
        a, b, _ = _match_nans(a, b, None)
    absolute_error = np.absolute(a - b)
    return medianfunc(absolute_error, axis=axis)
def _mape(a, b, weights, axis, skipna):
    r"""
    Mean Absolute Percentage Error.

    :: math MAPE = 1/n \sum \frac{|F_t-A_t|}{|A_t|}

    Parameters
    ----------
    a : ndarray
        Input array (truth to be divided by).
    b : ndarray
        Input array.
    axis : int
        The axis to apply the mape along.
    weights : ndarray
        Input array.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Mean Absolute Percentage Error.

    Reference
    ---------
    https://en.wikipedia.org/wiki/Mean_absolute_percentage_error

    Notes
    -----
    The percent error is calculated in reference to ``a``.
    Percent error is reported as decimal percent. I.e., a value of
    1 is 100%.
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    weights = _check_weights(weights)
    # replace divided by 0 with nan
    mape = np.absolute(a - b) / np.absolute(np.where(a != 0, a, np.nan))
    if weights is not None:
        return sumfunc(mape * weights, axis=axis) / sumfunc(weights, axis=axis)
    else:
        return meanfunc(mape, axis=axis)
def _smape(a, b, weights, axis, skipna):
    r"""
    Symmetric Mean Absolute Percentage Error.

    :: math SMAPE = 1/n \sum \frac{|F_t-A_t|}{(|A_t|+|F_t|)}

    Parameters
    ----------
    a : ndarray
        Input array (truth to be divided by).
    b : ndarray
        Input array.
    axis : int
        The axis to apply the smape along.
    weights : ndarray
        Input array.
    skipna : bool
        If True, skip NaNs when computing function.

    Returns
    -------
    res : ndarray
        Symmetric Mean Absolute Percentage Error.

    Reference
    ---------
    https://en.wikipedia.org/wiki/Symmetric_mean_absolute_percentage_error

    Notes
    -----
    Symmetric percent error is reported as decimal percent. I.e., a value of 1
    is 100%.
    """
    sumfunc, meanfunc = _get_numpy_funcs(skipna)
    if skipna:
        a, b, weights = _match_nans(a, b, weights)
    weights = _check_weights(weights)
    # NOTE(review): when |a| + |b| == 0 at a position this divides by
    # zero -- confirm the NaN/inf result is acceptable to callers.
    smape = np.absolute(a - b) / (np.absolute(a) + np.absolute(b))
    if weights is not None:
        return sumfunc(smape * weights, axis=axis) / sumfunc(weights, axis=axis)
    else:
        return meanfunc(smape, axis=axis)
|
Infrastructure to convert Python ADJ_NBLOCK_DERIVATIVE_ACTION_CB callbacks to cfuncs, so that they can be registered.
|
from PyQt5.QtCore import pyqtSignal, QObject
from PyQt5.QtCore import Qt, QRect, QSize
from PyQt5.QtGui import QBrush, QColor, QFont, QPalette, QPen
from PyQt5.QtWidgets import QTreeWidget, QHeaderView
from PyQt5.QtWidgets import QTreeWidgetItem, QTreeWidgetItemIterator
from PyQt5.QtWidgets import QSizePolicy, QStyledItemDelegate
from PyQt5.QtWidgets import QSpinBox, QLineEdit, QPushButton
from PyQt5.QtWidgets import QStyleOptionButton, QStyleOptionViewItem
from PyQt5.QtWidgets import QAbstractItemView, QCheckBox
from PyQt5.QtWidgets import QStyle, QCommonStyle
from PyQt5.QtWidgets import QColorDialog
from cadnano.enum import PartType
from cadnano.gui.palette import getColorObj, getPenObj, getBrushObj
from cadnano.gui.views.pathview import pathstyles as styles
from cadnano.gui.controllers.viewrootcontroller import ViewRootController
from .nucleicacidpartitem import NucleicAcidPartItem
from .plasmidpartitem import PlasmidPartItem
# Shared 12pt font for the outliner tree widget.
_FONT = QFont(styles.THE_FONT, 12)
# Reusable style object the item delegate uses to draw check/color cells.
_QCOMMONSTYLE = QCommonStyle()
class OutlinerTreeWidget(QTreeWidget):
    """Tree view of the document's parts.

    Three columns: part name, visibility checkbox, color swatch. Keeps a
    map from model part instances to their tree items and mirrors
    selection back onto the model.
    """

    def __init__(self, parent=None):
        super(OutlinerTreeWidget, self).__init__(parent)
        self.setAttribute(Qt.WA_MacShowFocusRect, 0)  # no mac focus halo

    def configure(self, window, document):
        """Wire the widget to `document` and set up columns, header,
        drag-and-drop and the custom item delegate."""
        self._window = window
        self._document = document
        self._controller = ViewRootController(self, document)
        self._root = self.invisibleRootItem()
        # Maps model part instance -> its part item in this tree.
        self._instance_items = {}
        # Appearance
        self.setFont(_FONT)
        # Columns
        self.setColumnCount(3)
        self.setIndentation(14)
        # Header
        self.setHeaderLabels(["Name", "", ""])
        h = self.header()
        h.setStretchLastSection(False)
        h.setSectionResizeMode(0, QHeaderView.Stretch)
        h.setSectionResizeMode(1, QHeaderView.Fixed)
        h.setSectionResizeMode(2, QHeaderView.Fixed)
        h.setSectionsMovable(True)
        self.setColumnWidth(0, 140)
        self.setColumnWidth(1, 18)
        self.setColumnWidth(2, 18)
        # Dragging
        self.setDragEnabled(True)
        self.setDragDropMode(QAbstractItemView.InternalMove)
        custom_delegate = CustomStyleItemDelegate()
        self.setItemDelegate(custom_delegate)
        self.model().dataChanged.connect(self.dataChangedSlot)
        self.itemSelectionChanged.connect(self.selectedChangedSlot)
    # end def

    def addDummyRow(self, part_name, visible, color, parent_QTreeWidgetItem=None):
        """Insert an editable test row; returns the new QTreeWidgetItem."""
        if parent_QTreeWidgetItem is None:
            parent_QTreeWidgetItem = self.invisibleRootItem()
        tw_item = QTreeWidgetItem(parent_QTreeWidgetItem)
        tw_item.setData(0, Qt.EditRole, part_name)
        tw_item.setData(1, Qt.EditRole, visible)
        tw_item.setData(2, Qt.EditRole, color)
        tw_item.setFlags(tw_item.flags() | Qt.ItemIsEditable)
        return tw_item
    # end def

    def getInstanceCount(self):
        return len(self._instance_items)

    ### SIGNALS ###

    ### SLOTS ###
    def partAddedSlot(self, sender, model_part_instance):
        """
        Receives notification from the model that a part has been added.
        Parts should add themselves to the QTreeWidget by passing parent=self.
        """
        model_part = model_part_instance.object()
        part_type = model_part_instance.object().partType()
        if part_type == PartType.PLASMIDPART:
            plasmid_part_item = PlasmidPartItem(model_part, parent=self)
            self._instance_items[model_part_instance] = plasmid_part_item
        elif part_type == PartType.NUCLEICACIDPART:
            na_part_item = NucleicAcidPartItem(model_part, parent=self)
            self._instance_items[model_part_instance] = na_part_item
        else:
            print(part_type)
            raise NotImplementedError
    # end def

    def clearSelectionsSlot(self, doc):
        pass
    # end def

    def selectedChangedSlot(self):
        """Mirror the widget selection back onto the model parts."""
        # Hoisted: selectedItems() was re-queried on every loop iteration.
        selected = self.selectedItems()
        for mpi, part_item in self._instance_items.items():
            mpi.object().setSelected(part_item in selected)
    # end def

    def selectionFilterChangedSlot(self, filter_name_list):
        pass
    # end def

    def preXoverFilterChangedSlot(self, filter_name):
        pass
    # end def

    def resetRootItemSlot(self, doc):
        pass
    # end def

    def dataChangedSlot(self, top_left, bot_right):
        """Push edits on the current item back into its model object."""
        c_i = self.currentItem()
        if c_i is None:
            return
        if c_i == self.itemFromIndex(top_left):
            c_i.updateModel()
    # end def

    ### ACCESSORS ###
    def window(self):
        # Overrides QWidget.window(); returns the app window stored by
        # configure().
        return self._window
    # end def

    ### METHODS ###
    def removePartItem(self, part_item):
        """Remove a top-level part item from the tree."""
        index = self.indexOfTopLevelItem(part_item)
        self.takeTopLevelItem(index)
    # end def

    def resetDocumentAndController(self, document):
        """Point the view at a new document and rebuild the controller."""
        self._document = document
        self._controller = ViewRootController(self, document)
        if len(self._instance_items) > 0:
            # NOTE(review): ImportError reads as a copy-paste oddity; a
            # RuntimeError would better signal "stale part items". Kept
            # for compatibility with existing callers.
            raise ImportError
    # end def

    def setModifyState(self, bool):
        """Propagate the modify state to every part item."""
        # Fix: iterate the part items (dict values). Iterating the dict
        # directly yields the model-part-instance keys, which are not the
        # objects carrying setModifyState().
        for part_item in self._instance_items.values():
            part_item.setModifyState(bool)
    # end def
# end class OutlinerTreeWidget
class CustomStyleItemDelegate(QStyledItemDelegate):
    def createEditor(self, parent_QWidget, option, model_index):
        """Return the editor widget appropriate for the edited column:
        line edit (name), checkbox (visibility), or color dialog."""
        column = model_index.column()
        if column == 0: # Part Name
            editor = QLineEdit(parent_QWidget)
            editor.setAlignment(Qt.AlignVCenter)
            return editor
        elif column == 1: # Visibility checkbox
            editor = QCheckBox(parent_QWidget)
            # setAlignment doesn't work https://bugreports.qt-project.org/browse/QTBUG-5368
            return editor
        elif column == 2: # Color Picker
            editor = QColorDialog(parent_QWidget)
            return editor
        # elif column == 3: # SpinBox Example
        #     editor = QSpinBox(parent_QWidget)
        #     editor.setAlignment(Qt.AlignHCenter|Qt.AlignVCenter)
        #     editor.setMinimum(0)
        #     editor.setMaximum(100)
        #     return editor
        else:
            # Fall back to the default delegate editor for other columns.
            return QStyledItemDelegate.createEditor(self, \
                parent_QWidget, option, model_index)
    # end def
    def setEditorData(self, editor, model_index):
        """Load the model's current value into the column's editor."""
        column = model_index.column()
        if column == 0: # Part Name
            text_QString = model_index.model().data(model_index, Qt.EditRole)
            editor.setText(text_QString)
        elif column == 1: # Visibility
            value = model_index.model().data(model_index, Qt.EditRole)
            editor.setChecked(value)
        elif column == 2: # Color
            value = model_index.model().data(model_index, Qt.EditRole)
            # editor.setText(value)
            editor.setCurrentColor(QColor(value))
        # elif column == 3: # SpinBox Example
        #     value = model_index.model().data(model_index, Qt.EditRole)
        #     editor.setValue(value)
        else:
            QStyledItemDelegate.setEditorData(self, editor, model_index)
    # end def
def setModelData(self, editor, model, model_index):
column = model_index.column()
if column == 0: # Part Name
text_QString = editor.text()
model.setData(model_index, text_QString, Qt.EditRole)
elif column == 1: # Visibility
value = editor.isChecked()
model.setData(model_index, value, Qt.EditRole)
elif column == 2: # Color
# color = editor.text()
# model.setData(model_index, color, Qt.EditRole)
color = editor.currentColor()
model.setData(model_index, color.name(), Qt.EditRole)
# elif column == 3: # SpinBox Example
# value = editor.value()
# model.setData(model_index, value, Qt.EditRole)
else:
QStyledItemDelegate.setModelData(self, editor, model, model_index)
# end def
def updateEditorGeometry(self, editor, option, model_index):
column = model_index.column()
if column == 0:
editor.setGeometry(option.rect)
elif column == 1:
rect = QRect(option.rect)
delta = option.rect.width() / 2 - 9
rect.setX(option.rect.x() + delta) # Hack to center the checkbox
editor.setGeometry(rect)
elif column == 2:
pass
# editor.setGeometry(option.rect)
else:
QStyledItemDelegate.updateEditorGeometry(self, editor, option, model_index)
# end def
def paint(self, painter, option, model_index):
column = model_index.column()
if column == 0: # Part Name
option.displayAlignment = Qt.AlignVCenter
QStyledItemDelegate.paint(self, painter, option, model_index)
if column == 1: # Visibility
element = _QCOMMONSTYLE.PE_IndicatorCheckBox
styleoption = QStyleOptionButton()
styleoption.rect = QRect(option.rect)
checked = model_index.model().data(model_index, Qt.EditRole)
styleoption.state |= QStyle.State_On if checked else QStyle.State_Off
_QCOMMONSTYLE.drawPrimitive(element, styleoption, painter)
if checked:
element = _QCOMMONSTYLE.PE_IndicatorMenuCheckMark
_QCOMMONSTYLE.drawPrimitive(element, styleoption, painter)
elif column == 2: # Color
color = model_index.model().data(model_index, Qt.EditRole)
element = _QCOMMONSTYLE.PE_IndicatorCheckBox
styleoption = QStyleOptionViewItem()
styleoption.palette.setBrush(QPalette.Button, QBrush(getColorObj(color)))
styleoption.rect = QRect(option.rect)
_QCOMMONSTYLE.drawPrimitive(element, styleoption, painter)
# elif column == 3: # SpinBox Example
# value = model_index.model().data(model_index, Qt.EditRole)
# option.displayAlignment = Qt.AlignHCenter | Qt.AlignVCenter
# currentQRect = QRect(option.rect)
# # currentQRect.setWidth(currentQRect.width() - 22)
# currentQRect.setWidth(20)
# # self.drawDisplay(painter, option, currentQRect, value)
# spinBoxQStyleOptionSpinBox = QStyleOptionSpinBox()
# spinBoxQStyleOptionSpinBox.rect = QRect(option.rect)
# _QCOMMONSTYLE.drawComplexControl(_QCOMMONSTYLE.CC_SpinBox, \
# spinBoxQStyleOptionSpinBox, \
# painter)
# elif column == 4: # PushButton example
# text_QString = model_index.model().data(model_index, Qt.EditRole)
# buttonQStyleOptionButton = QStyleOptionButton()
# buttonQStyleOptionButton.rect = QRect(option.rect)
# buttonQStyleOptionButton.text = text_QString
# buttonQStyleOptionButton.state = QStyle.State_Active
# _QCOMMONSTYLE.drawControl(_QCOMMONSTYLE.CE_PushButton, buttonQStyleOptionButton, painter)
else:
QStyledItemDelegate.paint(self, painter, option, model_index)
# end def
# end class CustomStyleItemDelegate
class OutlineRootItem(QTreeWidget):
    """
    OutlineRootItem is the root item in the OutlineView. It gets added directly
    to the pathscene by DocumentWindow. It receives two signals
    (partAddedSignal and selectedPartChangedSignal) via its ViewRootController.
    OutlineRootItem must instantiate its own controller to receive signals
    from the model.
    """

    def __init__(self, parent, window, document):
        """Create the tree, attach a controller, and seed placeholder rows."""
        super(OutlineRootItem, self).__init__(parent)
        self._window = window
        self._document = document
        # The controller subscribes this view to model signals.
        self._controller = ViewRootController(self, document)
        self._root = self.invisibleRootItem()
        self._instance_items = {}  # model part -> its tree item
        self._configure()  # setup header and drag mode
        custom_delegate = CustomStyleItemDelegate()
        self.setItemDelegate(custom_delegate)
        # Add some dummy items
        p1 = self.addDummyRow("Part0", True, "#cc0000")
        a1 = self.addDummyRow("Asm0", True, "#00cc00")
        self.expandItem(a1)
        p2 = self.addDummyRow("Part1", True, "#0000cc", a1)
        p3 = self.addDummyRow("Part2", True, "#cc6600", a1)

    def _configure(self):
        """One-time widget setup: font, columns, header, and drag behavior."""
        # Appearance
        self.setFont(_FONT)
        # Columns
        self.setColumnCount(3)
        self.setIndentation(14)
        # Header
        self.setHeaderLabels(["Name", "", ""])
        h = self.header()
        h.setStretchLastSection(False)
        h.setSectionResizeMode(0, QHeaderView.Stretch)
        h.setSectionResizeMode(1, QHeaderView.Fixed)
        h.setSectionResizeMode(2, QHeaderView.Fixed)
        h.setSectionsMovable(True)
        self.setColumnWidth(0, 140)
        self.setColumnWidth(1, 18)  # visibility checkbox column
        self.setColumnWidth(2, 18)  # color swatch column
        # Dragging
        self.setDragEnabled(True)
        self.setDragDropMode(QAbstractItemView.InternalMove)
    # end def

    def addDummyRow(self, part_name, visible, color, parent_QTreeWidgetItem=None):
        """Append an editable placeholder row; returns the new item."""
        # NOTE(review): `== None` works but `is None` is the idiomatic check.
        if parent_QTreeWidgetItem == None:
            parent_QTreeWidgetItem = self.invisibleRootItem()
        tw_item = QTreeWidgetItem(parent_QTreeWidgetItem)
        tw_item.setData(0, Qt.EditRole, part_name)
        tw_item.setData(1, Qt.EditRole, visible)
        tw_item.setData(2, Qt.EditRole, color)
        tw_item.setFlags(tw_item.flags() | Qt.ItemIsEditable)
        return tw_item
    # end def

    def getInstanceCount(self):
        """Number of part instances currently tracked by this view."""
        return len(self._instance_items)

    ### SIGNALS ###

    ### SLOTS ###
    def partAddedSlot(self, sender, model_part):
        """
        Receives notification from the model that a part has been added.
        Parts should add themselves to the QTreeWidget by passing parent=self.
        """
        part_type = model_part.__class__.__name__
        if part_type == "PlasmidPart":
            plasmid_part_item = PlasmidPartItem(model_part, parent=self)
        elif part_type in ["HoneycombPart", "SquarePart"]:
            # FIXME(review): OrigamiPartItem is not visible in this module's
            # imports -- this branch may raise NameError at runtime; confirm.
            nucleicacid_part_item = OrigamiPartItem(model_part, parent=self)
            self.addTopLevelItem(nucleicacid_part_item)
        else:
            print(part_type)
            raise NotImplementedError
    # end def

    def clearSelectionsSlot(self, doc):
        """No-op: selection clearing is not implemented for this view."""
        pass
    # end def

    # def selectionFilterChangedSlot(self, filter_name_list):
    #     pass
    # # end def
    # def preXoverFilterChangedSlot(self, filter_name):
    #     pass
    # # end def
    # def resetRootItemSlot(self, doc):
    #     pass
    # # end def

    ### ACCESSORS ###
    # def OutlineToolManager(self):
    #     """docstring for OutlineToolManager"""
    #     return self._window.OutlineToolManager
    # # end def
    def window(self):
        """Return the window passed to __init__."""
        return self._window
    # end def

    ### METHODS ###
    def removePartItem(self, part_item):
        """Remove part_item from the top level of the tree."""
        index = self.indexOfTopLevelItem(part_item)
        self.takeTopLevelItem(index)
        # del self._instance_items[plasmid_part_item]
    # end def

    def resetDocumentAndController(self, document):
        """Point the view at a new document and rebuild its controller."""
        self._document = document
        self._controller = ViewRootController(self, document)
        if len(self._instance_items) > 0:
            # NOTE(review): ImportError is a surprising exception type here.
            raise ImportError("resetDocumentAndController no _instance_items")
    # end def

    def setModifyState(self, bool):
        """Propagate the modify state to every tracked part item."""
        # NOTE: parameter shadows the builtin `bool`; kept for API stability.
        for nucleicacid_part_item in self._instance_items:
            nucleicacid_part_item.setModifyState(bool)
    # end def
Outline view: use QColorDialog for color picking.
from PyQt5.QtCore import pyqtSignal, QObject
from PyQt5.QtCore import Qt, QRect, QSize
from PyQt5.QtGui import QBrush, QColor, QFont, QPalette, QPen
from PyQt5.QtWidgets import QTreeWidget, QHeaderView
from PyQt5.QtWidgets import QTreeWidgetItem, QTreeWidgetItemIterator
from PyQt5.QtWidgets import QSizePolicy, QStyledItemDelegate
from PyQt5.QtWidgets import QSpinBox, QLineEdit, QPushButton
from PyQt5.QtWidgets import QStyleOptionButton, QStyleOptionViewItem
from PyQt5.QtWidgets import QAbstractItemView, QCheckBox
from PyQt5.QtWidgets import QStyle, QCommonStyle
from PyQt5.QtWidgets import QColorDialog
from cadnano.enum import PartType
from cadnano.gui.palette import getColorObj, getPenObj, getBrushObj
from cadnano.gui.views.pathview import pathstyles as styles
from cadnano.gui.controllers.viewrootcontroller import ViewRootController
from .nucleicacidpartitem import NucleicAcidPartItem
from .plasmidpartitem import PlasmidPartItem
_FONT = QFont(styles.THE_FONT, 12)
_QCOMMONSTYLE = QCommonStyle()
class OutlinerTreeWidget(QTreeWidget):
    """
    Outline panel tree listing part instances. The widget is constructed
    first; call configure() afterwards to attach the window/document and
    wire the model signals.
    """

    def __init__(self, parent=None):
        super(OutlinerTreeWidget, self).__init__(parent)
        self.setAttribute(Qt.WA_MacShowFocusRect, 0)  # no mac focus halo

    def configure(self, window, document):
        """Attach window/document, build columns/header, install the custom
        delegate, and connect model/selection signals."""
        self._window = window
        self._document = document
        self._controller = ViewRootController(self, document)
        self._root = self.invisibleRootItem()
        self._instance_items = {}  # model part instance -> tree part item
        # Appearance
        self.setFont(_FONT)
        # Columns
        self.setColumnCount(3)
        self.setIndentation(14)
        # Header
        self.setHeaderLabels(["Name", "", ""])
        h = self.header()
        h.setStretchLastSection(False)
        h.setSectionResizeMode(0, QHeaderView.Stretch)
        h.setSectionResizeMode(1, QHeaderView.Fixed)
        h.setSectionResizeMode(2, QHeaderView.Fixed)
        h.setSectionsMovable(True)
        self.setColumnWidth(0, 140)
        self.setColumnWidth(1, 18)  # visibility checkbox column
        self.setColumnWidth(2, 18)  # color swatch column
        # Dragging
        self.setDragEnabled(True)
        self.setDragDropMode(QAbstractItemView.InternalMove)
        custom_delegate = CustomStyleItemDelegate()
        self.setItemDelegate(custom_delegate)
        self.model().dataChanged.connect(self.dataChangedSlot)
        self.itemSelectionChanged.connect(self.selectedChangedSlot)
        # Add some dummy items
        # p1 = self.addDummyRow("Part0", True, "#cc0000")
        # a1 = self.addDummyRow("Asm0", True, "#00cc00")
        # self.expandItem(a1)
        # p2 = self.addDummyRow("Part1", True, "#0000cc", a1)
        # p3 = self.addDummyRow("Part2", True, "#cc6600", a1)
    # end def

    def addDummyRow(self, part_name, visible, color, parent_QTreeWidgetItem=None):
        """Append an editable placeholder row; returns the new item."""
        if parent_QTreeWidgetItem is None:
            parent_QTreeWidgetItem = self.invisibleRootItem()
        tw_item = QTreeWidgetItem(parent_QTreeWidgetItem)
        tw_item.setData(0, Qt.EditRole, part_name)
        tw_item.setData(1, Qt.EditRole, visible)
        tw_item.setData(2, Qt.EditRole, color)
        tw_item.setFlags(tw_item.flags() | Qt.ItemIsEditable)
        return tw_item
    # end def

    def getInstanceCount(self):
        """Number of part instances currently tracked by this view."""
        return len(self._instance_items)

    ### SIGNALS ###

    ### SLOTS ###
    def partAddedSlot(self, sender, model_part_instance):
        """
        Receives notification from the model that a part has been added.
        Parts should add themselves to the QTreeWidget by passing parent=self.
        """
        model_part = model_part_instance.object()
        part_type = model_part_instance.object().partType()
        if part_type == PartType.PLASMIDPART:
            plasmid_part_item = PlasmidPartItem(model_part, parent=self)
            self._instance_items[model_part_instance] = plasmid_part_item
        elif part_type == PartType.NUCLEICACIDPART:
            na_part_item = NucleicAcidPartItem(model_part, parent=self)
            self._instance_items[model_part_instance] = na_part_item
        else:
            print(part_type)
            raise NotImplementedError
    # end def

    def clearSelectionsSlot(self, doc):
        """No-op: selection clearing is not implemented for this view."""
        pass
    # end def

    def selectedChangedSlot(self):
        """Mirror the tree's current selection back onto the model parts."""
        for mpi in self._instance_items:
            if self._instance_items[mpi] in self.selectedItems():
                mpi.object().setSelected(True)
            else:
                mpi.object().setSelected(False)
    # end def

    def selectionFilterChangedSlot(self, filter_name_list):
        """No-op placeholder for selection-filter updates."""
        pass
    # end def

    def preXoverFilterChangedSlot(self, filter_name):
        """No-op placeholder for pre-crossover filter updates."""
        pass
    # end def

    def resetRootItemSlot(self, doc):
        """No-op placeholder for root-item resets."""
        pass
    # end def

    def dataChangedSlot(self, top_left, bot_right):
        """Push edits on the current item back into its model object."""
        c_i = self.currentItem()
        if c_i is None:
            return
        if c_i == self.itemFromIndex(top_left):
            c_i.updateModel()
    # end def

    ### ACCESSORS ###
    def window(self):
        """Return the window passed to configure()."""
        return self._window
    # end def

    ### METHODS ###
    def removePartItem(self, part_item):
        """Remove part_item from the top level of the tree."""
        index = self.indexOfTopLevelItem(part_item)
        self.takeTopLevelItem(index)
    # end def

    def resetDocumentAndController(self, document):
        """Point the view at a new document and rebuild its controller."""
        self._document = document
        self._controller = ViewRootController(self, document)
        if len(self._instance_items) > 0:
            # NOTE(review): ImportError is a surprising exception type here;
            # it mirrors the OutlineRootItem implementation in this file.
            raise ImportError
    # end def

    def setModifyState(self, bool):
        """Propagate the modify state to every tracked part item."""
        # NOTE: parameter shadows the builtin `bool`; kept for API stability.
        for part_item in self._instance_items:
            part_item.setModifyState(bool)
    # end def
# end class OutlinerTreeWidget
class CustomStyleItemDelegate(QStyledItemDelegate):
    """Per-column editors and painting for the outliner tree.

    Columns: 0 = part name (QLineEdit), 1 = visibility (QCheckBox),
    2 = color (QColorDialog). Other columns defer to QStyledItemDelegate.
    Name/visibility use Qt.EditRole; color is stored under Qt.DecorationRole.
    """

    def createEditor(self, parent_QWidget, option, model_index):
        """Return the editor widget appropriate for the index's column."""
        column = model_index.column()
        if column == 0:  # Part Name
            editor = QLineEdit(parent_QWidget)
            editor.setAlignment(Qt.AlignVCenter)
            return editor
        elif column == 1:  # Visibility checkbox
            # setAlignment doesn't work:
            # https://bugreports.qt-project.org/browse/QTBUG-5368
            editor = QCheckBox(parent_QWidget)
            return editor
        elif column == 2:  # Color picker
            editor = QColorDialog(parent_QWidget)
            return editor
        else:
            return QStyledItemDelegate.createEditor(self, parent_QWidget,
                                                    option, model_index)
    # end def

    def setEditorData(self, editor, model_index):
        """Push the model's current value into the editor widget."""
        column = model_index.column()
        if column == 0:  # Part Name
            text_QString = model_index.model().data(model_index, Qt.EditRole)
            editor.setText(text_QString)
        elif column == 1:  # Visibility
            value = model_index.model().data(model_index, Qt.EditRole)
            editor.setChecked(value)
        elif column == 2:  # Color (stored under the decoration role)
            value = model_index.model().data(model_index, Qt.DecorationRole)
            editor.setCurrentColor(QColor(value))
        else:
            QStyledItemDelegate.setEditorData(self, editor, model_index)
    # end def

    def setModelData(self, editor, model, model_index):
        """Write the editor widget's value back into the model."""
        column = model_index.column()
        if column == 0:  # Part Name
            model.setData(model_index, editor.text(), Qt.EditRole)
        elif column == 1:  # Visibility
            model.setData(model_index, editor.isChecked(), Qt.EditRole)
        elif column == 2:  # Color
            # Store the hex name string ("#rrggbb"), not the QColor object.
            color = editor.currentColor()
            model.setData(model_index, color.name(), Qt.DecorationRole)
        else:
            QStyledItemDelegate.setModelData(self, editor, model, model_index)
    # end def

    def updateEditorGeometry(self, editor, option, model_index):
        """Place the editor; the checkbox is nudged to the column center."""
        column = model_index.column()
        if column == 0:
            editor.setGeometry(option.rect)
        elif column == 1:
            rect = QRect(option.rect)
            delta = option.rect.width() / 2 - 9
            rect.setX(option.rect.x() + delta)  # Hack to center the checkbox
            editor.setGeometry(rect)
        elif column == 2:
            # QColorDialog opens as a free-floating dialog; no geometry to set.
            pass
        else:
            QStyledItemDelegate.updateEditorGeometry(self, editor, option,
                                                     model_index)
    # end def

    def paint(self, painter, option, model_index):
        """Custom-paint the visibility checkbox and color swatch columns."""
        column = model_index.column()
        if column == 0:  # Part Name
            option.displayAlignment = Qt.AlignVCenter
            QStyledItemDelegate.paint(self, painter, option, model_index)
        elif column == 1:  # Visibility
            # BUGFIX: this branch was a bare `if` following the column-0
            # branch, so a column-0 cell also reached the trailing `else`
            # below and was painted a second time.
            element = _QCOMMONSTYLE.PE_IndicatorCheckBox
            styleoption = QStyleOptionButton()
            styleoption.rect = QRect(option.rect)
            checked = model_index.model().data(model_index, Qt.EditRole)
            styleoption.state |= QStyle.State_On if checked else QStyle.State_Off
            _QCOMMONSTYLE.drawPrimitive(element, styleoption, painter)
            if checked:
                element = _QCOMMONSTYLE.PE_IndicatorMenuCheckMark
                _QCOMMONSTYLE.drawPrimitive(element, styleoption, painter)
        elif column == 2:  # Color
            color = model_index.model().data(model_index, Qt.DecorationRole)
            element = _QCOMMONSTYLE.PE_IndicatorCheckBox
            styleoption = QStyleOptionViewItem()
            styleoption.palette.setBrush(QPalette.Button, QBrush(getColorObj(color)))
            styleoption.rect = QRect(option.rect)
            _QCOMMONSTYLE.drawPrimitive(element, styleoption, painter)
        else:
            QStyledItemDelegate.paint(self, painter, option, model_index)
    # end def
# end class CustomStyleItemDelegate
class OutlineRootItem(QTreeWidget):
    """
    OutlineRootItem is the root item in the OutlineView. It gets added directly
    to the pathscene by DocumentWindow. It receives two signals
    (partAddedSignal and selectedPartChangedSignal) via its ViewRootController.
    OutlineRootItem must instantiate its own controller to receive signals
    from the model.
    """

    def __init__(self, parent, window, document):
        """Create the tree, attach a controller, and seed placeholder rows."""
        super(OutlineRootItem, self).__init__(parent)
        self._window = window
        self._document = document
        # The controller subscribes this view to model signals.
        self._controller = ViewRootController(self, document)
        self._root = self.invisibleRootItem()
        self._instance_items = {}  # model part -> its tree item
        self._configure()  # setup header and drag mode
        custom_delegate = CustomStyleItemDelegate()
        self.setItemDelegate(custom_delegate)
        # Add some dummy items
        p1 = self.addDummyRow("Part0", True, "#cc0000")
        a1 = self.addDummyRow("Asm0", True, "#00cc00")
        self.expandItem(a1)
        p2 = self.addDummyRow("Part1", True, "#0000cc", a1)
        p3 = self.addDummyRow("Part2", True, "#cc6600", a1)

    def _configure(self):
        """One-time widget setup: font, columns, header, and drag behavior."""
        # Appearance
        self.setFont(_FONT)
        # Columns
        self.setColumnCount(3)
        self.setIndentation(14)
        # Header
        self.setHeaderLabels(["Name", "", ""])
        h = self.header()
        h.setStretchLastSection(False)
        h.setSectionResizeMode(0, QHeaderView.Stretch)
        h.setSectionResizeMode(1, QHeaderView.Fixed)
        h.setSectionResizeMode(2, QHeaderView.Fixed)
        h.setSectionsMovable(True)
        self.setColumnWidth(0, 140)
        self.setColumnWidth(1, 18)  # visibility checkbox column
        self.setColumnWidth(2, 18)  # color swatch column
        # Dragging
        self.setDragEnabled(True)
        self.setDragDropMode(QAbstractItemView.InternalMove)
    # end def

    def addDummyRow(self, part_name, visible, color, parent_QTreeWidgetItem=None):
        """Append an editable placeholder row; returns the new item."""
        if parent_QTreeWidgetItem is None:  # BUGFIX: identity check, not ==
            parent_QTreeWidgetItem = self.invisibleRootItem()
        tw_item = QTreeWidgetItem(parent_QTreeWidgetItem)
        tw_item.setData(0, Qt.EditRole, part_name)
        tw_item.setData(1, Qt.EditRole, visible)
        tw_item.setData(2, Qt.EditRole, color)
        tw_item.setFlags(tw_item.flags() | Qt.ItemIsEditable)
        return tw_item
    # end def

    def getInstanceCount(self):
        """Number of part instances currently tracked by this view."""
        return len(self._instance_items)

    ### SIGNALS ###

    ### SLOTS ###
    def partAddedSlot(self, sender, model_part):
        """
        Receives notification from the model that a part has been added.
        Parts should add themselves to the QTreeWidget by passing parent=self.
        """
        part_type = model_part.__class__.__name__
        if part_type == "PlasmidPart":
            plasmid_part_item = PlasmidPartItem(model_part, parent=self)
        elif part_type in ["HoneycombPart", "SquarePart"]:
            # BUGFIX: this module never imports OrigamiPartItem, so the old
            # code raised NameError here. NucleicAcidPartItem (imported above,
            # and used by OutlinerTreeWidget for the same purpose) is the
            # item class for nucleic-acid parts.
            nucleicacid_part_item = NucleicAcidPartItem(model_part, parent=self)
            self.addTopLevelItem(nucleicacid_part_item)
        else:
            print(part_type)
            raise NotImplementedError
    # end def

    def clearSelectionsSlot(self, doc):
        """No-op: selection clearing is not implemented for this view."""
        pass
    # end def

    ### ACCESSORS ###
    def window(self):
        """Return the window passed to __init__."""
        return self._window
    # end def

    ### METHODS ###
    def removePartItem(self, part_item):
        """Remove part_item from the top level of the tree."""
        index = self.indexOfTopLevelItem(part_item)
        self.takeTopLevelItem(index)
    # end def

    def resetDocumentAndController(self, document):
        """Point the view at a new document and rebuild its controller.

        Raises:
            ImportError: if stale part items are still registered.
        """
        self._document = document
        self._controller = ViewRootController(self, document)
        if len(self._instance_items) > 0:
            raise ImportError("resetDocumentAndController no _instance_items")
    # end def

    def setModifyState(self, bool):
        """Propagate the modify state to every tracked part item."""
        # NOTE: parameter shadows the builtin `bool`; kept for API stability.
        for nucleicacid_part_item in self._instance_items:
            nucleicacid_part_item.setModifyState(bool)
    # end def
|
"""
This is a basic model to test saving and loading boolean and date-related
types, which in the past were problematic for some database backends.
"""
from django.db import models
from django.conf import settings
class Donut(models.Model):
    # Regression-test model for boolean and date/time field round-trips,
    # which were historically problematic on some database backends.
    name = models.CharField(max_length=100)
    is_frosted = models.BooleanField(default=False)
    has_sprinkles = models.NullBooleanField()  # tri-state: True/False/None
    baked_date = models.DateField(null=True)
    baked_time = models.TimeField(null=True)
    consumed_at = models.DateTimeField(null=True)

    class Meta:
        # Default ordering: oldest consumed first.
        ordering = ('consumed_at',)

    def __str__(self):
        """Readable representation used by the doctest output."""
        return self.name
# Doctest suite exercising boolean truthiness and date/time round-trips,
# including the year-boundary filters from ticket #3689.
__test__ = {'API_TESTS': """
# No donuts are in the system yet.
>>> Donut.objects.all()
[]
>>> d = Donut(name='Apple Fritter')
# Ensure we're getting True and False, not 0 and 1
>>> d.is_frosted
False
>>> d.has_sprinkles
>>> d.has_sprinkles = True
>>> d.has_sprinkles == True
True
>>> d.save()
>>> d2 = Donut.objects.all()[0]
>>> d2
<Donut: Apple Fritter>
>>> d2.is_frosted == False
True
>>> d2.has_sprinkles == True
True
>>> import datetime
>>> d2.baked_date = datetime.date(year=1938, month=6, day=4)
>>> d2.baked_time = datetime.time(hour=5, minute=30)
>>> d2.consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
>>> d2.save()
>>> d3 = Donut.objects.all()[0]
>>> d3.baked_date
datetime.date(1938, 6, 4)
>>> d3.baked_time
datetime.time(5, 30)
>>> d3.consumed_at
datetime.datetime(2007, 4, 20, 16, 19, 59)
# Year boundary tests (ticket #3689)
>>> d = Donut(name='Date Test 2007', baked_date=datetime.datetime(year=2007, month=12, day=31), consumed_at=datetime.datetime(year=2007, month=12, day=31, hour=23, minute=59, second=59))
>>> d.save()
>>> d1 = Donut(name='Date Test 2006', baked_date=datetime.datetime(year=2006, month=1, day=1), consumed_at=datetime.datetime(year=2006, month=1, day=1))
>>> d1.save()
>>> Donut.objects.filter(baked_date__year=2007)
[<Donut: Date Test 2007>]
>>> Donut.objects.filter(baked_date__year=2006)
[<Donut: Date Test 2006>]
>>> Donut.objects.filter(consumed_at__year=2007).order_by('name')
[<Donut: Apple Fritter>, <Donut: Date Test 2007>]
>>> Donut.objects.filter(consumed_at__year=2006)
[<Donut: Date Test 2006>]
>>> Donut.objects.filter(consumed_at__year=2005)
[]
>>> Donut.objects.filter(consumed_at__year=2008)
[]
"""}
A failing test for #8354
git-svn-id: 554f83ef17aa7291f84efa897c1acfc5d0035373@8799 bcc190cf-cafb-0310-a4f2-bffc1f526a37
"""
This is a basic model to test saving and loading boolean and date-related
types, which in the past were problematic for some database backends.
"""
from django.db import models
from django.conf import settings
class Donut(models.Model):
    # Regression-test model for boolean and date/time field round-trips,
    # which were historically problematic on some database backends.
    name = models.CharField(max_length=100)
    is_frosted = models.BooleanField(default=False)
    has_sprinkles = models.NullBooleanField()  # tri-state: True/False/None
    baked_date = models.DateField(null=True)
    baked_time = models.TimeField(null=True)
    consumed_at = models.DateTimeField(null=True)

    class Meta:
        # Default ordering: oldest consumed first.
        ordering = ('consumed_at',)

    def __str__(self):
        """Readable representation used by the doctest output."""
        return self.name
# Doctest suite exercising boolean truthiness and date/time round-trips,
# including the year-boundary filters from ticket #3689 and the
# timezone-aware datetime regression from ticket #8354.
__test__ = {'API_TESTS': """
# No donuts are in the system yet.
>>> Donut.objects.all()
[]
>>> d = Donut(name='Apple Fritter')
# Ensure we're getting True and False, not 0 and 1
>>> d.is_frosted
False
>>> d.has_sprinkles
>>> d.has_sprinkles = True
>>> d.has_sprinkles == True
True
>>> d.save()
>>> d2 = Donut.objects.all()[0]
>>> d2
<Donut: Apple Fritter>
>>> d2.is_frosted == False
True
>>> d2.has_sprinkles == True
True
>>> import datetime
>>> d2.baked_date = datetime.date(year=1938, month=6, day=4)
>>> d2.baked_time = datetime.time(hour=5, minute=30)
>>> d2.consumed_at = datetime.datetime(year=2007, month=4, day=20, hour=16, minute=19, second=59)
>>> d2.save()
>>> d3 = Donut.objects.all()[0]
>>> d3.baked_date
datetime.date(1938, 6, 4)
>>> d3.baked_time
datetime.time(5, 30)
>>> d3.consumed_at
datetime.datetime(2007, 4, 20, 16, 19, 59)
# Year boundary tests (ticket #3689)
>>> d = Donut(name='Date Test 2007', baked_date=datetime.datetime(year=2007, month=12, day=31), consumed_at=datetime.datetime(year=2007, month=12, day=31, hour=23, minute=59, second=59))
>>> d.save()
>>> d1 = Donut(name='Date Test 2006', baked_date=datetime.datetime(year=2006, month=1, day=1), consumed_at=datetime.datetime(year=2006, month=1, day=1))
>>> d1.save()
>>> Donut.objects.filter(baked_date__year=2007)
[<Donut: Date Test 2007>]
>>> Donut.objects.filter(baked_date__year=2006)
[<Donut: Date Test 2006>]
>>> Donut.objects.filter(consumed_at__year=2007).order_by('name')
[<Donut: Apple Fritter>, <Donut: Date Test 2007>]
>>> Donut.objects.filter(consumed_at__year=2006)
[<Donut: Date Test 2006>]
>>> Donut.objects.filter(consumed_at__year=2005)
[]
>>> Donut.objects.filter(consumed_at__year=2008)
[]
# TZ-aware datetimes (#8354).
>>> from django.utils import tzinfo
>>> dt = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=tzinfo.FixedOffset(0))
>>> d = Donut(name='Bear claw', consumed_at=dt)
>>> d.save()
>>> Donut.objects.filter(consumed_at=dt)
[<Donut: Bear claw>]
"""}
|
95ba75b0-2d5f-11e5-87e9-b88d120fff5e
95c462b5-2d5f-11e5-b7d5-b88d120fff5e
95c462b5-2d5f-11e5-b7d5-b88d120fff5e |
# -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions for the metabolic model test suite.
"""
from __future__ import absolute_import
from memote.basic import *
from memote.syntax import *
from memote.consistency import *
Fix wrong absolute imports: import from the memote.support subpackage instead of the memote top-level package.
# -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions for the metabolic model test suite.
"""
from __future__ import absolute_import
from memote.support.basic import *
from memote.support.syntax import *
from memote.support.consistency import *
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, time
from threading import Thread
from PIL import Image
from flask import current_app
from app import db, imgs
from app.models import Image as Img
from app.models import Post
from helpers.text import get_all_imgs
_image_thread = None
def jpeg_convert(infile):
    """ Try to convert and compress an image to jpeg"""
    # Split off the extension so the output keeps the same stem with .jpg.
    f, e = os.path.splitext(infile)
    outfile = f + '.jpg'
    try:
        img = Image.open(infile)
        base_width = 932  # maximum served width; larger images are downscaled
        w, h = img.size
        if w > base_width:
            # NOTE(review): assumes Python 3 true division; under Python 2
            # int/int would yield ratio == 0 and collapse the height -- confirm.
            ratio = base_width/w
            new_height = int(h*ratio)
            img = img.resize((base_width, new_height), Image.ANTIALIAS)
        # NOTE(review): save is assumed to run for every image (resized or
        # not), since source indentation was ambiguous -- confirm placement.
        img.save(outfile, dpi=[100,100], quality=80)
    except IOError:
        # Conversion failed (unsupported format, write error, ...);
        # fall back to reporting the original file name.
        current_app.logger.exception('Could not save file: ')
        return os.path.basename(infile)
    return os.path.basename(outfile)
def crop_image(infile):
    """ Crop an image to a centered square thumbnail.

    The crop is taken from the center along the longer dimension, then the
    result is shrunk to max_width x max_width if it is larger. The output is
    written next to the input as <stem>_crop<ext>.
    """
    f, e = os.path.splitext(infile)
    outfile = f + '_crop' + e
    original = Image.open(infile)
    w, h = original.size
    max_width = 152  # thumbnail edge length in pixels
    if w < h:
        # Portrait: keep full width, crop height around the vertical center.
        l = 0
        r = w
        t = (h // 2) - (w // 2)
        b = (h // 2) + (w // 2)
    elif h < w:
        # Landscape: keep full height, crop width around the horizontal center.
        l = (w // 2) - (h // 2)
        r = (w // 2) + (h // 2)
        t = 0
        b = h
    else:
        # BUGFIX: square images previously matched neither branch, leaving
        # l/t/r/b undefined and raising NameError. A square needs no crop.
        l, t, r, b = 0, 0, w, h
    cropped = original.crop((l, t, r, b))
    w, h = cropped.size
    if w > max_width and h > max_width:
        cropped = cropped.resize((max_width, max_width), Image.ANTIALIAS)
    cropped.save(outfile)
def remove_images(app):
    """Background worker: at configured times, delete images that are no
    longer referenced by any post -- both the files (and their _crop
    variants) and the database rows.

    Runs forever; intended to be started in a daemon-style Thread.
    """
    from datetime import datetime
    while True:
        time.sleep(10)
        conf = app.config['IMAGE_DELETE']
        with app.app_context():
            # Only act during the configured hour(s) and weekday(s).
            if ( datetime.utcnow().hour in conf['TIME_OF_DAY'] and
                 datetime.utcnow().weekday() in conf['WEEKDAY'] ):
                images = Img.get_all_imgs()
                db_imgs = [img.location + img.filename for img in images]
                posts = Post.get_all()
                post_imgs = get_all_imgs((post.body_html for post in posts))
                # Images recorded in the DB but referenced by no post body.
                diff_imgs = set(db_imgs) - set(post_imgs)
                if diff_imgs:
                    app.logger.debug('Images found in db: {}'.format(db_imgs))
                    # BUGFIX: the next two messages previously logged db_imgs
                    # three times instead of post_imgs / diff_imgs.
                    app.logger.debug('Images found in posts: {}'.format(post_imgs))
                    app.logger.debug('Images to delete: {}'.format(diff_imgs))
                for i in images:
                    if i.location + i.filename in diff_imgs:
                        if os.path.isfile(imgs.path(i.filename)):
                            os.remove(imgs.path(i.filename))
                        f, e = os.path.splitext(i.filename)
                        if os.path.isfile(imgs.path(f + '_crop' + e)):
                            os.remove(imgs.path(f + '_crop' + e))
                        db.session.delete(i)
                db.session.commit()
def start_image_deletion_thread():
    """Spawn the background image-deletion worker exactly once (never under
    the TESTING config)."""
    if current_app.config['TESTING']:
        return
    global _image_thread
    if _image_thread is not None:
        return  # already running
    _image_thread = Thread(target=remove_images,
                           args=[current_app._get_current_object()])
    current_app.logger.debug('Starting image deletion thread')
    _image_thread.start()
Increase the delay in the image deletion thread to 1800 seconds
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, time
from threading import Thread
from PIL import Image
from flask import current_app
from app import db, imgs
from app.models import Image as Img
from app.models import Post
from helpers.text import get_all_imgs
_image_thread = None
def jpeg_convert(infile):
    """ Try to convert and compress an image to jpeg"""
    # Split off the extension so the output keeps the same stem with .jpg.
    f, e = os.path.splitext(infile)
    outfile = f + '.jpg'
    try:
        img = Image.open(infile)
        base_width = 932  # maximum served width; larger images are downscaled
        w, h = img.size
        if w > base_width:
            # NOTE(review): assumes Python 3 true division; under Python 2
            # int/int would yield ratio == 0 and collapse the height -- confirm.
            ratio = base_width/w
            new_height = int(h*ratio)
            img = img.resize((base_width, new_height), Image.ANTIALIAS)
        # NOTE(review): save is assumed to run for every image (resized or
        # not), since source indentation was ambiguous -- confirm placement.
        img.save(outfile, dpi=[100,100], quality=80)
    except IOError:
        # Conversion failed (unsupported format, write error, ...);
        # fall back to reporting the original file name.
        current_app.logger.exception('Could not save file: ')
        return os.path.basename(infile)
    return os.path.basename(outfile)
def crop_image(infile):
    """ Crop an image to a centered square thumbnail.

    The crop is taken from the center along the longer dimension, then the
    result is shrunk to max_width x max_width if it is larger. The output is
    written next to the input as <stem>_crop<ext>.
    """
    f, e = os.path.splitext(infile)
    outfile = f + '_crop' + e
    original = Image.open(infile)
    w, h = original.size
    max_width = 152  # thumbnail edge length in pixels
    if w < h:
        # Portrait: keep full width, crop height around the vertical center.
        l = 0
        r = w
        t = (h // 2) - (w // 2)
        b = (h // 2) + (w // 2)
    elif h < w:
        # Landscape: keep full height, crop width around the horizontal center.
        l = (w // 2) - (h // 2)
        r = (w // 2) + (h // 2)
        t = 0
        b = h
    else:
        # BUGFIX: square images previously matched neither branch, leaving
        # l/t/r/b undefined and raising NameError. A square needs no crop.
        l, t, r, b = 0, 0, w, h
    cropped = original.crop((l, t, r, b))
    w, h = cropped.size
    if w > max_width and h > max_width:
        cropped = cropped.resize((max_width, max_width), Image.ANTIALIAS)
    cropped.save(outfile)
def remove_images(app):
    """Background worker: every 30 minutes, at configured times, delete
    images no longer referenced by any post -- both the files (and their
    _crop variants) and the database rows.

    Runs forever; intended to be started in a daemon-style Thread.
    """
    from datetime import datetime
    while True:
        time.sleep(1800)
        conf = app.config['IMAGE_DELETE']
        with app.app_context():
            # Only act during the configured hour(s) and weekday(s).
            if ( datetime.utcnow().hour in conf['TIME_OF_DAY'] and
                 datetime.utcnow().weekday() in conf['WEEKDAY'] ):
                images = Img.get_all_imgs()
                db_imgs = [img.location + img.filename for img in images]
                posts = Post.get_all()
                post_imgs = get_all_imgs((post.body_html for post in posts))
                # Images recorded in the DB but referenced by no post body.
                diff_imgs = set(db_imgs) - set(post_imgs)
                if diff_imgs:
                    app.logger.debug('Images found in db: {}'.format(db_imgs))
                    # BUGFIX: the next two messages previously logged db_imgs
                    # three times instead of post_imgs / diff_imgs.
                    app.logger.debug('Images found in posts: {}'.format(post_imgs))
                    app.logger.debug('Images to delete: {}'.format(diff_imgs))
                for i in images:
                    if i.location + i.filename in diff_imgs:
                        if os.path.isfile(imgs.path(i.filename)):
                            os.remove(imgs.path(i.filename))
                        f, e = os.path.splitext(i.filename)
                        if os.path.isfile(imgs.path(f + '_crop' + e)):
                            os.remove(imgs.path(f + '_crop' + e))
                        db.session.delete(i)
                db.session.commit()
def start_image_deletion_thread():
    """Start the background image-deletion thread once per process.

    Skipped entirely under TESTING configuration.
    """
    if not current_app.config['TESTING']:
        global _image_thread
        if _image_thread is None:
            _image_thread = Thread(target=remove_images,
                                   args=[current_app._get_current_object()])
            # The worker loops forever; mark it as a daemon so it cannot
            # keep the interpreter alive at shutdown.
            _image_thread.daemon = True
            current_app.logger.debug('Starting image deletion thread')
            _image_thread.start()
|
import csv
from django.db import connection
from django.conf import settings
from django.db import IntegrityError, DataError, ProgrammingError
from django.db.models.loading import get_model
from django.core.management.base import LabelCommand
from calaccess_raw.management.commands import CalAccessCommand
class Command(CalAccessCommand, LabelCommand):
help = 'Load a cleaned CalAccess file for a model into the database'
args = '<model name>'
# Trick for reformating date strings in source data so that they can
# be gobbled up by MySQL. You'll see how below.
date_sql = "DATE_FORMAT(str_to_date(@`%s`, '%%c/%%e/%%Y'), '%%Y-%%m-%%d')"
    def handle_label(self, label, **options):
        # Entry point: load the cleaned CSV for the model named ``label``.
        self.verbosity = options.get("verbosity")
        self.cursor = connection.cursor()
        # Silence MySQL notes and switch to TRADITIONAL (strict) mode for
        # the duration of the load; both settings are restored afterwards.
        self.cursor.execute("""SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0;""")
        self.cursor.execute("""SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE=TRADITIONAL;""")
        self.load(label)
        self.cursor.execute("""SET SQL_NOTES=@OLD_SQL_NOTES;""")
        self.cursor.execute("""SET SQL_MODE=@OLD_SQL_MODE;""")
def get_hdrs_and_cnt(self, csv_path):
"""
Get the headers and the line count
from a specified csv file
"""
with open(csv_path) as infile:
csv_reader = csv.reader(infile)
hdrs = csv_reader.next()
with open(csv_path) as infile:
csv_count = len(infile.readlines()) - 1
return hdrs, csv_count
def _make_date_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
It cleans "date" types to be properly formatted for postgresql
-----
it takes in format 'MM/DD/YYYY' or longer timestamps and strips
the first 10 characters
if empty it enters '01/01/1900'
"""
return """
,CASE
WHEN "%s" IS NOT NULL AND "%s" != ''
THEN to_date(substring("%s" from 1 for 10), 'MM/DD/YYYY')
WHEN "%s" = ''
THEN to_date('01/01/1900', 'MM/DD/YYYY')
END AS "%s"\n""" % (_col, _col, _col, _col, _col)
def _make_int_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
It cleans "int" types to be properly formatted for postgresql
(and helps to clean up incorrectly entered data)
"""
return """
,CASE
WHEN "%s" = ''
THEN NULL
WHEN "%s" = ' '
THEN NULL
WHEN "%s" = ' '
THEN NULL
WHEN "%s" = 'Y'
THEN 1
WHEN "%s" = 'y'
THEN 1
WHEN "%s" = 'X'
THEN 1
WHEN "%s" = 'x'
THEN 1
WHEN "%s" = 'N'
THEN 0
WHEN "%s" = 'n'
THEN 0
WHEN "%s" IS NOT NULL
THEN "%s"::int
END AS "%s"\n""" % (
_col, _col, _col, _col, _col, _col,
_col, _col, _col, _col, _col, _col)
def _make_numeric_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
It cleans "numeric" types to be properly
formatted for postgresql (and clean up incorrectly entered data)
----
If the data is blank or Null 0.0 will be inserted
"""
return """
,CASE
WHEN "%s" = ''
THEN 0.0
WHEN "%s" IS NULL
THEN 0.0
WHEN "%s" IS NOT NULL
THEN "%s"::numeric
END AS "%s"\n""" % (_col, _col, _col, _col, _col)
def _make_float_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
It cleans "numeric" types to be properly
formatted for postgresql (and clean up incorrectly entered data)
----
If the data is blank or Null 0.0 will be inserted
"""
return """
,CASE
WHEN "%s" = ''
THEN 0.0
WHEN "%s" IS NULL
THEN 0.0
WHEN "%s" IS NOT NULL
THEN "%s"::double precision
END AS "%s"\n""" % (_col, _col, _col, _col, _col)
def _make_timestamp_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
---
It cleans "timestamp" from a form "7/9/2014 12:00:00 AM" or
enters it as '01/01/1900 1:00:00 AM' if null or empty
"""
return """
,CASE
WHEN "%s" IS NOT NULL AND "%s" != ''
THEN to_timestamp("%s", 'MM/DD/YYYY HH12:MI:SS AM')
WHEN "%s" = ''
THEN to_timestamp('01/01/1900 1:00:00 AM', \
'MM/DD/YYYY HH12:MI:SS AM')
END AS "%s"\n""" % (_col, _col, _col, _col, _col)
def _make_special_not_null_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
---
it takes it empty columns that are in the csv and formats
then to be inserted correctly
"""
return """
,CASE
WHEN "%s" IS NULL
THEN ''
END AS "%s"\n""" % (_col, _col)
def _get_col_types(self, model, csv_headers, n_2_t_map):
"""
Get the columns postgresql will have to treate
differently on a case by base basis on insert
"""
int_cols = []
numeric_cols = []
date_cols = []
time_cols = []
regular_cols = []
double_cols = []
empty_cols = []
# fill in those column types
for col in model._meta.fields:
if col.db_type(connection).startswith('integer'):
int_cols.append(col.db_column)
elif col.db_type(connection).startswith('numeric'):
numeric_cols.append(col.db_column)
elif col.db_type(connection).startswith('date'):
date_cols.append(col.db_column)
elif col.db_type(connection).startswith('timestamp'):
time_cols.append(col.db_column)
elif col.db_type(connection).startswith('double'):
double_cols.append(col.db_column)
else:
if col.db_column is not None and col.db_column in csv_headers:
regular_cols.append(col.db_column)
csv_col_types = [] # column with its types
for col in csv_headers:
if col in regular_cols:
csv_col_types.append("\"" + col + "\"\t" + n_2_t_map[col])
else:
csv_col_types.append("\"" + col + "\"\ttext")
extra_cols = set([col.db_column for col in
model._meta.fields]).difference(set(csv_headers))
for col in extra_cols:
if col is not None:
empty_cols.append(col)
return csv_col_types, {
"int_cols": int_cols,
"numeric_cols": numeric_cols,
"date_cols": date_cols,
"time_cols": time_cols,
"regular_cols": regular_cols,
"double_cols": double_cols,
"empty_cols": empty_cols
}
def _make_pg_select(self, regular_cols, special_cols):
select_statement = "SELECT \""
if not regular_cols:
select_statement += "\", \"".join(["DUMMY_COLUMN"])
else:
select_statement += "\", \"".join(regular_cols)
select_statement += "\"\n"
# add in special formatting
for col_type, ls in special_cols.items():
if col_type == "int_cols":
select_statement += '\n'.join(
[self._make_int_case(col) for col in ls]
)
elif col_type == "numeric_cols":
select_statement += '\n'.join(
[self._make_numeric_case(col) for col in ls]
)
elif col_type == "date_cols":
select_statement += '\n'.join(
[self._make_date_case(col) for col in ls]
)
elif col_type == "time_cols":
select_statement += '\n'.join(
[self._make_timestamp_case(col) for col in ls]
)
elif col_type == "double_cols":
select_statement += '\n'.join(
[self._make_float_case(col) for col in ls]
)
elif col_type == "empty_cols":
select_statement += '\n'.join(
[self._make_special_not_null_case(col) for col in ls]
)
# finalize from statement
select_statement += "FROM temporary_table;"
return select_statement
    def load_postgresql(self, model, csv_path):
        """
        Load the cleaned CSV at ``csv_path`` into PostgreSQL for ``model``.

        Strategy: stage the raw CSV into an all-text ``temporary_table``
        with COPY, then INSERT ... SELECT into the real table, wrapping
        each non-text column in a CASE expression that cleans and casts
        its value (see the ``_make_*_case`` helpers).
        """
        c = connection.cursor()
        # Drop any temp table left behind by a previous (failed) run.
        try:
            c.execute('DROP TABLE temporary_table;')
        except ProgrammingError:
            pass
        c.execute('TRUNCATE TABLE "%s"' % model._meta.db_table)
        # get the headers and the count
        hdrs, csv_count = self.get_hdrs_and_cnt(csv_path)
        n_2_t_map = {}  # name to type map for columns
        for col in model._meta.fields:
            n_2_t_map[col.db_column] = col.db_type(connection)
        csv_col_types, special_cols = self._get_col_types(
            model, hdrs, n_2_t_map
        )
        regular_cols = special_cols.pop('regular_cols')
        empty_cols = special_cols['empty_cols']
        # make a big flat list for later insertion into the true table
        flat_special_cols = [itm for sl in special_cols.values() for itm in sl]
        # create the temp table w/ columns with types
        try:
            c.execute("CREATE TABLE \"temporary_table\" (%s);"
                      % ',\n'.join(csv_col_types))
        except ProgrammingError:
            self.failure("Temporary table already exists")
        # Server-side COPY: csv_path must be readable by the DB server.
        temp_insert = """COPY "temporary_table"
                         FROM '%s'
                         CSV
                         HEADER;""" % (csv_path)
        try:
            c.execute(temp_insert)  # insert everything into the temp table
        except DataError as e:
            # Best effort: report and continue (Python 2 print statement).
            print "initial insert dataerror error, ", e
        for col in empty_cols:
            # for tables where we create cases for every column and
            # we need a dummy column in order to migrate from table to table
            c.execute("ALTER TABLE temporary_table \
                ADD COLUMN \"%s\" text" % col)
        # build our insert statement
        insert_statement = "INSERT INTO \"%s\" (\"" % model._meta.db_table
        if not regular_cols:
            # No pass-through columns: add a dummy column to both tables
            # so the column list is non-empty; it is dropped again below.
            try:
                c.execute("ALTER TABLE temporary_table \
                    ADD COLUMN \"DUMMY_COLUMN\" text")
                c.execute("ALTER TABLE \"%s\" ADD COLUMN \"%s\" text"
                          % (model._meta.db_table, "DUMMY_COLUMN"))
                insert_col_list = "\", \"".join(
                    ["DUMMY_COLUMN"] + flat_special_cols
                )
            except ProgrammingError as e:
                self.failure("Error Altering Table: %s" % e)
        else:
            insert_col_list = "\", \"".join(
                regular_cols + flat_special_cols
            )
        insert_statement += insert_col_list
        insert_statement += "\")\n"
        # add in the select part for table migration
        select_statement = self._make_pg_select(regular_cols, special_cols)
        try:
            # print insert_statement + select_statement
            c.execute(insert_statement + select_statement)
        except DataError as e:
            self.failure(
                "Data Error Inserting Data Into Table: %s" % e)
        except ProgrammingError as e:
            self.failure(
                "Programming Error Inserting Data Into Table: %s" % e)
        except IntegrityError as e:
            self.failure(
                "Integrity Error Inserting Data Into Table: %s" % e)
        # c.execute('DROP TABLE temporary_table;')
        if not regular_cols:
            # Remove the dummy column added above.
            c.execute(
                "ALTER TABLE \"%s\" DROP COLUMN \"%s\""
                % (model._meta.db_table, "DUMMY_COLUMN")
            )
        model_count = model.objects.count()
        self.finish_load_message(model_count, csv_count)
def load_mysql(self, model, csv_path):
c = connection.cursor()
# flush
c.execute('TRUNCATE TABLE %s' % model._meta.db_table)
>>>>>>> 5a10c8bafad55be66360673da9e9303e2e7e4683
# Build the MySQL LOAD DATA INFILE command
bulk_sql_load_part_1 = '''
LOAD DATA LOCAL INFILE '%s'
INTO TABLE %s
FIELDS TERMINATED BY ','
OPTIONALLY ENCLOSED BY '"'
LINES TERMINATED BY '\\r\\n'
IGNORE 1 LINES
(
''' % (csv_path, model._meta.db_table)
infile = open(csv_path)
csv_reader = csv.reader(infile)
hdrs = csv_reader.next()
infile.close()
infile = open(csv_path)
csv_record_cnt = len(infile.readlines()) - 1
infile.close()
header_sql_list = []
date_set_list = []
for h in hdrs:
# If it is a date field, we need to reformat the data
# so that MySQL will properly parse it on the way in.
if h in model.DATE_FIELDS:
header_sql_list.append('@`%s`' % h)
date_set_list.append(
"`%s` = %s" % (h, self.date_sql % h)
)
else:
header_sql_list.append('`%s`' % h)
bulk_sql_load = bulk_sql_load_part_1 + ','.join(header_sql_list) + ')'
if date_set_list:
bulk_sql_load += " set %s" % ",".join(date_set_list)
# Run the query
cnt = self.cursor.execute(bulk_sql_load)
# Report back on how we did
self.finish_load_message(cnt, csv_record_cnt)
def finish_load_message(self, model_count, csv_count):
"""
The message displayed about whether or not a load finished
successfully.
"""
if self.verbosity:
if model_count != csv_count:
msg = ' Table Record count doesn\'t match CSV. \
Table: %s\tCSV: %s'
self.failure(msg % (
model_count,
csv_count,
))
def load(self, model_name):
"""
Loads the source CSV for the provided model.
"""
if self.verbosity:
self.log(" Loading %s" % model_name)
model = get_model("calaccess_raw", model_name)
csv_path = model.objects.get_csv_path()
# Flush
self.cursor.execute('TRUNCATE TABLE %s' % model._meta.db_table)
engine = settings.DATABASES['default']['ENGINE']
if engine == 'django.db.backends.mysql':
self.load_mysql(model, csv_path)
elif engine == 'django.db.backends.postgresql_psycopg2':
self.load_postgresql(model, csv_path)
else:
self.failure("Sorry that database is not supported")
# Remove MySQL only experiments
import csv
from django.db import connection
from django.conf import settings
from django.db import IntegrityError, DataError, ProgrammingError
from django.db.models.loading import get_model
from django.core.management.base import LabelCommand
from calaccess_raw.management.commands import CalAccessCommand
class Command(CalAccessCommand, LabelCommand):
help = 'Load a cleaned CalAccess file for a model into the database'
args = '<model name>'
# Trick for reformating date strings in source data so that they can
# be gobbled up by MySQL. You'll see how below.
date_sql = "DATE_FORMAT(str_to_date(@`%s`, '%%c/%%e/%%Y'), '%%Y-%%m-%%d')"
    def handle_label(self, label, **options):
        # Entry point: load the cleaned CSV for the model named ``label``.
        self.verbosity = options.get("verbosity")
        self.cursor = connection.cursor()
        self.load(label)
def get_hdrs_and_cnt(self, csv_path):
"""
Get the headers and the line count
from a specified csv file
"""
with open(csv_path) as infile:
csv_reader = csv.reader(infile)
hdrs = csv_reader.next()
with open(csv_path) as infile:
csv_count = len(infile.readlines()) - 1
return hdrs, csv_count
def _make_date_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
It cleans "date" types to be properly formatted for postgresql
-----
it takes in format 'MM/DD/YYYY' or longer timestamps and strips
the first 10 characters
if empty it enters '01/01/1900'
"""
return """
,CASE
WHEN "%s" IS NOT NULL AND "%s" != ''
THEN to_date(substring("%s" from 1 for 10), 'MM/DD/YYYY')
WHEN "%s" = ''
THEN to_date('01/01/1900', 'MM/DD/YYYY')
END AS "%s"\n""" % (_col, _col, _col, _col, _col)
def _make_int_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
It cleans "int" types to be properly formatted for postgresql
(and helps to clean up incorrectly entered data)
"""
return """
,CASE
WHEN "%s" = ''
THEN NULL
WHEN "%s" = ' '
THEN NULL
WHEN "%s" = ' '
THEN NULL
WHEN "%s" = 'Y'
THEN 1
WHEN "%s" = 'y'
THEN 1
WHEN "%s" = 'X'
THEN 1
WHEN "%s" = 'x'
THEN 1
WHEN "%s" = 'N'
THEN 0
WHEN "%s" = 'n'
THEN 0
WHEN "%s" IS NOT NULL
THEN "%s"::int
END AS "%s"\n""" % (
_col, _col, _col, _col, _col, _col,
_col, _col, _col, _col, _col, _col)
def _make_numeric_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
It cleans "numeric" types to be properly
formatted for postgresql (and clean up incorrectly entered data)
----
If the data is blank or Null 0.0 will be inserted
"""
return """
,CASE
WHEN "%s" = ''
THEN 0.0
WHEN "%s" IS NULL
THEN 0.0
WHEN "%s" IS NOT NULL
THEN "%s"::numeric
END AS "%s"\n""" % (_col, _col, _col, _col, _col)
def _make_float_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
It cleans "numeric" types to be properly
formatted for postgresql (and clean up incorrectly entered data)
----
If the data is blank or Null 0.0 will be inserted
"""
return """
,CASE
WHEN "%s" = ''
THEN 0.0
WHEN "%s" IS NULL
THEN 0.0
WHEN "%s" IS NOT NULL
THEN "%s"::double precision
END AS "%s"\n""" % (_col, _col, _col, _col, _col)
def _make_timestamp_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
---
It cleans "timestamp" from a form "7/9/2014 12:00:00 AM" or
enters it as '01/01/1900 1:00:00 AM' if null or empty
"""
return """
,CASE
WHEN "%s" IS NOT NULL AND "%s" != ''
THEN to_timestamp("%s", 'MM/DD/YYYY HH12:MI:SS AM')
WHEN "%s" = ''
THEN to_timestamp('01/01/1900 1:00:00 AM', \
'MM/DD/YYYY HH12:MI:SS AM')
END AS "%s"\n""" % (_col, _col, _col, _col, _col)
def _make_special_not_null_case(self, _col):
"""
This method takes in a column name and generates a
PostgreSQL "case" for correct insertion into the primary table.
---
it takes it empty columns that are in the csv and formats
then to be inserted correctly
"""
return """
,CASE
WHEN "%s" IS NULL
THEN ''
END AS "%s"\n""" % (_col, _col)
def _get_col_types(self, model, csv_headers, n_2_t_map):
"""
Get the columns postgresql will have to treate
differently on a case by base basis on insert
"""
int_cols = []
numeric_cols = []
date_cols = []
time_cols = []
regular_cols = []
double_cols = []
empty_cols = []
# fill in those column types
for col in model._meta.fields:
if col.db_type(connection).startswith('integer'):
int_cols.append(col.db_column)
elif col.db_type(connection).startswith('numeric'):
numeric_cols.append(col.db_column)
elif col.db_type(connection).startswith('date'):
date_cols.append(col.db_column)
elif col.db_type(connection).startswith('timestamp'):
time_cols.append(col.db_column)
elif col.db_type(connection).startswith('double'):
double_cols.append(col.db_column)
else:
if col.db_column is not None and col.db_column in csv_headers:
regular_cols.append(col.db_column)
csv_col_types = [] # column with its types
for col in csv_headers:
if col in regular_cols:
csv_col_types.append("\"" + col + "\"\t" + n_2_t_map[col])
else:
csv_col_types.append("\"" + col + "\"\ttext")
extra_cols = set([col.db_column for col in
model._meta.fields]).difference(set(csv_headers))
for col in extra_cols:
if col is not None:
empty_cols.append(col)
return csv_col_types, {
"int_cols": int_cols,
"numeric_cols": numeric_cols,
"date_cols": date_cols,
"time_cols": time_cols,
"regular_cols": regular_cols,
"double_cols": double_cols,
"empty_cols": empty_cols
}
def _make_pg_select(self, regular_cols, special_cols):
select_statement = "SELECT \""
if not regular_cols:
select_statement += "\", \"".join(["DUMMY_COLUMN"])
else:
select_statement += "\", \"".join(regular_cols)
select_statement += "\"\n"
# add in special formatting
for col_type, ls in special_cols.items():
if col_type == "int_cols":
select_statement += '\n'.join(
[self._make_int_case(col) for col in ls]
)
elif col_type == "numeric_cols":
select_statement += '\n'.join(
[self._make_numeric_case(col) for col in ls]
)
elif col_type == "date_cols":
select_statement += '\n'.join(
[self._make_date_case(col) for col in ls]
)
elif col_type == "time_cols":
select_statement += '\n'.join(
[self._make_timestamp_case(col) for col in ls]
)
elif col_type == "double_cols":
select_statement += '\n'.join(
[self._make_float_case(col) for col in ls]
)
elif col_type == "empty_cols":
select_statement += '\n'.join(
[self._make_special_not_null_case(col) for col in ls]
)
# finalize from statement
select_statement += "FROM temporary_table;"
return select_statement
    def load_postgresql(self, model, csv_path):
        """
        Load the cleaned CSV at ``csv_path`` into PostgreSQL for ``model``.

        Strategy: stage the raw CSV into an all-text ``temporary_table``
        with COPY, then INSERT ... SELECT into the real table, wrapping
        each non-text column in a CASE expression that cleans and casts
        its value (see the ``_make_*_case`` helpers).
        """
        c = connection.cursor()
        # Drop any temp table left behind by a previous (failed) run.
        try:
            c.execute('DROP TABLE temporary_table;')
        except ProgrammingError:
            pass
        c.execute('TRUNCATE TABLE "%s"' % model._meta.db_table)
        # get the headers and the count
        hdrs, csv_count = self.get_hdrs_and_cnt(csv_path)
        n_2_t_map = {}  # name to type map for columns
        for col in model._meta.fields:
            n_2_t_map[col.db_column] = col.db_type(connection)
        csv_col_types, special_cols = self._get_col_types(
            model, hdrs, n_2_t_map
        )
        regular_cols = special_cols.pop('regular_cols')
        empty_cols = special_cols['empty_cols']
        # make a big flat list for later insertion into the true table
        flat_special_cols = [itm for sl in special_cols.values() for itm in sl]
        # create the temp table w/ columns with types
        try:
            c.execute("CREATE TABLE \"temporary_table\" (%s);"
                      % ',\n'.join(csv_col_types))
        except ProgrammingError:
            self.failure("Temporary table already exists")
        # Server-side COPY: csv_path must be readable by the DB server.
        temp_insert = """COPY "temporary_table"
                         FROM '%s'
                         CSV
                         HEADER;""" % (csv_path)
        try:
            c.execute(temp_insert)  # insert everything into the temp table
        except DataError as e:
            # Best effort: report and continue (Python 2 print statement).
            print "initial insert dataerror error, ", e
        for col in empty_cols:
            # for tables where we create cases for every column and
            # we need a dummy column in order to migrate from table to table
            c.execute("ALTER TABLE temporary_table \
                ADD COLUMN \"%s\" text" % col)
        # build our insert statement
        insert_statement = "INSERT INTO \"%s\" (\"" % model._meta.db_table
        if not regular_cols:
            # No pass-through columns: add a dummy column to both tables
            # so the column list is non-empty; it is dropped again below.
            try:
                c.execute("ALTER TABLE temporary_table \
                    ADD COLUMN \"DUMMY_COLUMN\" text")
                c.execute("ALTER TABLE \"%s\" ADD COLUMN \"%s\" text"
                          % (model._meta.db_table, "DUMMY_COLUMN"))
                insert_col_list = "\", \"".join(
                    ["DUMMY_COLUMN"] + flat_special_cols
                )
            except ProgrammingError as e:
                self.failure("Error Altering Table: %s" % e)
        else:
            insert_col_list = "\", \"".join(
                regular_cols + flat_special_cols
            )
        insert_statement += insert_col_list
        insert_statement += "\")\n"
        # add in the select part for table migration
        select_statement = self._make_pg_select(regular_cols, special_cols)
        try:
            # print insert_statement + select_statement
            c.execute(insert_statement + select_statement)
        except DataError as e:
            self.failure(
                "Data Error Inserting Data Into Table: %s" % e)
        except ProgrammingError as e:
            self.failure(
                "Programming Error Inserting Data Into Table: %s" % e)
        except IntegrityError as e:
            self.failure(
                "Integrity Error Inserting Data Into Table: %s" % e)
        # c.execute('DROP TABLE temporary_table;')
        if not regular_cols:
            # Remove the dummy column added above.
            c.execute(
                "ALTER TABLE \"%s\" DROP COLUMN \"%s\""
                % (model._meta.db_table, "DUMMY_COLUMN")
            )
        model_count = model.objects.count()
        self.finish_load_message(model_count, csv_count)
def load_mysql(self, model, csv_path):
c = connection.cursor()
# flush
c.execute('TRUNCATE TABLE %s' % model._meta.db_table)
>>>>>>> 5a10c8bafad55be66360673da9e9303e2e7e4683
# Build the MySQL LOAD DATA INFILE command
bulk_sql_load_part_1 = '''
LOAD DATA LOCAL INFILE '%s'
INTO TABLE %s
FIELDS TERMINATED BY ','
OPTIONALLY ENCLOSED BY '"'
LINES TERMINATED BY '\\r\\n'
IGNORE 1 LINES
(
''' % (csv_path, model._meta.db_table)
infile = open(csv_path)
csv_reader = csv.reader(infile)
hdrs = csv_reader.next()
infile.close()
infile = open(csv_path)
csv_record_cnt = len(infile.readlines()) - 1
infile.close()
header_sql_list = []
date_set_list = []
for h in hdrs:
# If it is a date field, we need to reformat the data
# so that MySQL will properly parse it on the way in.
if h in model.DATE_FIELDS:
header_sql_list.append('@`%s`' % h)
date_set_list.append(
"`%s` = %s" % (h, self.date_sql % h)
)
else:
header_sql_list.append('`%s`' % h)
bulk_sql_load = bulk_sql_load_part_1 + ','.join(header_sql_list) + ')'
if date_set_list:
bulk_sql_load += " set %s" % ",".join(date_set_list)
# Run the query
cnt = self.cursor.execute(bulk_sql_load)
# Report back on how we did
self.finish_load_message(cnt, csv_record_cnt)
def finish_load_message(self, model_count, csv_count):
"""
The message displayed about whether or not a load finished
successfully.
"""
if self.verbosity:
if model_count != csv_count:
msg = ' Table Record count doesn\'t match CSV. \
Table: %s\tCSV: %s'
self.failure(msg % (
model_count,
csv_count,
))
def load(self, model_name):
"""
Loads the source CSV for the provided model.
"""
if self.verbosity:
self.log(" Loading %s" % model_name)
model = get_model("calaccess_raw", model_name)
csv_path = model.objects.get_csv_path()
# Flush
self.cursor.execute('TRUNCATE TABLE %s' % model._meta.db_table)
engine = settings.DATABASES['default']['ENGINE']
if engine == 'django.db.backends.mysql':
self.load_mysql(model, csv_path)
elif engine == 'django.db.backends.postgresql_psycopg2':
self.load_postgresql(model, csv_path)
else:
self.failure("Sorry that database is not supported")
|
"""
The module provides the base class for Monads in Python.
Monads
"""
from abc import ABCMeta, abstractmethod
from fp import atom
from six import moves
import six
# Sentinel atom returned by actions that produce no meaningful value.
noop = atom("noop")
class Monad(object):
__metaclass__ = ABCMeta
    @classmethod
    @abstractmethod
    def ret(cls, value):
        """
        The return function for the monad: wrap ``value`` in the monad.
        """
    @abstractmethod
    def bind(self, f):
        """
        Pass the value of this monad into the action ``f`` and return
        the resulting monad.
        """
    @classmethod
    @abstractmethod
    def fail(cls, error):
        """
        Return the failure case of the monad for the given ``error``.
        """
def bind_(self, f):
"""
Call the action f, throwing away the value of this monad
"""
return self.bind(lambda _: f())
@classmethod
def sequence(cls, ms):
"""
execute the monadic actions in ms, returning the result
>>> from fp.monads.maybe import Just, Nothing, Maybe
>>> Maybe.sequence([Just(1), Nothing, Just(2)])
Nothing
>>> Maybe.sequence([Just(1), Just(2)])
Just([1, 2])
"""
ret = cls.ret([])
def append_and_return(xs, x):
xs.append(x)
return xs
for m in ms:
ret = ret.bind(
lambda xs: m.bind(
lambda x: cls.ret(append_and_return(xs, x))))
return ret
@classmethod
def sequence_(cls, ms):
"""
Execute the
>>> from fp.monads.iomonad import printLn, IO
>>> actions = [printLn("Hello"), printLn("World")]
>>> IO.sequence_(actions).run()
Hello
World
"""
def reducer(acc, m):
return acc.bind_(lambda: m)
return moves.reduce(
reducer,
ms,
cls.ret(noop)
)
@classmethod
def sequence_dict(cls, d):
"""
Perform all the actions inside a dictionary and return the result
>>> from fp.monads.maybe import Maybe, Just
>>> Maybe.sequence_dict({"foo": Just(1), "bar": Just(2)}) == \
Just({'foo': 1, 'bar': 2})
True
"""
ret = cls.ret({})
def store_and_return(d, k, v):
d[k] = v
return d
for k, m in six.iteritems(d):
ret = ret.bind(
lambda d: m.bind(
lambda v: cls.ret(store_and_return(d, k, v))))
return ret
@classmethod
def mapM(cls, arrow, items):
"""
Map an arrow accross a list of values:
>>> from fp import p
>>> from fp.monads.maybe import Maybe
>>> maybe_int = p(Maybe.catch, int)
>>> Maybe.mapM(maybe_int, ["1", "2"])
Just([1, 2])
>>> Maybe.mapM(maybe_int, ["1", "a"])
Nothing
"""
return cls.sequence(moves.map(arrow, items))
@classmethod
def mapM_(cls, arrow, items):
"""
>>> from fp.monads.iomonad import IO, printLn
>>> action = IO.mapM_(printLn, ["Hello", "World"])
>>> action.run()
Hello
World
>>> action = IO.mapM_(printLn, ["Hello", "World"])
>>> action.run()
Hello
World
"""
return cls.sequence_(moves.map(arrow, items))
@classmethod
def arrow_cl(cls, arrow1, arrow2):
"""Left to Right arrow composition
This expressions means: maybe cast the string as an int, then
maybe add 1
>>> from fp.monads.maybe import Maybe, Just
>>> from fp import p
>>> maybe_int = p(Maybe.catch, int)
>>> string_to_plus = Maybe.arrow_cl(maybe_int, lambda x: Just(x+1))
>>> string_to_plus("1")
Just(2)
>>> string_to_plus("a")
Nothing
"""
return lambda x: arrow1(x).bind(arrow2)
@classmethod
def arrow_cr(cls, arrow1, arrow2):
"""Right to Left arrow composition
This expression means the same as: maybe cast the string as an
int, then maybe add 1 but read right to left.
>>> from fp.monads.maybe import Maybe
>>> from fp import p
>>> maybe_int = p(Maybe.catch, int)
>>> string_to_plus = Maybe.arrow_cr(lambda x: Maybe(x+1), maybe_int)
>>> string_to_plus("1")
Just(2)
"""
return cls.arrow_cl(arrow2, arrow1)
@classmethod
def ap(cls, f, *monads, **kwarg_monads):
"""
Monad.ap(f, *args, **kwargs) allows you to call a pure
function within a Monad.
A normal function like `f(x): x + 1` can be made to take a
Maybe and return a Maybe without any boilerplating:
>>> from fp.monads.maybe import Maybe, Just, Nothing
>>> Maybe.ap(lambda x: x+1, Just(1))
Just(2)
>>> Maybe.ap(lambda x: x+1, Nothing)
Nothing
Here's an example with an add function:
>>> import operator
>>> Maybe.ap(operator.add, Just(1), Just(2))
Just(3)
>>> Maybe.ap(operator.add, Nothing, Just(2))
Nothing
>>> Maybe.ap(operator.add, Just(1), Nothing)
Nothing
It even works with kwargs:
>>> from datetime import timedelta
>>> Maybe.ap(timedelta, days=Just(1), seconds=Just(60))
Just(datetime.timedelta(1, 60))
"""
argsM = cls.sequence(monads)
kwargsM = cls.sequence_dict(kwarg_monads)
return argsM.bind(
lambda args: kwargsM.bind(
lambda kwargs: cls.ret(f(*args, **kwargs))))
    @classmethod
    def filterM(cls, predM, items):
        """
        Filter ``items`` with the monadic predicate ``predM``; items for
        which the predicate action fails (e.g. yields Nothing) are
        silently skipped.

        >>> from fp.monads.maybe import Maybe
        >>> from fp import even, c, p
        >>> maybeInt = p(Maybe.catch, int) # convert int to a Maybe arrow
        >>> maybeEven = p(Maybe.ap, even) # lift even into Maybe
        >>> Maybe.filterM(c(maybeEven, maybeInt), ["x","1","2","3","4"])
        Just(['2', '4'])
        """
        ret = []
        for x in items:
            def filterArrow(b):
                # NOTE(review): closes over the loop variable ``x`` and
                # relies on ``bind`` running it immediately; a monad with
                # deferred bind (e.g. IO) would see only the last ``x``.
                # Assumed eager here — TODO confirm.
                if b:
                    ret.append(x)
            boolM = predM(x)
            # NOTE(review): the monad returned by bind is discarded; this
            # works only because filterArrow side-effects into ``ret``.
            boolM.bind(filterArrow)
        return cls.ret(ret)
def when(self, b):
"""
Execute the action when True, return a noop otherwise
>>> from fp.monads.iomonad import printLn
>>> _ = printLn("Hello").when(True).run()
Hello
>>> _ = printLn("Hello").when(False).run()
"""
cls = self.__class__
noopM = cls.ret(noop)
if b:
return self.bind_(lambda: noopM)
else:
return noopM
    def unless(self, b):
        """
        Execute the action when False, return a noop otherwise.

        >>> from fp.monads.iomonad import printLn
        >>> _ = printLn("Hello").unless(False).run()
        Hello
        >>> _ = printLn("Hello").unless(True).run()
        """
        # Simply the logical complement of ``when``.
        return self.when(not b)
@classmethod
def catch(cls, f, *args, **kwargs):
"""
Execute the function f(*args, **kwargs) and return the value inside the
monad.
Catch any errors and call the monad's fail method
>>> from fp.monads.maybe import Maybe
>>> from fp.monads.either import Either
>>> from fp.monads.iomonad import IO
>>> Maybe.catch(lambda: {'foo': 'bar'}['foo'])
Just('bar')
>>> Maybe.catch(lambda: {}['foo'])
Nothing
>>> Either.catch(lambda: {'foo': 'bar'}['foo'])
Right('bar')
>>> Either.catch(lambda: {}['foo'])
Left(KeyError('foo',))
>>> IO.catch(lambda: {'foo': 'bar'}['foo']).run()
'bar'
>>> IO.catch(lambda: {}['foo']).run()
Traceback (most recent call last):
...
KeyError: 'foo'
"""
try:
return cls.ret(f(*args, **kwargs))
except Exception as e:
return cls.fail(e)
class MonadPlus(object):
"""
MonadPlus allows a Monad to define what a zero result is and a
method for adding two MonadPlus instances together.
"""
__metaclass__ = ABCMeta
mzero = NotImplemented # MonadPlus sub-classes need to define mzero
    @abstractmethod
    def mplus(self, y):
        """
        An associative operation combining this value with ``y``.
        """
@classmethod
def msum(cls, xs):
"""
Reduces a list of MonadPlus instances using its mplus method:
>>> from fp.monads.maybe import Just, Nothing, Maybe
>>> Maybe.msum([Just(1), Nothing, Just(2)])
Just(1)
>>> Maybe.msum([Nothing, Nothing, Just(2)])
Just(2)
>>> Maybe.msum([Nothing, Nothing, Nothing])
Nothing
"""
return moves.reduce(
cls.mplus,
xs,
cls.mzero)
def mfilter(self, pred):
"""
Returns Monad.mzero if the pred is False.
>>> from fp.monads.maybe import Just
>>> from fp import even
>>> Just(4).mfilter(even)
Just(4)
>>> Just(3).mfilter(even)
Nothing
"""
cls = self.__class__
def inner(x):
if pred(x):
return cls.ret(x)
else:
return cls.mzero
return self.bind(inner)
@classmethod
def guard(cls, b):
"""
>>> from fp.monads.maybe import Maybe
>>> Maybe.guard(True).bind_(lambda: Maybe.ret("Hello"))
Just('Hello')
>>> Maybe.guard(False).bind_(lambda: Maybe.ret("Hello"))
Nothing
"""
if b:
return cls.ret(noop)
else:
return cls.mzero
# added the map method to monad
"""
The module provides the base class for Monads in Python.
Monads
"""
from abc import ABCMeta, abstractmethod
from fp import atom
from six import moves
import six
# Sentinel atom returned by actions that produce no meaningful value.
noop = atom("noop")
class Monad(object):
__metaclass__ = ABCMeta
    @classmethod
    @abstractmethod
    def ret(cls, value):
        """
        The return function for the monad: wrap ``value`` in the monad.
        """
    @abstractmethod
    def bind(self, f):
        """
        Pass the value of this monad into the action ``f`` and return
        the resulting monad.
        """
    @classmethod
    @abstractmethod
    def fail(cls, error):
        """
        Return the failure case of the monad for the given ``error``.
        """
def bind_(self, f):
"""
Call the action f, throwing away the value of this monad
"""
return self.bind(lambda _: f())
@classmethod
def sequence(cls, ms):
"""
execute the monadic actions in ms, returning the result
>>> from fp.monads.maybe import Just, Nothing, Maybe
>>> Maybe.sequence([Just(1), Nothing, Just(2)])
Nothing
>>> Maybe.sequence([Just(1), Just(2)])
Just([1, 2])
"""
ret = cls.ret([])
def append_and_return(xs, x):
xs.append(x)
return xs
for m in ms:
ret = ret.bind(
lambda xs: m.bind(
lambda x: cls.ret(append_and_return(xs, x))))
return ret
@classmethod
def sequence_(cls, ms):
"""
Execute the
>>> from fp.monads.iomonad import printLn, IO
>>> actions = [printLn("Hello"), printLn("World")]
>>> IO.sequence_(actions).run()
Hello
World
"""
def reducer(acc, m):
return acc.bind_(lambda: m)
return moves.reduce(
reducer,
ms,
cls.ret(noop)
)
@classmethod
def sequence_dict(cls, d):
"""
Perform all the actions inside a dictionary and return the result
>>> from fp.monads.maybe import Maybe, Just
>>> Maybe.sequence_dict({"foo": Just(1), "bar": Just(2)}) == \
Just({'foo': 1, 'bar': 2})
True
"""
ret = cls.ret({})
def store_and_return(d, k, v):
d[k] = v
return d
for k, m in six.iteritems(d):
ret = ret.bind(
lambda d: m.bind(
lambda v: cls.ret(store_and_return(d, k, v))))
return ret
@classmethod
def mapM(cls, arrow, items):
"""
Map an arrow accross a list of values:
>>> from fp import p
>>> from fp.monads.maybe import Maybe
>>> maybe_int = p(Maybe.catch, int)
>>> Maybe.mapM(maybe_int, ["1", "2"])
Just([1, 2])
>>> Maybe.mapM(maybe_int, ["1", "a"])
Nothing
"""
return cls.sequence(moves.map(arrow, items))
@classmethod
def mapM_(cls, arrow, items):
"""
>>> from fp.monads.iomonad import IO, printLn
>>> action = IO.mapM_(printLn, ["Hello", "World"])
>>> action.run()
Hello
World
>>> action = IO.mapM_(printLn, ["Hello", "World"])
>>> action.run()
Hello
World
"""
return cls.sequence_(moves.map(arrow, items))
@classmethod
def arrow_cl(cls, arrow1, arrow2):
"""Left to Right arrow composition
This expressions means: maybe cast the string as an int, then
maybe add 1
>>> from fp.monads.maybe import Maybe, Just
>>> from fp import p
>>> maybe_int = p(Maybe.catch, int)
>>> string_to_plus = Maybe.arrow_cl(maybe_int, lambda x: Just(x+1))
>>> string_to_plus("1")
Just(2)
>>> string_to_plus("a")
Nothing
"""
return lambda x: arrow1(x).bind(arrow2)
@classmethod
def arrow_cr(cls, arrow1, arrow2):
"""Right to Left arrow composition
This expression means the same as: maybe cast the string as an
int, then maybe add 1 but read right to left.
>>> from fp.monads.maybe import Maybe
>>> from fp import p
>>> maybe_int = p(Maybe.catch, int)
>>> string_to_plus = Maybe.arrow_cr(lambda x: Maybe(x+1), maybe_int)
>>> string_to_plus("1")
Just(2)
"""
return cls.arrow_cl(arrow2, arrow1)
def map(self, f):
"""
Calls a unary function with the value in the monad
>>> from fp.monads.maybe import Maybe, Just, Nothing
>>> Just("1").map(int)
Just(1)
>>> Nothing.map(int)
Nothing
"""
return self.bind(
lambda x: self.ret(f(x))
)
@classmethod
def ap(cls, f, *monads, **kwarg_monads):
"""
Monad.ap(f, *args, **kwargs) allows you to call a pure
function within a Monad.
A normal function like `f(x): x + 1` can be made to take a
Maybe and return a Maybe without any boilerplating:
>>> from fp.monads.maybe import Maybe, Just, Nothing
>>> Maybe.ap(lambda x: x+1, Just(1))
Just(2)
>>> Maybe.ap(lambda x: x+1, Nothing)
Nothing
Here's an example with an add function:
>>> import operator
>>> Maybe.ap(operator.add, Just(1), Just(2))
Just(3)
>>> Maybe.ap(operator.add, Nothing, Just(2))
Nothing
>>> Maybe.ap(operator.add, Just(1), Nothing)
Nothing
It even works with kwargs:
>>> from datetime import timedelta
>>> Maybe.ap(timedelta, days=Just(1), seconds=Just(60))
Just(datetime.timedelta(1, 60))
"""
argsM = cls.sequence(monads)
kwargsM = cls.sequence_dict(kwarg_monads)
return argsM.bind(
lambda args: kwargsM.bind(
lambda kwargs: cls.ret(f(*args, **kwargs))))
@classmethod
def filterM(cls, predM, items):
"""
>>> from fp.monads.maybe import Maybe
>>> from fp import even, c, p
>>> maybeInt = p(Maybe.catch, int) # convert int to a Maybe arrow
>>> maybeEven = p(Maybe.ap, even) # lift even into Maybe
>>> Maybe.filterM(c(maybeEven, maybeInt), ["x","1","2","3","4"])
Just(['2', '4'])
"""
ret = []
for x in items:
def filterArrow(b):
if b:
ret.append(x)
boolM = predM(x)
boolM.bind(filterArrow)
return cls.ret(ret)
def when(self, b):
"""
Execute the action when True, return a noop otherwise
>>> from fp.monads.iomonad import printLn
>>> _ = printLn("Hello").when(True).run()
Hello
>>> _ = printLn("Hello").when(False).run()
"""
cls = self.__class__
noopM = cls.ret(noop)
if b:
return self.bind_(lambda: noopM)
else:
return noopM
def unless(self, b):
"""
Execute the action when False, return a noop otherwise
>>> from fp.monads.iomonad import printLn
>>> _ = printLn("Hello").unless(False).run()
Hello
>>> _ = printLn("Hello").unless(True).run()
"""
return self.when(not b)
@classmethod
def catch(cls, f, *args, **kwargs):
"""
Execute the function f(*args, **kwargs) and return the value inside the
monad.
Catch any errors and call the monad's fail method
>>> from fp.monads.maybe import Maybe
>>> from fp.monads.either import Either
>>> from fp.monads.iomonad import IO
>>> Maybe.catch(lambda: {'foo': 'bar'}['foo'])
Just('bar')
>>> Maybe.catch(lambda: {}['foo'])
Nothing
>>> Either.catch(lambda: {'foo': 'bar'}['foo'])
Right('bar')
>>> Either.catch(lambda: {}['foo'])
Left(KeyError('foo',))
>>> IO.catch(lambda: {'foo': 'bar'}['foo']).run()
'bar'
>>> IO.catch(lambda: {}['foo']).run()
Traceback (most recent call last):
...
KeyError: 'foo'
"""
try:
return cls.ret(f(*args, **kwargs))
except Exception as e:
return cls.fail(e)
class MonadPlus(object):
    """
    Mixin for monads that have a zero element (``mzero``) and an
    associative way of combining two instances (``mplus``).
    """
    __metaclass__ = ABCMeta
    mzero = NotImplemented  # MonadPlus sub-classes need to define mzero

    @abstractmethod
    def mplus(self, y):
        """
        An associative operation
        """

    @classmethod
    def msum(cls, xs):
        """
        Reduces a list of MonadPlus instances using its mplus method:
        >>> from fp.monads.maybe import Just, Nothing, Maybe
        >>> Maybe.msum([Just(1), Nothing, Just(2)])
        Just(1)
        >>> Maybe.msum([Nothing, Nothing, Just(2)])
        Just(2)
        >>> Maybe.msum([Nothing, Nothing, Nothing])
        Nothing
        """
        accumulated = cls.mzero
        for item in xs:
            accumulated = cls.mplus(accumulated, item)
        return accumulated

    def mfilter(self, pred):
        """
        Returns Monad.mzero if the pred is False.
        >>> from fp.monads.maybe import Just
        >>> from fp import even
        >>> Just(4).mfilter(even)
        Just(4)
        >>> Just(3).mfilter(even)
        Nothing
        """
        monad_cls = self.__class__
        return self.bind(
            lambda value: monad_cls.ret(value) if pred(value) else monad_cls.mzero
        )

    @classmethod
    def guard(cls, b):
        """
        >>> from fp.monads.maybe import Maybe
        >>> Maybe.guard(True).bind_(lambda: Maybe.ret("Hello"))
        Just('Hello')
        >>> Maybe.guard(False).bind_(lambda: Maybe.ret("Hello"))
        Nothing
        """
        return cls.ret(noop) if b else cls.mzero
|
Removed debugging lines
debugging print lines were mistakenly left in
|
import pytest
from share import models
from share.change import ChangeGraph
from share.models import ChangeSet
from tests.share.models.factories import NormalizedDataFactory
from tests.share.normalize.factories import *
# Seed data accepted into the database before each test below: four works
# whose identifiers and agent relations the test inputs are disambiguated
# against.
initial = [
    Preprint(
        identifiers=[WorkIdentifier(1)],
        agent_relations=[
            Contributor(agent=Person(1, name='Bob Dylan')),
            Creator(agent=Person(2)),
            Creator(agent=Person(3)),
        ]
    ),
    CreativeWork(
        identifiers=[WorkIdentifier(2)],
        agent_relations=[
            Host(agent=Person(4)),
            Funder(agent=Person(name='Bill Gates')),
            Publisher(agent=Person(6)),
        ]
    ),
    # Same identifier as the work above -- exercises work-level merging.
    CreativeWork(
        identifiers=[WorkIdentifier(2)],
        related_agents=[
            Person(),
            Person(name='Laura Gates'),
            Person(),
        ]
    ),
    Publication(
        identifiers=[WorkIdentifier(3)],
        agent_relations=[
            Creator(agent=Person(7))
        ],
        related_works=[
            Patent(
                agent_relations=[
                    Contributor(agent=Person(8))
                ],
                identifiers=[WorkIdentifier(4)]
            )
        ]
    ),
]
@pytest.mark.django_db
class TestPersonDisambiguation:
    """Disambiguation of Person nodes against the accepted ``initial`` graph."""
    # everything with delta=0 fails
    @pytest.mark.parametrize('input, model, delta', [
        ([Person(name='Bob Dylan')], models.Person, 1),
        ([Person(7)], models.Person, 0),
        ([Publication(related_agents=[Person(4)])], models.Publication, 0),
        ([Publication(related_agents=[Person(9)])], models.Publication, 1),
        ([CreativeWork(related_agents=[Person(name='Bill Gates')])], models.CreativeWork, 1),
        ([Preprint(related_agents=[Person(8)])], models.Preprint, 0),
    ])
    def test_disambiguate(self, input, model, delta, Graph):
        """Accepting ``input`` should change exactly ``delta`` rows of ``model``."""
        # Seed the database with the initial graph, skipping disambiguation
        # so the seed rows are created verbatim.
        initial_cg = ChangeGraph(Graph(*initial))
        initial_cg.process(disambiguate=False)
        ChangeSet.objects.from_graph(initial_cg, NormalizedDataFactory().id).accept()
        Graph.reseed()
        # Nasty hack to avoid postgres' fuzzy counting
        before = model.objects.exclude(change=None).count()
        cg = ChangeGraph(Graph(*input))
        cg.process()
        cs = ChangeSet.objects.from_graph(cg, NormalizedDataFactory().id)
        # from_graph returns None when nothing changed; only accept real sets.
        if cs is not None:
            cs.accept()
        assert (model.objects.exclude(change=None).count() - before) == delta

    # first two fail
    @pytest.mark.parametrize('input', [
        [Person(1)],
        [Person(2), Person(3)],
        [Person(identifiers=[AgentIdentifier()])],
        [Publication(identifiers=[WorkIdentifier()], agent_relations=[Funder(agent=Person()), Publisher(agent=Person())])],
        [Preprint(identifiers=[WorkIdentifier()], related_agents=[Person(), Person()], agent_relations=[Funder(agent=Person()), Publisher(agent=Person())])]
    ])
    def test_reaccept(self, input, Graph):
        """Submitting the same input twice must yield no second change set."""
        initial_cg = ChangeGraph(Graph(*initial))
        initial_cg.process()
        ChangeSet.objects.from_graph(initial_cg, NormalizedDataFactory().id).accept()
        Graph.reseed()  # Force new values to be generated
        first_cg = ChangeGraph(Graph(*input))
        first_cg.process()
        first_cs = ChangeSet.objects.from_graph(first_cg, NormalizedDataFactory().id)
        assert first_cs is not None
        first_cs.accept()
        second_cg = ChangeGraph(Graph(*input))
        second_cg.process()
        second_cs = ChangeSet.objects.from_graph(second_cg, NormalizedDataFactory().id)
        assert second_cs is None

    def test_no_changes(self, Graph):
        """Re-processing the unchanged initial graph produces no change set."""
        initial_cg = ChangeGraph(Graph(*initial))
        initial_cg.process()
        ChangeSet.objects.from_graph(initial_cg, NormalizedDataFactory().id).accept()
        Graph.discarded_ids.clear()
        cg = ChangeGraph(Graph(*initial))
        cg.process()
        assert ChangeSet.objects.from_graph(cg, NormalizedDataFactory().id) is None
Update person tests to check proper model
import pytest
from share import models
from share.change import ChangeGraph
from share.models import ChangeSet
from tests.share.models.factories import NormalizedDataFactory
from tests.share.normalize.factories import *
# Seed data accepted into the database before each test below: four works
# whose identifiers and agent relations the test inputs are disambiguated
# against.
initial = [
    Preprint(
        identifiers=[WorkIdentifier(1)],
        agent_relations=[
            Contributor(agent=Person(1, name='Bob Dylan')),
            Creator(agent=Person(2)),
            Creator(agent=Person(3)),
        ]
    ),
    CreativeWork(
        identifiers=[WorkIdentifier(2)],
        agent_relations=[
            Host(agent=Person(4)),
            Funder(agent=Person(name='Bill Gates')),
            Publisher(agent=Person(6)),
        ]
    ),
    # Same identifier as the work above -- exercises work-level merging.
    CreativeWork(
        identifiers=[WorkIdentifier(2)],
        related_agents=[
            Person(),
            Person(name='Laura Gates'),
            Person(),
        ]
    ),
    Publication(
        identifiers=[WorkIdentifier(3)],
        agent_relations=[
            Creator(agent=Person(7))
        ],
        related_works=[
            Patent(
                agent_relations=[
                    Contributor(agent=Person(8))
                ],
                identifiers=[WorkIdentifier(4)]
            )
        ]
    ),
]
@pytest.mark.django_db
class TestPersonDisambiguation:
    """Disambiguation of Person nodes; deltas are counted on ``models.Person``."""
    # everything with delta=0 fails
    @pytest.mark.parametrize('input, model, delta', [
        ([Person(name='Bob Dylan')], models.Person, 1),
        ([Person(7)], models.Person, 0),
        ([Publication(related_agents=[Person(4)])], models.Person, 0),
        ([Publication(related_agents=[Person(9)])], models.Person, 1),
        ([CreativeWork(related_agents=[Person(name='Bill Gates')])], models.Person, 1),
        ([Preprint(related_agents=[Person(8)])], models.Person, 0),
    ])
    def test_disambiguate(self, input, model, delta, Graph):
        """Accepting ``input`` should change exactly ``delta`` rows of ``model``."""
        # Seed the database with the initial graph, skipping disambiguation
        # so the seed rows are created verbatim.
        initial_cg = ChangeGraph(Graph(*initial))
        initial_cg.process(disambiguate=False)
        ChangeSet.objects.from_graph(initial_cg, NormalizedDataFactory().id).accept()
        Graph.reseed()
        # Nasty hack to avoid postgres' fuzzy counting
        before = model.objects.exclude(change=None).count()
        cg = ChangeGraph(Graph(*input))
        cg.process()
        cs = ChangeSet.objects.from_graph(cg, NormalizedDataFactory().id)
        # from_graph returns None when nothing changed; only accept real sets.
        if cs is not None:
            cs.accept()
        assert (model.objects.exclude(change=None).count() - before) == delta

    # first two fail
    @pytest.mark.parametrize('input', [
        [Person(1)],
        [Person(2), Person(3)],
        [Person(identifiers=[AgentIdentifier()])],
        [Publication(identifiers=[WorkIdentifier()], agent_relations=[Funder(agent=Person()), Publisher(agent=Person())])],
        [Preprint(identifiers=[WorkIdentifier()], related_agents=[Person(), Person()], agent_relations=[Funder(agent=Person()), Publisher(agent=Person())])]
    ])
    def test_reaccept(self, input, Graph):
        """Submitting the same input twice must yield no second change set."""
        initial_cg = ChangeGraph(Graph(*initial))
        initial_cg.process()
        ChangeSet.objects.from_graph(initial_cg, NormalizedDataFactory().id).accept()
        Graph.reseed()  # Force new values to be generated
        first_cg = ChangeGraph(Graph(*input))
        first_cg.process()
        first_cs = ChangeSet.objects.from_graph(first_cg, NormalizedDataFactory().id)
        assert first_cs is not None
        first_cs.accept()
        second_cg = ChangeGraph(Graph(*input))
        second_cg.process()
        second_cs = ChangeSet.objects.from_graph(second_cg, NormalizedDataFactory().id)
        assert second_cs is None

    # fails
    def test_no_changes(self, Graph):
        """Re-processing the unchanged initial graph produces no change set."""
        initial_cg = ChangeGraph(Graph(*initial))
        initial_cg.process()
        ChangeSet.objects.from_graph(initial_cg, NormalizedDataFactory().id).accept()
        Graph.discarded_ids.clear()
        cg = ChangeGraph(Graph(*initial))
        cg.process()
        assert ChangeSet.objects.from_graph(cg, NormalizedDataFactory().id) is None
|
9a600b40-2d5f-11e5-866c-b88d120fff5e
9a69b8d4-2d5f-11e5-a1bf-b88d120fff5e
9a69b8d4-2d5f-11e5-a1bf-b88d120fff5e |
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""
Common utility functions
"""
from __future__ import absolute_import, division, print_function
import collections
from math import floor
import six
from astropy.time import Time
from astropy.time import Time
from astropy.time import TimeDelta
from astropy import coordinates as coord
from astropy import units as u
import numpy as np
import datetime
if six.PY2:
    # On Python 2, ``str`` is already a byte string: identity functions.
    def str_to_bytes(s):
        return s

    def bytes_to_str(b):
        return b
else:
    # On Python 3, convert between str and bytes through UTF-8.
    def str_to_bytes(s):
        return s.encode('utf8')

    def bytes_to_str(b):
        return b.decode('utf8')
def LSTScheduler(starttime, LSTbin_size, longitude=21.25):
    """
    Round a time to the nearest LST bin.

    LSTbins start at 0 and step according to LSTbin_size.

    Inputs:
        starttime: astropy Time object
        LSTbin_size: lst bin size in seconds
        longitude: degrees

    Returns:
        time of nearest lst bin (astropy.time.Time), lst bin in hours (astropy.coord.Angle)

    Raises:
        TypeError: if ``starttime`` is not an astropy Time object.
    """
    sidesec = u.Quantity(1, 'sday').to('day').value  # length of sidereal second in SI seconds.
    locate = coord.EarthLocation(lon=longitude * u.deg, lat=-30 * u.deg)  # HERA location, #XXX get the HERA location programmatically
    # BUG FIX: the TypeError used to be raised unconditionally, so the
    # function could never run; guard it behind the isinstance check.
    if not isinstance(starttime, Time):
        raise TypeError("starttime is not a valid Astropy Time object")
    starttime.location = locate
    numChunks = (24 * 60 * 60) / LSTbin_size  # seconds in a day
    lstGrid = np.linspace(0, int(numChunks), int(numChunks) + 1, dtype=int) * LSTbin_size
    hmsList = [None] * (int(numChunks + 1))
    # convert the grid in seconds to HMS
    for i, sec in enumerate(lstGrid):  # make a grid of our evenly chunked LST times starting from 00h00m00s on the current day
        hrs = int(lstGrid[i] / 3600)
        mins = int((lstGrid[i] % 3600) / 60)
        secs = int(lstGrid[i] - int(hrs * 3600) - int(mins * 60))
        if hrs == 24:
            hrs = int(0)  # wrap 24h back to 0h
        hms_str = '%02dh%02dm%02ds' % (hrs, mins, secs)
        hmsList[i] = hms_str
    lstAngleGrid = coord.Angle(hmsList)  # turn LST grid into angle array
    for i, hour in enumerate(lstAngleGrid):
        if hour >= starttime.sidereal_time('apparent'):  # Find the timeslot our target is in
            diffSide = hour - starttime.sidereal_time('apparent')  # get difference in sidereal
            diffSecs = diffSide.hms[2] * sidesec  # convert difference to SI seconds
            break
    dt = TimeDelta((diffSecs), format='sec')
    scheduleTime = starttime + dt  # adjust target time by difference to get start time
    return scheduleTime, hour
def calculate_obsid(starttime):
    """
    Create a new obsid using Astropy to compute the gps second.

    Parameters:
    ------------
    starttime: astropy time object
        observation starttime

    Returns:
    --------
    obsid: int, the floor of the GPS seconds of ``starttime``

    Raises:
    -------
    ValueError: if ``starttime`` is not an astropy Time object
    """
    if not isinstance(starttime, Time):
        raise ValueError('starttime must be an astropy Time object')
    return int(floor(starttime.gps))
def get_iterable(x):
    """Return *x* unchanged if it is a non-string iterable; otherwise wrap
    it in a one-element tuple so callers can always iterate the result."""
    # Strings are iterable but are treated as scalars here.
    wrap_as_scalar = isinstance(x, str)
    if not wrap_as_scalar:
        try:
            iter(x)
        except TypeError:
            wrap_as_scalar = True
    return (x,) if wrap_as_scalar else x
def _reraise_context(fmt, *args):
    """Reraise an exception with its message modified to specify additional context.
    This function tries to help provide context when a piece of code
    encounters an exception while trying to get something done, and it wishes
    to propagate contextual information farther up the call stack. It is a
    consistent way to do it for both Python 2 and 3, since Python 2 does not
    provide Python 3's `exception chaining <https://www.python.org/dev/peps/pep-3134/>`_ functionality.
    Instead of that more sophisticated infrastructure, this function just
    modifies the textual message associated with the exception being raised.
    If only a single argument is supplied, the exception text is prepended with
    the stringification of that argument. If multiple arguments are supplied,
    the first argument is treated as an old-fashioned ``printf``-type
    (``%``-based) format string, and the remaining arguments are the formatted
    values.
    Borrowed from pwkit (https://github.com/pkgw/pwkit/blob/master/pwkit/__init__.py)
    Example usage::
        from hera_mc.utils import reraise_context
        filename = 'my-filename.txt'
        try:
            f = filename.open('rt')
            for line in f.readlines():
                # do stuff ...
        except Exception as e:
            reraise_context('while reading "%r"', filename)
            # The exception is reraised and so control leaves this function.
    If an exception with text ``"bad value"`` were to be raised inside the
    ``try`` block in the above example, its text would be modified to read
    ``"while reading \"my-filename.txt\": bad value"``.
    """
    import sys
    # Build the context prefix: printf-style when extra args are supplied.
    if len(args):
        cstr = fmt % args
    else:
        cstr = six.text_type(fmt)
    # Must be called from inside an except block: grab the active exception.
    ex = sys.exc_info()[1]
    if isinstance(ex, EnvironmentError):
        # EnvironmentError renders from .strerror; patch it and keep args
        # in the (errno, strerror) shape its constructor produced.
        ex.strerror = '%s: %s' % (cstr, ex.strerror)
        ex.args = (ex.errno, ex.strerror)
    else:
        if len(ex.args):
            cstr = '%s: %s' % (cstr, ex.args[0])
        ex.args = (cstr, ) + ex.args[1:]
    # Bare raise re-raises the active exception with its original traceback.
    raise
pep8
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""
Common utility functions
"""
from __future__ import absolute_import, division, print_function
import collections
from math import floor
import six
from astropy.time import Time
from astropy.time import Time
from astropy.time import TimeDelta
from astropy import coordinates as coord
from astropy import units as u
import numpy as np
import datetime
if six.PY2:
    # On Python 2, ``str`` is already a byte string: identity functions.
    def str_to_bytes(s):
        return s

    def bytes_to_str(b):
        return b
else:
    # On Python 3, convert between str and bytes through UTF-8.
    def str_to_bytes(s):
        return s.encode('utf8')

    def bytes_to_str(b):
        return b.decode('utf8')
def LSTScheduler(starttime, LSTbin_size, longitude=21.25):
    """
    Round a time to the nearest LST bin.
    LSTbins start at 0 and step according to LSTbin_size.
    Inputs:
    starttime: astropy time object
    LSTbin_size: lst bin size in seconds
    longitude: degrees
    Returns:
    time of nearest lst bin (astropy.time.Time), lst bin in hours (astropy.coord.Angle)
    """
    sidesec = u.Quantity(1, 'sday').to('day').value  # length of sidereal second in SI seconds.
    locate = coord.EarthLocation(lon=longitude * u.deg, lat=-30 * u.deg)  # HERA location, #XXX get the HERA location programmatically
    if not isinstance(starttime, Time):
        raise TypeError("starttime is not a valid Astropy Time object")
    starttime.location = locate
    numChunks = (24 * 60 * 60) / LSTbin_size  # seconds in a day
    lstGrid = np.linspace(0, int(numChunks), int(numChunks) + 1, dtype=int) * LSTbin_size
    hmsList = [None] * (int(numChunks + 1))
    # convert the grid in seconds to HMS
    for i, sec in enumerate(lstGrid):  # make a grid of our evenly chunked LST times starting from 00h00m00s on the current day
        hrs = int(lstGrid[i] / 3600)
        mins = int((lstGrid[i] % 3600) / 60)
        secs = int(lstGrid[i] - int(hrs * 3600) - int(mins * 60))
        if hrs == 24:
            hrs = int(0)  # wrap 24h back to 0h
        hms_str = '%02dh%02dm%02ds' % (hrs, mins, secs)
        hmsList[i] = hms_str
    lstAngleGrid = coord.Angle(hmsList)  # turn LST grid into angle array
    for i, hour in enumerate(lstAngleGrid):
        if hour >= starttime.sidereal_time('apparent'):  # Find the timeslot our target is in
            diffSide = hour - starttime.sidereal_time('apparent')  # get difference in sidereal
            diffSecs = diffSide.hms[2] * sidesec  # convert difference to SI seconds
            break
    dt = TimeDelta((diffSecs), format='sec')
    scheduleTime = starttime + dt  # adjust target time by difference to get start time
    return scheduleTime, hour
def calculate_obsid(starttime):
    """
    Create a new obsid using Astropy to compute the gps second.

    Parameters:
    ------------
    starttime: astropy time object
        observation starttime

    Returns:
    --------
    obsid: int, the floor of the GPS seconds of ``starttime``

    Raises:
    -------
    ValueError: if ``starttime`` is not an astropy Time object
    """
    if not isinstance(starttime, Time):
        raise ValueError('starttime must be an astropy Time object')
    return int(floor(starttime.gps))
def get_iterable(x):
    """Return *x* unchanged if it is a non-string iterable; otherwise wrap
    it in a one-element tuple so callers can always iterate the result."""
    # Strings are iterable but are treated as scalars here.
    wrap_as_scalar = isinstance(x, str)
    if not wrap_as_scalar:
        try:
            iter(x)
        except TypeError:
            wrap_as_scalar = True
    return (x,) if wrap_as_scalar else x
def _reraise_context(fmt, *args):
    """Reraise an exception with its message modified to specify additional context.
    This function tries to help provide context when a piece of code
    encounters an exception while trying to get something done, and it wishes
    to propagate contextual information farther up the call stack. It is a
    consistent way to do it for both Python 2 and 3, since Python 2 does not
    provide Python 3's `exception chaining <https://www.python.org/dev/peps/pep-3134/>`_ functionality.
    Instead of that more sophisticated infrastructure, this function just
    modifies the textual message associated with the exception being raised.
    If only a single argument is supplied, the exception text is prepended with
    the stringification of that argument. If multiple arguments are supplied,
    the first argument is treated as an old-fashioned ``printf``-type
    (``%``-based) format string, and the remaining arguments are the formatted
    values.
    Borrowed from pwkit (https://github.com/pkgw/pwkit/blob/master/pwkit/__init__.py)
    Example usage::
        from hera_mc.utils import reraise_context
        filename = 'my-filename.txt'
        try:
            f = filename.open('rt')
            for line in f.readlines():
                # do stuff ...
        except Exception as e:
            reraise_context('while reading "%r"', filename)
            # The exception is reraised and so control leaves this function.
    If an exception with text ``"bad value"`` were to be raised inside the
    ``try`` block in the above example, its text would be modified to read
    ``"while reading \"my-filename.txt\": bad value"``.
    """
    import sys
    # Build the context prefix: printf-style when extra args are supplied.
    if len(args):
        cstr = fmt % args
    else:
        cstr = six.text_type(fmt)
    # Must be called from inside an except block: grab the active exception.
    ex = sys.exc_info()[1]
    if isinstance(ex, EnvironmentError):
        # EnvironmentError renders from .strerror; patch it and keep args
        # in the (errno, strerror) shape its constructor produced.
        ex.strerror = '%s: %s' % (cstr, ex.strerror)
        ex.args = (ex.errno, ex.strerror)
    else:
        if len(ex.args):
            cstr = '%s: %s' % (cstr, ex.args[0])
        ex.args = (cstr, ) + ex.args[1:]
    # Bare raise re-raises the active exception with its original traceback.
    raise
|
#!/usr/bin/env python
# encoding: utf-8
import argparse
from manage_file import ManageFile, Merge
# Command line: output file followed by the two input files to merge.
parser = argparse.ArgumentParser()
parser.add_argument(
    'file_merged',
    help='file with two merged files'
)
parser.add_argument(
    'first_file',
    help='first file to merged'
)
parser.add_argument(
    'second_file',
    help='second file to merged'
)
args = parser.parse_args()


def main():
    """Merge the two input files and emit a requirements.txt."""
    mf = ManageFile(
        args.file_merged,
        args.first_file,
        args.second_file
    )
    mg = Merge(mf)
    mg.generate_requirements_txt()


if __name__ == '__main__':
    main()
remove file_merged
#!/usr/bin/env python
# encoding: utf-8
import argparse
from manage_file import ManageFile, Merge
# Command line: the two input files to merge.
parser = argparse.ArgumentParser()
parser.add_argument(
    'first_file',
    help='first file to merged'
)
parser.add_argument(
    'second_file',
    help='second file to merged'
)
args = parser.parse_args()


def main():
    """Merge the two input files and emit a requirements.txt."""
    mf = ManageFile(
        args.first_file,
        args.second_file
    )
    mg = Merge(mf)
    mg.generate_requirements_txt()


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: latin-1; -*-
import sys
from PyQt4 import QtCore, QtGui
class RenderThread(QtCore.QThread):
    """Background thread that renders the Mandelbrot set into a pixmap."""
    def __init__(self, parent=None, width=100, height=100):
        QtCore.QThread.__init__(self, parent)
        self.wx = width    # render width in pixels
        self.wy = height   # render height in pixels
        self.inc = 1       # pixel step
        self.zoom = 300    # pixels per unit in the complex plane
        self.ox = -0.5     # view origin, real axis
        self.oy = 0.5      # view origin, imaginary axis
        self.xc = 0.0
        self.yc = 0.0
        self.nbc = 50      # iteration cap (also the palette size)
        self.pixmap = QtGui.QPixmap(width, height)
        self.mutex = QtCore.QMutex()  # guards pixmap access across threads
        self.abort = False  # set by the GUI thread to stop rendering

    def run(self):
        """Render pixel by pixel; checks ``self.abort`` to stop early."""
        #print "starting thread on %dx%d" % (self.wx, self.wy)
        self.mutex.lock()
        painter = QtGui.QPainter(self.pixmap)
        painter.fillRect(self.pixmap.rect(), QtCore.Qt.black)
        self.mutex.unlock()
        # setup palette
        colours = []
        for n in range(self.nbc):
            colours.append(QtGui.QColor(255*n/self.nbc, 0, 0))
        colours.append(QtGui.QColor(0, 0, 0))
        for xe in range(0, self.wx, self.inc):
            for ye in range(0, self.wy, self.inc):
                #print "xe, ye=" , xe, ye
                x,y = self.transfo(xe,ye)
                n=0; xn=0.0; yn=0.0
                # Escape-time iteration: z <- z^2 + c until |z|^2 >= 4.
                while (n!=self.nbc) and (yn*yn+xn*xn<4):
                    n+=1
                    xn, yn = xn*xn-yn*yn+x, 2*xn*yn+y
                painter.setPen(colours[n])
                self.mutex.lock()
                painter.drawPoint(xe,ye)
                self.mutex.unlock()
                if self.abort:
                    return
        #print "thread is over!"

    def transfo(self,xe,ye):
        """Map pixel coordinates to a point in the complex plane."""
        x = self.ox + float(xe-self.wx/2)/self.zoom
        y = self.oy - float(ye-self.wy/2)/self.zoom
        return x,y
class MandelWindow(QtGui.QWidget):
    """
    Main Qt widget: shows the pixmap rendered by RenderThread and
    repaints on a 10 ms timer while the render is in progress.
    """
    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.setWindowTitle("Mandelbrot")
        self.resize(320, 200)
        self.thread = RenderThread()
        # Periodic repaint while the render thread fills the pixmap.
        self.timer = QtCore.QTimer(self)
        self.connect(self.timer, QtCore.SIGNAL("timeout()"), self.timerfct)
        self.timer.start(10)
        self.mouse = QtCore.QPoint()
        self.setMouseTracking(True)  # deliver mouseMoveEvent even without a click

    def timerfct(self):
        """Timer slot: repaint from the render thread's pixmap."""
        # BUG FIX: leftover debug code removed (an ``import random`` plus a
        # print of a random number that fired every 10 ms).
        self.update()

    def resizeEvent(self, event):
        """Abort any running render and start a new one at the new size."""
        # Busy-wait until the thread notices the abort flag and exits.
        while self.thread.isRunning():
            self.thread.abort = True
        self.thread = RenderThread(width=self.width(), height=self.height())
        self.thread.start()

    def paintEvent(self, event):
        """Blit the rendered pixmap and overlay the mouse coordinates."""
        self.thread.mutex.lock()
        painter = QtGui.QPainter(self)
        painter.drawPixmap(0, 0, self.thread.pixmap)
        self.thread.mutex.unlock()
        # coords
        text = "X = %f, Y = %f" % (self.mouse.x(), self.mouse.y())
        metrics = painter.fontMetrics()
        textWidth = metrics.width(text)
        painter.setPen(QtCore.Qt.NoPen)
        painter.setBrush(QtGui.QColor(0, 0, 0, 127))
        painter.drawRect((self.width() - textWidth) / 2 - 5, 0, textWidth + 10,
                         metrics.lineSpacing() + 5)
        painter.setPen(QtCore.Qt.white)
        painter.drawText((self.width() - textWidth) / 2,
                         metrics.leading() + metrics.ascent(), text)

    def mouseMoveEvent(self, event):
        """Track the mouse in complex-plane coordinates and repaint."""
        pos = event.pos()
        x, y = self.thread.transfo(pos.x(), pos.y())
        self.mouse = QtCore.QPointF(x, y)
        self.update()
def main():
    """Create the application, show the main window, run the event loop."""
    application = QtGui.QApplication(sys.argv)
    window = MandelWindow()
    window.show()
    application.exec_()


if __name__ == "__main__":
    main()
suppression timer update => slots
#!/usr/bin/env python
# -*- coding: latin-1; -*-
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class RenderThread(QThread):
    """Background thread that renders the Mandelbrot set into a pixmap and
    emits ``newStuff()`` so the GUI can repaint as data arrives."""
    def __init__(self, parent, width=100, height=100):
        QThread.__init__(self, parent)
        self.wx = width    # render width in pixels
        self.wy = height   # render height in pixels
        self.inc = 1       # pixel step
        self.zoom = 300    # pixels per unit in the complex plane
        self.ox = -0.5     # view origin, real axis
        self.oy = 0.5      # view origin, imaginary axis
        self.xc = 0.0
        self.yc = 0.0
        self.nbc = 50      # iteration cap (also the palette size)
        self.pixmap = QPixmap(width, height)
        self.mutex = QMutex()  # guards pixmap access across threads
        self.abort = False  # set by the GUI thread to stop rendering

    def run(self):
        """Render pixel by pixel; checks ``self.abort`` to stop early."""
        # BUG FIX: debug print of the render dimensions removed.
        self.mutex.lock()
        painter = QPainter(self.pixmap)
        painter.fillRect(self.pixmap.rect(), Qt.black)
        self.mutex.unlock()
        # setup palette
        colours = []
        for n in range(self.nbc):
            colours.append(QColor(255*n/self.nbc, 0, 0))
        colours.append(QColor(0, 0, 0))
        for xe in range(0, self.wx, self.inc):
            for ye in range(0, self.wy, self.inc):
                x,y = self.transfo(xe,ye)
                n=0; xn=0.0; yn=0.0
                # Escape-time iteration: z <- z^2 + c until |z|^2 >= 4.
                while (n!=self.nbc) and (yn*yn+xn*xn<4):
                    n+=1
                    xn, yn = xn*xn-yn*yn+x, 2*xn*yn+y
                painter.setPen(colours[n])
                self.mutex.lock()
                painter.drawPoint(xe,ye)
                self.mutex.unlock()
                if self.abort:
                    return
            # Notify the GUI that fresh pixels are available.
            # NOTE(review): placed per-column per the original line order --
            # confirm against the pre-mangling indentation.
            self.emit(SIGNAL("newStuff()"))

    def transfo(self,xe,ye):
        """Map pixel coordinates to a point in the complex plane."""
        x = self.ox + float(xe-self.wx/2)/self.zoom
        y = self.oy - float(ye-self.wy/2)/self.zoom
        return x,y
class MandelWindow(QWidget):
    """
    Main Qt widget: displays the pixmap produced by RenderThread and
    repaints whenever the thread signals ``newStuff()``.
    """
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setWindowTitle("Mandelbrot (QThread)")
        self.thread = None
        self.mouse = QPoint()
        self.setMouseTracking(True)  # deliver mouseMoveEvent even without a click
        # resize() triggers resizeEvent, which starts the first render.
        self.resize(320, 200)

    def resizeEvent(self, event):
        """Stop any running render and restart at the new size."""
        # BUG FIX: debug print removed.
        self.killThread()
        self.thread = RenderThread(self, width=self.width(), height=self.height())
        # Repaint whenever the render thread reports fresh pixels.
        self.connect(self.thread, SIGNAL("newStuff()"), self.update)
        self.thread.start()

    def paintEvent(self, event):
        """Blit the rendered pixmap and overlay the mouse coordinates."""
        self.thread.mutex.lock()
        painter = QPainter(self)
        painter.drawPixmap(0, 0, self.thread.pixmap)
        self.thread.mutex.unlock()
        # coords
        text = "X = %f, Y = %f" % (self.mouse.x(), self.mouse.y())
        metrics = painter.fontMetrics()
        textWidth = metrics.width(text)
        painter.setPen(Qt.NoPen)
        painter.setBrush(QColor(0, 0, 0, 127))
        painter.drawRect((self.width() - textWidth) / 2 - 5, 0, textWidth + 10,
                         metrics.lineSpacing() + 5)
        painter.setPen(Qt.white)
        painter.drawText((self.width() - textWidth) / 2,
                         metrics.leading() + metrics.ascent(), text)

    def mouseMoveEvent(self, event):
        """Track the mouse in complex-plane coordinates and repaint."""
        pos = event.pos()
        x, y = self.thread.transfo(pos.x(), pos.y())
        self.mouse = QPointF(x, y)
        self.update()

    def killThread(self):
        """Ask the current render thread to stop and wait until it has."""
        if self.thread:
            # Busy-wait: run() checks ``abort`` after every pixel.
            while self.thread.isRunning():
                self.thread.abort = True

    def closeEvent(self, event):
        # Important to stop drawing here, otherwise python.exe crashes
        # (pythonw does not).
        self.killThread()
def main():
    """Create the application, show the main window, run the event loop."""
    application = QApplication(sys.argv)
    window = MandelWindow()
    window.show()
    application.exec_()


if __name__ == "__main__":
    main()
|
from SCons.Script import *
## Command Line Variables
#
# Setup all of the command line variables across all of the products and
# platforms. NOTE: if a path is configurable and will be created in the
# build process then the validation MUST be PathAccept
def get_command_line_opts( host, products, VERSIONS ):
    """Build the SCons command-line Variables object for all products and platforms.

    host: dict describing the build host (reads 'os' and 'arch')
    products: list of buildable product names
    VERSIONS: per-product version info; used to derive the install prefix
    """
    # NOTE(review): ``os`` is not imported here -- it is assumed to come in
    # via ``from SCons.Script import *``; confirm.
    opts = Variables('omama.conf')
    opts.format = '\n%s: %s\n Default: %s [ %s ]\n'
    # Options common to every platform.
    opts.AddVariables(
        # Must be #install by default, otherwise when it comes to cleaning the
        # install folder, can remove whole tree
        PathVariable('prefix', 'Installation prefix', '#openmama_install_%s' % (VERSIONS['mama']['releaseString']),
                     PathVariable.PathAccept),
        PathVariable('blddir', 'Object directory', '#objdir',
                     PathVariable.PathAccept),
        PathVariable('java_home', 'JAVA Home folder', os.environ.get('JAVA_HOME',None) , PathVariable.PathAccept),
        PathVariable('logfile', 'Output Log File', 'scons.log', PathVariable.PathAccept),
        BoolVariable('verbose','Whether to print verbose output',True),
        BoolVariable('package','Whether to tar up the installation directory',False),
        BoolVariable('with_docs','Build with documentation',False),
        BoolVariable('with_unittest','Build with gunit tests',False),
        BoolVariable('with_testtools','Build with test tools',False),
        # BUG FIX: description was a copy-paste of with_testtools.
        BoolVariable('with_examples','Build with examples',True),
        PathVariable('oea_home','Path to oea entitlements home',None, PathVariable.PathIsDir),
        ListVariable('entitlements', 'List of entitlements libraries to enforce e.g. \'oea\' (NOTE: 1st in list the default entitlements library to use.)', '',
                     names = ['oea','noop'] ),
        PathVariable('gtest_home','Path to Google Test home',None, PathVariable.PathIsDir),
        PathVariable('junit_home','Path to Junit home',None, PathVariable.PathIsDir),
        ListVariable('middleware','Middleware(s) to be compiled in', 'avis', names = ['avis', 'qpid'] ),
        # BUG FIX: "availabe" typo in the help text.
        ('jobs', 'Number of scons threads to spawn, if n is passed the number of available cores is calculated and used', '1'),
    )
    # Windows-specific options (compiler, .NET, per-config build types).
    if host['os'] == 'Windows':
        env = Environment()
        opts.AddVariables(
            ListVariable( 'buildtype', 'Windows Build type e.g dynamic', 'all', names = ['dynamic','dynamic-debug','static','static-debug'] ),
            PathVariable('avis_home', 'Path to Avis',
                         'c:\\avis', PathVariable.PathAccept),
            PathVariable('qpid_home', 'Path to QPID Proton Libraries',
                         'c:\\proton', PathVariable.PathAccept),
            EnumVariable('vsver','Visual Studio Version to use', env['MSVC_VERSION'],
                         allowed_values=('8.0','9.0','10.0','11.0','12.0', '14.0')),
            EnumVariable('product', 'Product to be built', 'mamda',
                         allowed_values=( products )),
            EnumVariable('dotnet_version', 'Dotnet Version used to determine framework directory', '2.0',
                         allowed_values=('1.0','2.0', '4.0')),
            PathVariable('dotnet_framework', 'Path to desired dotnet framework', None,
                         PathVariable.PathIsDir),
            PathVariable('nunit_home', 'Path to Nunit home', None,
                         PathVariable.PathIsDir),
            PathVariable('libevent_home', 'Path to libevent Libraries',
                         'c:\\libevent', PathVariable.PathAccept),
            EnumVariable('target_arch', 'Specifies if the build should target 32 or 64 bit architectures.',
                         host['arch'], allowed_values=['x86', 'x86_64']),
            EnumVariable( 'compiler', 'Compiler to use for building OpenMAMA',
                          'default', allowed_values=('default', 'gcc', 'clang', 'clang-analyzer')),
        )
    # Linux-specific options.
    if host['os'] == 'Linux':
        opts.AddVariables(
            PathVariable('avis_home','Path to Avis', '/usr/local/', PathVariable.PathIsDir),
            PathVariable('qpid_home','Path to QPID Proton Libraries',
                         '/usr/local/', PathVariable.PathIsDir),
            PathVariable('cache_dir','Path to object cache', None, PathVariable.PathIsDir),
            EnumVariable('product', 'Product to be built', 'mamda',
                         #mamda all is a windows only build
                         allowed_values=( [ x for x in products if x != "mamdaall" ] )),
            EnumVariable('target_arch', 'Specifies if the build should target 32 or 64 bit architectures.',
                         host['arch'], allowed_values=['i386', 'i586', 'i686', 'x86', 'x86_64']),
            EnumVariable( 'compiler', 'Compiler to use for building OpenMAMA',
                          'default', allowed_values=('default', 'gcc', 'clang', 'clang-analyzer')),
        )
    # macOS-specific options.
    if host['os'] == 'Darwin':
        opts.AddVariables(
            PathVariable('avis_home','Path to Avis', '/usr/local/', PathVariable.PathIsDir),
            PathVariable('qpid_home','Path to QPID Proton Libraries',
                         '/usr/local/', PathVariable.PathIsDir),
            PathVariable('cache_dir','Path to object cache', None, PathVariable.PathIsDir),
            EnumVariable('product', 'Product to be built', 'mamda',
                         #mamda all is a windows only build
                         allowed_values=( [ x for x in products if x != "mamdaall" ] )),
            EnumVariable( 'compiler', 'Compiler to use for building OpenMAMA',
                          'default', allowed_values=('default', 'clang', 'clang-analyzer')),
            EnumVariable('osx_version', 'OS X Version to target build at', 'current',
                         allowed_values=('current','10.8','10.9','10.10','10.11')),
        )
    return opts
SCONS: making default entitlements bridge 'noop'.
Signed-off-by: Matt Mulhern <5fc0180c4000b79368b426e76e9bc03887232fd3@srtechlabs.com>
from SCons.Script import *
## Command Line Variables
#
# Setup all of the command line variables across all of the products and
# platforms. NOTE: if a path is configurable and will be created in the
# build process then the validation MUST be PathAccept
def get_command_line_opts(host, products, VERSIONS):
    """Build the SCons command-line Variables object for the current platform.

    Common options are registered first, then the Windows / Linux / Darwin
    specific options depending on host['os'].

    NOTE: if a path is configurable and will be created by the build process
    then its validator MUST be PathVariable.PathAccept, otherwise cleaning
    the install folder can remove the whole tree.

    :param host: dict describing the build host ('os' and 'arch' keys are read)
    :param products: list of buildable product names
    :param VERSIONS: per-product version info; VERSIONS['mama']['releaseString']
        seeds the default installation prefix
    :return: a populated SCons Variables instance (backed by 'omama.conf')
    """
    opts = Variables('omama.conf')
    opts.format = '\n%s: %s\n Default: %s [ %s ]\n'
    opts.AddVariables(
        # Must be #install by default, otherwise when it comes to cleaning the
        # install folder, can remove whole tree
        PathVariable('prefix', 'Installation prefix', '#openmama_install_%s' % (VERSIONS['mama']['releaseString']),
                     PathVariable.PathAccept),
        PathVariable('blddir', 'Object directory', '#objdir',
                     PathVariable.PathAccept),
        PathVariable('java_home', 'JAVA Home folder', os.environ.get('JAVA_HOME', None), PathVariable.PathAccept),
        PathVariable('logfile', 'Output Log File', 'scons.log', PathVariable.PathAccept),
        BoolVariable('verbose', 'Whether to print verbose output', True),
        BoolVariable('package', 'Whether to tar up the installation directory', False),
        BoolVariable('with_docs', 'Build with documentation', False),
        BoolVariable('with_unittest', 'Build with gunit tests', False),
        BoolVariable('with_testtools', 'Build with test tools', False),
        # BUG FIX: help text previously said 'Build with test tools' (copy/paste
        # from the with_testtools entry above).
        BoolVariable('with_examples', 'Build with example applications', True),
        PathVariable('oea_home', 'Path to oea entitlements home', None, PathVariable.PathIsDir),
        ListVariable('entitlements', 'List of entitlements libraries to enforce e.g. \'oea\' (NOTE: 1st in list the default entitlements library to use.)', 'noop',
                     names=['oea', 'noop']),
        PathVariable('gtest_home', 'Path to Google Test home', None, PathVariable.PathIsDir),
        PathVariable('junit_home', 'Path to Junit home', None, PathVariable.PathIsDir),
        ListVariable('middleware', 'Middleware(s) to be compiled in', 'avis', names=['avis', 'qpid']),
        # TYPO FIX: 'availabe' -> 'available'
        ('jobs', 'Number of scons threads to spawn, if n is passed the number of available cores is calculated and used', '1'),
    )
    if host['os'] == 'Windows':
        # Environment is created here only to discover the installed MSVC
        # version, which seeds the default for 'vsver'.
        env = Environment()
        opts.AddVariables(
            ListVariable('buildtype', 'Windows Build type e.g dynamic', 'all', names=['dynamic', 'dynamic-debug', 'static', 'static-debug']),
            PathVariable('avis_home', 'Path to Avis',
                         'c:\\avis', PathVariable.PathAccept),
            PathVariable('qpid_home', 'Path to QPID Proton Libraries',
                         'c:\\proton', PathVariable.PathAccept),
            # NOTE(review): env['MSVC_VERSION'] raises KeyError if no MSVC
            # toolchain was detected -- confirm this is the intended failure mode.
            EnumVariable('vsver', 'Visual Studio Version to use', env['MSVC_VERSION'],
                         allowed_values=('8.0', '9.0', '10.0', '11.0', '12.0', '14.0')),
            EnumVariable('product', 'Product to be built', 'mamda',
                         allowed_values=(products)),
            EnumVariable('dotnet_version', 'Dotnet Version used to determine framework directory', '2.0',
                         allowed_values=('1.0', '2.0', '4.0')),
            PathVariable('dotnet_framework', 'Path to desired dotnet framework', None,
                         PathVariable.PathIsDir),
            PathVariable('nunit_home', 'Path to Nunit home', None,
                         PathVariable.PathIsDir),
            PathVariable('libevent_home', 'Path to libevent Libraries',
                         'c:\\libevent', PathVariable.PathAccept),
            EnumVariable('target_arch', 'Specifies if the build should target 32 or 64 bit architectures.',
                         host['arch'], allowed_values=['x86', 'x86_64']),
            EnumVariable('compiler', 'Compiler to use for building OpenMAMA',
                         'default', allowed_values=('default', 'gcc', 'clang', 'clang-analyzer')),
        )
    if host['os'] == 'Linux':
        opts.AddVariables(
            PathVariable('avis_home', 'Path to Avis', '/usr/local/', PathVariable.PathIsDir),
            PathVariable('qpid_home', 'Path to QPID Proton Libraries',
                         '/usr/local/', PathVariable.PathIsDir),
            PathVariable('cache_dir', 'Path to object cache', None, PathVariable.PathIsDir),
            EnumVariable('product', 'Product to be built', 'mamda',
                         # mamda all is a windows only build
                         allowed_values=([x for x in products if x != "mamdaall"])),
            EnumVariable('target_arch', 'Specifies if the build should target 32 or 64 bit architectures.',
                         host['arch'], allowed_values=['i386', 'i586', 'i686', 'x86', 'x86_64']),
            EnumVariable('compiler', 'Compiler to use for building OpenMAMA',
                         'default', allowed_values=('default', 'gcc', 'clang', 'clang-analyzer')),
        )
    if host['os'] == 'Darwin':
        opts.AddVariables(
            PathVariable('avis_home', 'Path to Avis', '/usr/local/', PathVariable.PathIsDir),
            PathVariable('qpid_home', 'Path to QPID Proton Libraries',
                         '/usr/local/', PathVariable.PathIsDir),
            PathVariable('cache_dir', 'Path to object cache', None, PathVariable.PathIsDir),
            EnumVariable('product', 'Product to be built', 'mamda',
                         # mamda all is a windows only build
                         allowed_values=([x for x in products if x != "mamdaall"])),
            # No gcc on Darwin: clang-family compilers only.
            EnumVariable('compiler', 'Compiler to use for building OpenMAMA',
                         'default', allowed_values=('default', 'clang', 'clang-analyzer')),
            EnumVariable('osx_version', 'OS X Version to target build at', 'current',
                         allowed_values=('current', '10.8', '10.9', '10.10', '10.11')),
        )
    return opts
|
import json
import logging
import ddt
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from lms.lib.comment_client.utils import CommentClientPaginatedResult
from edxmako.tests import mako_middleware_process_request
from django_comment_common.utils import ThreadContext
from django_comment_client.forum import views
from django_comment_client.permissions import get_team
from django_comment_client.tests.group_id import (
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
)
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_client.tests.utils import CohortedTestCase
from django_comment_client.utils import strip_none
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from util.testing import UrlResetMixin
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
SharedModuleStoreTestCase,
TEST_DATA_MONGO_MODULESTORE,
)
from xmodule.modulestore.tests.factories import check_mongo_calls, CourseFactory, ItemFactory
from courseware.courses import UserNotEnrolled
from nose.tools import assert_true
from mock import patch, Mock, ANY, call
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from lms.djangoapps.teams.tests.factories import CourseTeamFactory
# Module-level logger named after this test module.
log = logging.getLogger(__name__)

# Test modules in this app conventionally omit per-case/method docstrings.
# pylint: disable=missing-docstring
class ViewsExceptionTestCase(UrlResetMixin, ModuleStoreTestCase):
    """Verify forum views return 404 when the profiled user does not exist."""

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
        # so we need to call super.setUp() which reloads urls.py (because
        # of the UrlResetMixin)
        super(ViewsExceptionTestCase, self).setUp()

        # create a course
        self.course = CourseFactory.create(org='MITx', course='999',
                                           display_name='Robot Super Course')

        # Patch the comment client user save method so it does not try
        # to create a new cc user when creating a django user
        with patch('student.models.cc.User.save'):
            uname = 'student'
            email = 'student@edx.org'
            password = 'test'

            # Create the student
            self.student = UserFactory(username=uname, password=password, email=email)

            # Enroll the student in the course
            CourseEnrollmentFactory(user=self.student, course_id=self.course.id)

            # Log the student in
            self.client = Client()
            assert_true(self.client.login(username=uname, password=password))

    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.active_threads')
    def test_user_profile_exception(self, mock_threads, mock_from_django_user):
        # Mock the code that makes the HTTP requests to the cs_comment_service app
        # for the profiled user's active threads.
        # NOTE(review): the tuple mimics a (threads, page, num_pages) return --
        # confirm against the active_threads contract.
        mock_threads.return_value = [], 1, 1

        # Mock the code that makes the HTTP request to the cs_comment_service app
        # that gets the current user's info
        mock_from_django_user.return_value = Mock()

        url = reverse('django_comment_client.forum.views.user_profile',
                      kwargs={'course_id': self.course.id.to_deprecated_string(), 'user_id': '12345'})  # There is no user 12345
        self.response = self.client.get(url)
        # Unknown profiled user must surface as a 404, not a server error.
        self.assertEqual(self.response.status_code, 404)

    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.subscribed_threads')
    def test_user_followed_threads_exception(self, mock_threads, mock_from_django_user):
        # Mock the code that makes the HTTP requests to the cs_comment_service app
        # for the profiled user's active threads
        mock_threads.return_value = CommentClientPaginatedResult(collection=[], page=1, num_pages=1)

        # Mock the code that makes the HTTP request to the cs_comment_service app
        # that gets the current user's info
        mock_from_django_user.return_value = Mock()

        url = reverse('django_comment_client.forum.views.followed_threads',
                      kwargs={'course_id': self.course.id.to_deprecated_string(), 'user_id': '12345'})  # There is no user 12345
        self.response = self.client.get(url)
        self.assertEqual(self.response.status_code, 404)
def make_mock_thread_data(
        course,
        text,
        thread_id,
        num_children,
        group_id=None,
        group_name=None,
        commentable_id=None,
):
    """Build a dict shaped like a comments-service thread payload.

    ``num_children`` of None omits the "children" key entirely; otherwise
    that many dummy comment dicts are attached.
    """
    # Resolve the commentable id: explicit argument first, then the course's
    # 'General' topic, then a dummy fallback.
    topic_id = commentable_id
    if not topic_id:
        topic_id = course.discussion_topics.get('General', {}).get('id')
    if not topic_id:
        topic_id = "dummy_commentable_id"

    # Threads tied to a team are standalone; everything else is course context.
    if get_team(topic_id) is None:
        context = ThreadContext.COURSE
    else:
        context = ThreadContext.STANDALONE

    thread_data = {
        "id": thread_id,
        "type": "thread",
        "title": text,
        "body": text,
        "commentable_id": topic_id,
        "resp_total": 42,
        "resp_skip": 25,
        "resp_limit": 5,
        "group_id": group_id,
        "context": context,
    }
    if group_id is not None:
        thread_data['group_name'] = group_name
    if num_children is not None:
        children = []
        for child_index in range(num_children):
            children.append({
                "id": "dummy_comment_id_{}".format(child_index),
                "type": "comment",
                "body": text,
            })
        thread_data["children"] = children
    return thread_data
def make_mock_request_impl(
        course,
        text,
        thread_id="dummy_thread_id",
        group_id=None,
        commentable_id=None,
        num_thread_responses=1,
):
    """Return a callable suitable for use as ``mock_request.side_effect``.

    The callable fakes the comments-service HTTP API: thread listings, a
    single-thread lookup, and user-info requests. Unrecognised URLs yield
    a 404 response mock.
    """
    def mock_request_impl(*args, **kwargs):
        url = args[1]
        payload = None
        if url.endswith("threads") or url.endswith("user_profile"):
            # Listing endpoints wrap a single childless thread in a collection.
            payload = {
                "collection": [
                    make_mock_thread_data(
                        course=course,
                        text=text,
                        thread_id=thread_id,
                        num_children=None,
                        group_id=group_id,
                        commentable_id=commentable_id,
                    )
                ]
            }
        elif thread_id and url.endswith(thread_id):
            # Single-thread lookup: include the requested number of responses.
            payload = make_mock_thread_data(
                course=course,
                text=text,
                thread_id=thread_id,
                num_children=num_thread_responses,
                group_id=group_id,
                commentable_id=commentable_id
            )
        elif "/users/" in url:
            payload = {
                "default_sort_key": "date",
                "upvoted_ids": [],
                "downvoted_ids": [],
                "subscribed_thread_ids": [],
            }
            # comments service adds these attributes when course_id param is present
            if kwargs.get('params', {}).get('course_id'):
                payload["threads_count"] = 1
                payload["comments_count"] = 2

        if payload is None:
            return Mock(status_code=404)
        return Mock(status_code=200, text=json.dumps(payload), json=Mock(return_value=payload))
    return mock_request_impl
class StringEndsWithMatcher(object):
    """Equality matcher: compares equal to any string ending with ``suffix``.

    Used with ``mock.assert_called_with`` to match request URLs by suffix.
    """
    def __init__(self, suffix):
        self.suffix = suffix

    def __eq__(self, other):
        return other.endswith(self.suffix)

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it explicitly
        # to keep != comparisons consistent.
        return not self.__eq__(other)

    def __repr__(self):
        # A readable repr makes failed mock assertions easier to diagnose.
        return "StringEndsWithMatcher(%r)" % (self.suffix,)
class PartialDictMatcher(object):
    """Equality matcher: equal to any dict containing all expected key/value pairs.

    Extra keys in the compared dict are ignored; every expected pair must be
    present and equal.
    """
    def __init__(self, expected_values):
        self.expected_values = expected_values

    def __eq__(self, other):
        # .items() instead of the Python-2-only .iteritems() keeps the matcher
        # working under both Python 2 and 3; the generator expression avoids
        # materialising an intermediate list.
        return all(
            key in other and other[key] == value
            for key, value in self.expected_values.items()
        )

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__.
        return not self.__eq__(other)

    def __repr__(self):
        return "PartialDictMatcher(%r)" % (self.expected_values,)
@patch('requests.request', autospec=True)
class SingleThreadTestCase(ModuleStoreTestCase):
    """AJAX/HTTP behaviour of ``views.single_thread`` against a mocked comments service."""

    def setUp(self):
        super(SingleThreadTestCase, self).setUp(create_user=False)

        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        self.student = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)

    def test_ajax(self, mock_request):
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)

        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "dummy_discussion_id",
            "test_thread_id"
        )

        # assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        # strip_none is being used to perform the same transform that the
        # django view performs prior to writing thread data to the response
        self.assertEqual(
            response_data["content"],
            strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
        )
        mock_request.assert_called_with(
            "get",
            StringEndsWithMatcher(thread_id),  # url
            data=None,
            params=PartialDictMatcher({"mark_as_read": True, "user_id": 1, "recursive": True}),
            headers=ANY,
            timeout=ANY
        )

    def test_skip_limit(self, mock_request):
        text = "dummy content"
        thread_id = "test_thread_id"
        response_skip = "45"
        response_limit = "15"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)

        request = RequestFactory().get(
            "dummy_url",
            {"resp_skip": response_skip, "resp_limit": response_limit},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "dummy_discussion_id",
            "test_thread_id"
        )

        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        # strip_none is being used to perform the same transform that the
        # django view performs prior to writing thread data to the response
        self.assertEqual(
            response_data["content"],
            strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
        )
        # Pagination parameters from the request must be forwarded verbatim
        # to the comments service.
        mock_request.assert_called_with(
            "get",
            StringEndsWithMatcher(thread_id),  # url
            data=None,
            params=PartialDictMatcher({
                "mark_as_read": True,
                "user_id": 1,
                "recursive": True,
                "resp_skip": response_skip,
                "resp_limit": response_limit,
            }),
            headers=ANY,
            timeout=ANY
        )

    def test_post(self, mock_request):
        # POST is not an allowed method on this endpoint.
        request = RequestFactory().post("dummy_url")
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "dummy_discussion_id",
            "dummy_thread_id"
        )
        self.assertEqual(response.status_code, 405)

    def test_not_found(self, mock_request):
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        # Mock request to return 404 for thread request
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id=None)
        self.assertRaises(
            Http404,
            views.single_thread,
            request,
            self.course.id.to_deprecated_string(),
            "test_discussion_id",
            "test_thread_id"
        )
@ddt.ddt
@patch('requests.request', autospec=True)
class SingleThreadQueryCountTestCase(ModuleStoreTestCase):
    """
    Ensures the number of modulestore queries and number of sql queries are
    independent of the number of responses retrieved for a given discussion thread.
    """
    MODULESTORE = TEST_DATA_MONGO_MODULESTORE

    @ddt.data(
        # old mongo with cache
        (ModuleStoreEnum.Type.mongo, 1, 6, 4, 16, 8),
        (ModuleStoreEnum.Type.mongo, 50, 6, 4, 16, 8),
        # split mongo: 3 queries, regardless of thread response size.
        (ModuleStoreEnum.Type.split, 1, 3, 3, 16, 8),
        (ModuleStoreEnum.Type.split, 50, 3, 3, 16, 8),
    )
    @ddt.unpack
    def test_number_of_mongo_queries(
            self,
            default_store,
            num_thread_responses,
            num_uncached_mongo_calls,
            num_cached_mongo_calls,
            num_uncached_sql_queries,
            num_cached_sql_queries,
            mock_request  # appended last by the class-level @patch decorator
    ):
        # Create the course in the parametrised modulestore flavour.
        with modulestore().default_store(default_store):
            course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})

        student = UserFactory.create()
        CourseEnrollmentFactory.create(user=student, course_id=course.id)

        test_thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=course, text="dummy content", thread_id=test_thread_id, num_thread_responses=num_thread_responses
        )
        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = student

        def call_single_thread():
            """
            Call single_thread and assert that it returns what we expect.
            """
            response = views.single_thread(
                request,
                course.id.to_deprecated_string(),
                "dummy_discussion_id",
                test_thread_id
            )
            # assertEqual instead of the deprecated assertEquals alias.
            self.assertEqual(response.status_code, 200)
            self.assertEqual(len(json.loads(response.content)["content"]["children"]), num_thread_responses)

        # Test uncached first, then cached now that the cache is warm.
        cached_calls = [
            [num_uncached_mongo_calls, num_uncached_sql_queries],
            [num_cached_mongo_calls, num_cached_sql_queries],
        ]
        for expected_mongo_calls, expected_sql_queries in cached_calls:
            with self.assertNumQueries(expected_sql_queries):
                with check_mongo_calls(expected_mongo_calls):
                    call_single_thread()
@patch('requests.request', autospec=True)
class SingleCohortedThreadTestCase(CohortedTestCase):
    """``views.single_thread`` behaviour for a thread owned by the student's cohort."""

    def _create_mock_cohorted_thread(self, mock_request):
        # Install a mocked thread that belongs to the student cohort.
        self.mock_text = "dummy content"
        self.mock_thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.mock_text, thread_id=self.mock_thread_id, group_id=self.student_cohort.id
        )

    def test_ajax(self, mock_request):
        self._create_mock_cohorted_thread(mock_request)

        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "cohorted_topic",
            self.mock_thread_id
        )

        # assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(
            response_data["content"],
            make_mock_thread_data(
                course=self.course,
                text=self.mock_text,
                thread_id=self.mock_thread_id,
                num_children=1,
                group_id=self.student_cohort.id,
                group_name=self.student_cohort.name
            )
        )

    def test_html(self, mock_request):
        self._create_mock_cohorted_thread(mock_request)

        request = RequestFactory().get("dummy_url")
        request.user = self.student
        mako_middleware_process_request(request)
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "cohorted_topic",
            self.mock_thread_id
        )

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        html = response.content

        # Verify that the group name is correctly included in the HTML
        # NOTE(review): assertRegexpMatches is the Python 2 spelling; it becomes
        # assertRegex under Python 3 -- update when the codebase migrates.
        self.assertRegexpMatches(html, r'"group_name": "student_cohort"')
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class SingleThreadAccessTestCase(CohortedTestCase):
    """Cohort-based access-control checks for ``views.single_thread``."""

    def call_view(self, mock_request, commentable_id, user, group_id, thread_group_id=None, pass_group_id=True):
        """Hit single_thread as ``user``; the mocked thread belongs to ``thread_group_id``."""
        target_thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy context", thread_id=target_thread_id, group_id=thread_group_id
        )

        params = {"group_id": group_id} if pass_group_id else {}
        request = RequestFactory().get(
            "dummy_url",
            data=params,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = user
        return views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            commentable_id,
            target_thread_id
        )

    def test_student_non_cohorted(self, mock_request):
        response = self.call_view(mock_request, "non_cohorted_topic", self.student, self.student_cohort.id)
        self.assertEqual(response.status_code, 200)

    def test_student_same_cohort(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            thread_group_id=self.student_cohort.id
        )
        self.assertEqual(response.status_code, 200)

    def test_student_global_thread_in_cohorted_topic(self, mock_request):
        # A thread returned by the comments service with group_id: null must
        # behave the same as a thread response without a group_id (see: TNL-444).
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            thread_group_id=None
        )
        self.assertEqual(response.status_code, 200)

    def test_student_different_cohort(self, mock_request):
        # Students must not be able to read a thread owned by another cohort.
        with self.assertRaises(Http404):
            self.call_view(
                mock_request,
                "cohorted_topic",
                self.student,
                self.student_cohort.id,
                thread_group_id=self.moderator_cohort.id
            )

    def test_moderator_non_cohorted(self, mock_request):
        response = self.call_view(mock_request, "non_cohorted_topic", self.moderator, self.moderator_cohort.id)
        self.assertEqual(response.status_code, 200)

    def test_moderator_same_cohort(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.moderator,
            self.moderator_cohort.id,
            thread_group_id=self.moderator_cohort.id
        )
        self.assertEqual(response.status_code, 200)

    def test_moderator_different_cohort(self, mock_request):
        # Moderators may read threads from any cohort.
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.moderator,
            self.moderator_cohort.id,
            thread_group_id=self.student_cohort.id
        )
        self.assertEqual(response.status_code, 200)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class SingleThreadGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    # Endpoint suffix that CohortedTopicGroupIdTestMixin asserts against.
    cs_endpoint = "/threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """Invoke ``views.single_thread``; hook required by the group-id mixin.

        ``pass_group_id`` controls whether group_id is sent as a request
        parameter; ``is_ajax`` toggles the XMLHttpRequest header so the view
        returns JSON instead of rendered HTML.
        """
        # The mocked thread always belongs to the student cohort, regardless
        # of the group_id being requested.
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy context", group_id=self.student_cohort.id
        )
        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            **headers
        )
        request.user = user
        # Mako templates need middleware state on the request before rendering.
        mako_middleware_process_request(request)
        return views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            commentable_id,
            "dummy_thread_id"
        )

    def test_group_info_in_html_response(self, mock_request):
        # HTML path: group info must be embedded in the rendered page.
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=False
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        # AJAX path: group info must appear on the thread JSON under 'content'.
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['content']
        )
@patch('requests.request', autospec=True)
class SingleThreadContentGroupTestCase(ContentGroupTestCase):
    """Content-group (alpha/beta partition) access tests for ``views.single_thread``."""

    def _install_thread_mock(self, mock_request, thread_id, commentable_id=None):
        """Point the mocked comments service at a single dummy thread.

        Consolidates the identical mock setup previously duplicated in every
        test method below.
        """
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", thread_id=thread_id,
            commentable_id=commentable_id
        )

    def assert_can_access(self, user, discussion_id, thread_id, should_have_access):
        """
        Verify that a user has access to a thread within a given
        discussion_id when should_have_access is True, otherwise
        verify that the user does not have access to that thread.
        """
        request = RequestFactory().get("dummy_url")
        request.user = user
        mako_middleware_process_request(request)

        def call_single_thread():
            # Invoke the view under the access rules being tested.
            return views.single_thread(
                request,
                unicode(self.course.id),
                discussion_id,
                thread_id
            )

        if should_have_access:
            self.assertEqual(call_single_thread().status_code, 200)
        else:
            with self.assertRaises(Http404):
                call_single_thread()

    def test_staff_user(self, mock_request):
        """
        Verify that the staff user can access threads in the alpha,
        beta, and global discussion modules.
        """
        thread_id = "test_thread_id"
        self._install_thread_mock(mock_request, thread_id)

        for discussion_module in [self.alpha_module, self.beta_module, self.global_module]:
            self.assert_can_access(self.staff_user, discussion_module.discussion_id, thread_id, True)

    def test_alpha_user(self, mock_request):
        """
        Verify that the alpha user can access threads in the alpha and
        global discussion modules.
        """
        thread_id = "test_thread_id"
        self._install_thread_mock(mock_request, thread_id)

        for discussion_module in [self.alpha_module, self.global_module]:
            self.assert_can_access(self.alpha_user, discussion_module.discussion_id, thread_id, True)

        self.assert_can_access(self.alpha_user, self.beta_module.discussion_id, thread_id, False)

    def test_beta_user(self, mock_request):
        """
        Verify that the beta user can access threads in the beta and
        global discussion modules.
        """
        thread_id = "test_thread_id"
        self._install_thread_mock(mock_request, thread_id)

        for discussion_module in [self.beta_module, self.global_module]:
            self.assert_can_access(self.beta_user, discussion_module.discussion_id, thread_id, True)

        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)

    def test_non_cohorted_user(self, mock_request):
        """
        Verify that the non-cohorted user can access threads in just the
        global discussion module.
        """
        thread_id = "test_thread_id"
        self._install_thread_mock(mock_request, thread_id)

        self.assert_can_access(self.non_cohorted_user, self.global_module.discussion_id, thread_id, True)
        self.assert_can_access(self.non_cohorted_user, self.alpha_module.discussion_id, thread_id, False)
        self.assert_can_access(self.non_cohorted_user, self.beta_module.discussion_id, thread_id, False)

    def test_course_context_respected(self, mock_request):
        """
        Verify that course threads go through discussion_category_id_access method.
        """
        thread_id = "test_thread_id"
        self._install_thread_mock(mock_request, thread_id)

        # Beta user does not have access to alpha_module.
        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)

    def test_standalone_context_respected(self, mock_request):
        """
        Verify that standalone threads don't go through discussion_category_id_access method.
        """
        # For this rather pathological test, we are assigning the alpha module discussion_id (commentable_id)
        # to a team so that we can verify that standalone threads don't go through discussion_category_id_access.
        thread_id = "test_thread_id"
        CourseTeamFactory(
            name="A team",
            course_id=self.course.id,
            topic_id='topic_id',
            discussion_topic_id=self.alpha_module.discussion_id
        )
        self._install_thread_mock(mock_request, thread_id, commentable_id=self.alpha_module.discussion_id)

        # If a thread returns context other than "course", the access check is not done, and the beta user
        # can see the alpha discussion module.
        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, True)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionContextTestCase(ModuleStoreTestCase):
    """Checks the ``context`` reported for inline discussions backed by a team."""

    def setUp(self):
        super(InlineDiscussionContextTestCase, self).setUp()
        self.course = CourseFactory.create()
        CourseEnrollmentFactory(user=self.user, course_id=self.course.id)
        self.discussion_topic_id = "dummy_topic"
        # Bind the discussion topic to a team so threads in it are standalone.
        self.team = CourseTeamFactory(
            name="A team",
            course_id=self.course.id,
            topic_id='topic_id',
            discussion_topic_id=self.discussion_topic_id
        )
        self.team.add_user(self.user)  # pylint: disable=no-member

    def test_context_can_be_standalone(self, mock_request):
        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy text",
            commentable_id=self.discussion_topic_id
        )

        request = RequestFactory().get("dummy_url")
        request.user = self.user

        response = views.inline_discussion(
            request,
            unicode(self.course.id),
            self.discussion_topic_id,
        )

        payload = json.loads(response.content)
        first_thread = payload['discussion_data'][0]
        self.assertEqual(first_thread['context'], ThreadContext.STANDALONE)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionGroupIdTestCase(
        CohortedTestCase,
        CohortedTopicGroupIdTestMixin,
        NonCohortedTopicGroupIdTestMixin
):
    # Endpoint suffix that the group-id mixins assert against.
    cs_endpoint = "/threads"

    def setUp(self):
        super(InlineDiscussionGroupIdTestCase, self).setUp()
        self.cohorted_commentable_id = 'cohorted_topic'

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """Invoke ``views.inline_discussion``; hook required by both mixins."""
        kwargs = {'commentable_id': self.cohorted_commentable_id}
        if group_id:
            # avoid causing a server error when the LMS chokes attempting
            # to find a group name for the group_id, when we're testing with
            # an invalid one.
            try:
                CourseUserGroup.objects.get(id=group_id)
                kwargs['group_id'] = group_id
            except CourseUserGroup.DoesNotExist:
                pass
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data
        )
        request.user = user
        return views.inline_discussion(
            request,
            self.course.id.to_deprecated_string(),
            commentable_id
        )

    def test_group_info_in_ajax_response(self, mock_request):
        # Group info must be present on each thread in 'discussion_data'.
        response = self.call_view(
            mock_request,
            self.cohorted_commentable_id,
            self.student,
            self.student_cohort.id
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ForumFormDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    # Endpoint suffix that CohortedTopicGroupIdTestMixin asserts against.
    cs_endpoint = "/threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """Invoke ``views.forum_form_discussion``; hook required by the mixin.

        ``pass_group_id`` controls whether group_id is sent as a request
        parameter; ``is_ajax`` toggles the XMLHttpRequest header so the view
        returns JSON instead of rendered HTML.
        """
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            **headers
        )
        request.user = user
        # Mako templates need middleware state on the request before rendering.
        mako_middleware_process_request(request)
        return views.forum_form_discussion(
            request,
            self.course.id.to_deprecated_string()
        )

    def test_group_info_in_html_response(self, mock_request):
        # HTML path: group info must be embedded in the rendered page.
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        # AJAX path: group info must be present on each 'discussion_data' entry.
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class UserProfileDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Verifies cohort group_id handling by the user_profile view: both what the
    view returns and what it forwards to the comments service.
    """
    cs_endpoint = "/active_threads"

    def call_view_for_profiled_user(
            self, mock_request, requesting_user, profiled_user, group_id, pass_group_id, is_ajax=False
    ):
        """
        Calls "user_profile" view method on behalf of "requesting_user" to get information about
        the user "profiled_user".
        """
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"
        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            **headers
        )
        request.user = requesting_user

        mako_middleware_process_request(request)
        return views.user_profile(
            request,
            self.course.id.to_deprecated_string(),
            profiled_user.id
        )

    def call_view(self, mock_request, _commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """Profile the requesting user themselves (satisfies the mixin's call_view contract)."""
        return self.call_view_for_profiled_user(
            mock_request, user, user, group_id, pass_group_id=pass_group_id, is_ajax=is_ajax
        )

    def test_group_info_in_html_response(self, mock_request):
        """Cohort group info should be embedded in the HTML response."""
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=False
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        """Cohort group info should appear on each thread in the AJAX payload."""
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )

    def _test_group_id_passed_to_user_profile(
            self, mock_request, expect_group_id_in_request, requesting_user, profiled_user, group_id, pass_group_id
    ):
        """
        Helper method for testing whether or not group_id was passed to the user_profile request.
        """

        def get_params_from_user_info_call(for_specific_course):
            """
            Returns the request parameters for the user info call with either course_id specified or not,
            depending on value of 'for_specific_course'.
            """
            # There will be 3 calls from user_profile. One has the cs_endpoint "active_threads", and it is already
            # tested. The other 2 calls are for user info; one of those calls is for general information about the user,
            # and it does not specify a course_id. The other call does specify a course_id, and if the caller did not
            # have discussion moderator privileges, it should also contain a group_id.
            for r_call in mock_request.call_args_list:
                if not r_call[0][1].endswith(self.cs_endpoint):
                    params = r_call[1]["params"]
                    has_course_id = "course_id" in params
                    if (for_specific_course and has_course_id) or (not for_specific_course and not has_course_id):
                        return params
            # BUG FIX: the original used assertTrue(False, "..." + for_specific_course),
            # concatenating a str with a bool — that raises TypeError instead of
            # failing with the intended message.
            self.fail(
                "Did not find appropriate user_profile call for 'for_specific_course'={}".format(
                    for_specific_course
                )
            )

        mock_request.reset_mock()
        self.call_view_for_profiled_user(
            mock_request,
            requesting_user,
            profiled_user,
            group_id,
            pass_group_id=pass_group_id,
            is_ajax=False
        )
        # Should never have a group_id if course_id was not included in the request.
        params_without_course_id = get_params_from_user_info_call(False)
        self.assertNotIn("group_id", params_without_course_id)

        params_with_course_id = get_params_from_user_info_call(True)
        if expect_group_id_in_request:
            self.assertIn("group_id", params_with_course_id)
            self.assertEqual(group_id, params_with_course_id["group_id"])
        else:
            self.assertNotIn("group_id", params_with_course_id)

    def test_group_id_passed_to_user_profile_student(self, mock_request):
        """
        Test that the group id is always included when requesting user profile information for a particular
        course if the requester does not have discussion moderation privileges.
        """
        def verify_group_id_always_present(profiled_user, pass_group_id):
            """
            Helper method to verify that group_id is always present for student in course
            (non-privileged user).
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, True, self.student, profiled_user, self.student_cohort.id, pass_group_id
            )

        # In all these test cases, the requesting_user is the student (non-privileged user).
        # The profile returned on behalf of the student is for the profiled_user.
        verify_group_id_always_present(profiled_user=self.student, pass_group_id=True)
        verify_group_id_always_present(profiled_user=self.student, pass_group_id=False)
        verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=True)
        verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=False)

    def test_group_id_user_profile_moderator(self, mock_request):
        """
        Test that the group id is only included when a privileged user requests user profile information for a
        particular course and user if the group_id is explicitly passed in.
        """
        def verify_group_id_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
            """
            Helper method to verify that group_id is present.
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, True, self.moderator, profiled_user, requested_cohort.id, pass_group_id
            )

        def verify_group_id_not_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
            """
            Helper method to verify that group_id is not present.
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, False, self.moderator, profiled_user, requested_cohort.id, pass_group_id
            )

        # In all these test cases, the requesting_user is the moderator (privileged user).
        # If the group_id is explicitly passed, it will be present in the request.
        verify_group_id_present(profiled_user=self.student, pass_group_id=True)
        verify_group_id_present(profiled_user=self.moderator, pass_group_id=True)
        verify_group_id_present(
            profiled_user=self.student, pass_group_id=True, requested_cohort=self.student_cohort
        )

        # If the group_id is not explicitly passed, it will not be present because the requesting_user
        # has discussion moderator privileges.
        verify_group_id_not_present(profiled_user=self.student, pass_group_id=False)
        verify_group_id_not_present(profiled_user=self.moderator, pass_group_id=False)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class FollowedThreadsDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """Verifies cohort group_id handling for the followed_threads view."""
    cs_endpoint = "/subscribed_threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """Issue an AJAX GET to views.followed_threads on behalf of ``user``."""
        impl_kwargs = {'group_id': group_id} if group_id else {}
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **impl_kwargs)

        query_params = {"group_id": group_id} if pass_group_id else {}
        request = RequestFactory().get(
            "dummy_url",
            data=query_params,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = user
        return views.followed_threads(request, self.course.id.to_deprecated_string(), user.id)

    def test_group_info_in_ajax_response(self, mock_request):
        """Cohort group info should appear on each thread in the AJAX payload."""
        ajax_response = self.call_view(
            mock_request, "cohorted_topic", self.student, self.student_cohort.id
        )
        self._assert_json_response_contains_group_info(
            ajax_response, lambda d: d['discussion_data'][0]
        )
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionTestCase(ModuleStoreTestCase):
    """
    Tests for views.inline_discussion: courseware link metadata and the
    thread context reported to the comments service.
    """
    def setUp(self):
        super(InlineDiscussionTestCase, self).setUp()

        self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
        # Inline discussion module whose category/target feed courseware_title.
        self.discussion1 = ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="discussion1",
            display_name='Discussion1',
            discussion_category="Chapter",
            discussion_target="Discussion1"
        )

    def send_request(self, mock_request, params=None):
        """
        Creates and returns a request with params set, and configures
        mock_request to return appropriate values.
        """
        request = RequestFactory().get("dummy_url", params if params else {})
        request.user = self.student
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", commentable_id=self.discussion1.discussion_id
        )
        return views.inline_discussion(
            request, self.course.id.to_deprecated_string(), self.discussion1.discussion_id
        )

    def verify_response(self, response):
        """Verifies that the response contains the appropriate courseware_url and courseware_title"""
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        expected_courseware_url = '/courses/TestX/101/Test_Course/jump_to/i4x://TestX/101/discussion/Discussion1'
        expected_courseware_title = 'Chapter / Discussion1'
        self.assertEqual(response_data['discussion_data'][0]['courseware_url'], expected_courseware_url)
        self.assertEqual(response_data["discussion_data"][0]["courseware_title"], expected_courseware_title)

    def test_courseware_data(self, mock_request):
        # Basic request: thread data must carry correct courseware url/title.
        self.verify_response(self.send_request(mock_request))

    def test_context(self, mock_request):
        # When the discussion topic belongs to a team the student is on,
        # the comments-service request must use the STANDALONE context.
        team = CourseTeamFactory(
            name='Team Name',
            topic_id='A topic',
            course_id=self.course.id,
            discussion_topic_id=self.discussion1.discussion_id
        )
        team.add_user(self.student)  # pylint: disable=no-member
        response = self.send_request(mock_request)
        self.assertEqual(mock_request.call_args[1]['params']['context'], ThreadContext.STANDALONE)
        self.verify_response(response)
@patch('requests.request', autospec=True)
class UserProfileTestCase(ModuleStoreTestCase):
    """Tests for views.user_profile in both HTML and AJAX modes."""

    TEST_THREAD_TEXT = 'userprofile-test-text'
    TEST_THREAD_ID = 'userprofile-test-thread-id'

    def setUp(self):
        super(UserProfileTestCase, self).setUp()

        self.course = CourseFactory.create()
        self.student = UserFactory.create()
        self.profiled_user = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)

    def get_response(self, mock_request, params, **headers):
        """
        GET the user_profile view for self.profiled_user as self.student and
        verify the active_threads call made to the comments service.
        """
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
        )
        request = RequestFactory().get("dummy_url", data=params, **headers)
        request.user = self.student

        mako_middleware_process_request(request)
        response = views.user_profile(
            request,
            self.course.id.to_deprecated_string(),
            self.profiled_user.id
        )

        # The view must fetch the profiled user's active threads with the
        # expected course and pagination parameters.
        mock_request.assert_any_call(
            "get",
            StringEndsWithMatcher('/users/{}/active_threads'.format(self.profiled_user.id)),
            data=None,
            params=PartialDictMatcher({
                "course_id": self.course.id.to_deprecated_string(),
                "page": params.get("page", 1),
                "per_page": views.THREADS_PER_PAGE
            }),
            headers=ANY,
            timeout=ANY
        )
        return response

    def check_html(self, mock_request, **params):
        """Request the HTML variant and verify thread data is embedded in the page."""
        response = self.get_response(mock_request, params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        html = response.content
        self.assertRegexpMatches(html, r'data-page="1"')
        self.assertRegexpMatches(html, r'data-num-pages="1"')
        self.assertRegexpMatches(html, r'<span>1</span> discussion started')
        self.assertRegexpMatches(html, r'<span>2</span> comments')
        self.assertRegexpMatches(html, r'"id": "{}"'.format(self.TEST_THREAD_ID))
        self.assertRegexpMatches(html, r'"title": "{}"'.format(self.TEST_THREAD_TEXT))
        self.assertRegexpMatches(html, r'"body": "{}"'.format(self.TEST_THREAD_TEXT))
        self.assertRegexpMatches(html, r'"username": "{}"'.format(self.student.username))

    def check_ajax(self, mock_request, **params):
        """Request the AJAX variant and verify the JSON payload structure."""
        response = self.get_response(mock_request, params, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json; charset=utf-8')
        response_data = json.loads(response.content)
        self.assertEqual(
            sorted(response_data.keys()),
            ["annotated_content_info", "discussion_data", "num_pages", "page"]
        )
        self.assertEqual(len(response_data['discussion_data']), 1)
        self.assertEqual(response_data["page"], 1)
        self.assertEqual(response_data["num_pages"], 1)
        self.assertEqual(response_data['discussion_data'][0]['id'], self.TEST_THREAD_ID)
        self.assertEqual(response_data['discussion_data'][0]['title'], self.TEST_THREAD_TEXT)
        self.assertEqual(response_data['discussion_data'][0]['body'], self.TEST_THREAD_TEXT)

    def test_html(self, mock_request):
        self.check_html(mock_request)

    def test_html_p2(self, mock_request):
        # Page-2 request still renders (assertions above pin page/num_pages to 1).
        self.check_html(mock_request, page="2")

    def test_ajax(self, mock_request):
        self.check_ajax(mock_request)

    def test_ajax_p2(self, mock_request):
        self.check_ajax(mock_request, page="2")

    def test_404_profiled_user(self, mock_request):
        # A nonexistent profiled user id must raise Http404.
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.user_profile(
                request,
                self.course.id.to_deprecated_string(),
                -999
            )

    def test_404_course(self, mock_request):
        # A nonexistent course id must raise Http404.
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.user_profile(
                request,
                "non/existent/course",
                self.profiled_user.id
            )

    def test_post(self, mock_request):
        # The view only supports GET; POST must be rejected with 405.
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
        )
        request = RequestFactory().post("dummy_url")
        request.user = self.student
        response = views.user_profile(
            request,
            self.course.id.to_deprecated_string(),
            self.profiled_user.id
        )
        self.assertEqual(response.status_code, 405)
@patch('requests.request', autospec=True)
class CommentsServiceRequestHeadersTestCase(UrlResetMixin, ModuleStoreTestCase):
    """
    Verifies that requests made to the comments service carry the expected
    HTTP headers (Accept-Language forwarding, API key).
    """
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        username = "foo"
        password = "bar"
        # Invoke UrlResetMixin setup. BUG FIX: the original called
        # super().setUp() twice (once with no args, then again with
        # create_user=False), running the full ModuleStoreTestCase setup
        # twice; a single call is sufficient.
        super(CommentsServiceRequestHeadersTestCase, self).setUp(create_user=False)
        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        self.assertTrue(
            self.client.login(username=username, password=password)
        )

    def assert_all_calls_have_header(self, mock_request, key, value):
        """Assert every recorded comments-service call included header ``key: value``."""
        expected = call(
            ANY,  # method
            ANY,  # url
            data=ANY,
            params=ANY,
            headers=PartialDictMatcher({key: value}),
            timeout=ANY
        )
        for actual in mock_request.call_args_list:
            self.assertEqual(expected, actual)

    def test_accept_language(self, mock_request):
        """The client's Accept-Language header should be forwarded downstream."""
        lang = "eo"
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)

        self.client.get(
            reverse(
                "django_comment_client.forum.views.single_thread",
                kwargs={
                    "course_id": self.course.id.to_deprecated_string(),
                    "discussion_id": "dummy_discussion_id",
                    "thread_id": thread_id,
                }
            ),
            HTTP_ACCEPT_LANGUAGE=lang,
        )
        self.assert_all_calls_have_header(mock_request, "Accept-Language", lang)

    @override_settings(COMMENTS_SERVICE_KEY="test_api_key")
    def test_api_key(self, mock_request):
        """The configured COMMENTS_SERVICE_KEY should be sent as X-Edx-Api-Key."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id="dummy")

        self.client.get(
            reverse(
                "django_comment_client.forum.views.forum_form_discussion",
                kwargs={"course_id": self.course.id.to_deprecated_string()}
            ),
        )
        self.assert_all_calls_have_header(mock_request, "X-Edx-Api-Key", "test_api_key")
class InlineDiscussionUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verifies inline_discussion round-trips unicode thread text."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(InlineDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super(InlineDiscussionUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        # Invoked by UnicodeTestMixin for each sample unicode string; the
        # mocked comments service echoes ``text`` as thread title/body.
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student

        response = views.inline_discussion(
            request, self.course.id.to_deprecated_string(), self.course.discussion_topics['General']['id']
        )
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)

    def _test_invalid_course_id(self):
        """ Asserts that Http404 is raised when the course id is not valid. """
        # NOTE(review): despite the docstring, this passes unicode(self.course.id)
        # — a *valid* course id — and unlike _test_unicode_data it is not run
        # under a patched comments-service request. Confirm the intended
        # argument (possibly a deliberately malformed course id string).
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.inline_discussion(
                request, unicode(self.course.id), self.course.discussion_topics['General']['id']
            )
class ForumFormDiscussionUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verifies forum_form_discussion round-trips unicode thread text."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ForumFormDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super(ForumFormDiscussionUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Assert the sample unicode ``text`` survives the AJAX round trip."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True

        response = views.forum_form_discussion(request, self.course.id.to_deprecated_string())
        self.assertEqual(response.status_code, 200)

        payload = json.loads(response.content)
        first_thread = payload["discussion_data"][0]
        self.assertEqual(first_thread["title"], text)
        self.assertEqual(first_thread["body"], text)
@ddt.ddt
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ForumDiscussionXSSTestCase(UrlResetMixin, ModuleStoreTestCase):
    """
    Ensures user-supplied query parameters (sort_key, page) cannot inject
    script into forum pages.
    """
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(ForumDiscussionXSSTestCase, self).setUp()
        username = "foo"
        password = "bar"
        self.course = CourseFactory.create()
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        self.assertTrue(self.client.login(username=username, password=password))

    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    @patch('student.models.cc.User.from_django_user')
    def test_forum_discussion_xss_prevent(self, malicious_code, mock_user, mock_req):  # pylint: disable=unused-argument
        """
        Test that XSS attack is prevented
        """
        reverse_url = "%s%s" % (reverse(
            "django_comment_client.forum.views.forum_form_discussion",
            kwargs={"course_id": unicode(self.course.id)}), '/forum_form_discussion')
        # Test that malicious code does not appear in html
        url = "%s?%s=%s" % (reverse_url, 'sort_key', malicious_code)
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn(malicious_code, resp.content)

    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.active_threads')
    def test_forum_user_profile_xss_prevent(self, malicious_code, mock_threads, mock_from_django_user, mock_request):
        """
        Test that XSS attack is prevented
        """
        mock_threads.return_value = [], 1, 1
        mock_from_django_user.return_value = Mock()
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')

        url = reverse('django_comment_client.forum.views.user_profile',
                      kwargs={'course_id': unicode(self.course.id), 'user_id': str(self.student.id)})
        # Test that malicious code does not appear in html
        url_string = "%s?%s=%s" % (url, 'page', malicious_code)
        resp = self.client.get(url_string)
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn(malicious_code, resp.content)
class ForumDiscussionSearchUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verifies forum search echoes unicode query text back intact."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ForumDiscussionSearchUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super(ForumDiscussionSearchUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Search for unicode ``text`` and assert it round-trips in the response."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        search_params = {
            "ajax": 1,
            "text": text,
        }
        request = RequestFactory().get("dummy_url", search_params)
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True

        response = views.forum_form_discussion(request, self.course.id.to_deprecated_string())
        self.assertEqual(response.status_code, 200)

        payload = json.loads(response.content)
        first_result = payload["discussion_data"][0]
        self.assertEqual(first_result["title"], text)
        self.assertEqual(first_result["body"], text)
class SingleThreadUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verifies single_thread round-trips unicode thread text."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(SingleThreadUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create(discussion_topics={'dummy_discussion_id': {'id': 'dummy_discussion_id'}})

    @classmethod
    def setUpTestData(cls):
        super(SingleThreadUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        # Invoked by UnicodeTestMixin for each sample unicode string.
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True

        response = views.single_thread(request, self.course.id.to_deprecated_string(), "dummy_discussion_id", thread_id)
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["content"]["title"], text)
        self.assertEqual(response_data["content"]["body"], text)
class UserProfileUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verifies user_profile round-trips unicode thread text."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(UserProfileUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super(UserProfileUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Assert the sample unicode ``text`` survives the AJAX round trip."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True

        response = views.user_profile(request, self.course.id.to_deprecated_string(), str(self.student.id))
        self.assertEqual(response.status_code, 200)

        payload = json.loads(response.content)
        first_thread = payload["discussion_data"][0]
        self.assertEqual(first_thread["title"], text)
        self.assertEqual(first_thread["body"], text)
class FollowedThreadsUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Verifies followed_threads round-trips unicode thread text."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(FollowedThreadsUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()

    @classmethod
    def setUpTestData(cls):
        super(FollowedThreadsUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)

    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        # Invoked by UnicodeTestMixin for each sample unicode string.
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True

        response = views.followed_threads(request, self.course.id.to_deprecated_string(), str(self.student.id))
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
class EnrollmentTestCase(ModuleStoreTestCase):
    """
    Tests for the behavior of views depending on if the student is enrolled
    in the course
    """
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(EnrollmentTestCase, self).setUp()
        self.course = CourseFactory.create()
        self.student = UserFactory.create()

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def test_unenrolled(self, mock_request):
        # self.student is deliberately NOT enrolled; the view must refuse access.
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
        unenrolled_request = RequestFactory().get('dummy_url')
        unenrolled_request.user = self.student
        with self.assertRaises(UserNotEnrolled):
            views.forum_form_discussion(unenrolled_request, course_id=self.course.id.to_deprecated_string())
# Addressed wrong course id comment.
import json
import logging
import ddt
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from lms.lib.comment_client.utils import CommentClientPaginatedResult
from edxmako.tests import mako_middleware_process_request
from django_comment_common.utils import ThreadContext
from django_comment_client.forum import views
from django_comment_client.permissions import get_team
from django_comment_client.tests.group_id import (
CohortedTopicGroupIdTestMixin,
NonCohortedTopicGroupIdTestMixin
)
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_client.tests.utils import CohortedTestCase
from django_comment_client.utils import strip_none
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from util.testing import UrlResetMixin
from openedx.core.djangoapps.util.testing import ContentGroupTestCase
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import (
ModuleStoreTestCase,
SharedModuleStoreTestCase,
TEST_DATA_MONGO_MODULESTORE,
)
from xmodule.modulestore.tests.factories import check_mongo_calls, CourseFactory, ItemFactory
from courseware.courses import UserNotEnrolled
from nose.tools import assert_true
from mock import patch, Mock, ANY, call
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from lms.djangoapps.teams.tests.factories import CourseTeamFactory
# Module-level logger for this test module.
log = logging.getLogger(__name__)

# Test classes below intentionally omit many docstrings.
# pylint: disable=missing-docstring
class ViewsExceptionTestCase(UrlResetMixin, ModuleStoreTestCase):
    """
    Verifies that the user_profile and followed_threads views return 404 when
    the requested user does not exist.
    """

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):

        # Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
        # so we need to call super.setUp() which reloads urls.py (because
        # of the UrlResetMixin)
        super(ViewsExceptionTestCase, self).setUp()

        # create a course
        self.course = CourseFactory.create(org='MITx', course='999',
                                           display_name='Robot Super Course')

        # Patch the comment client user save method so it does not try
        # to create a new cc user when creating a django user
        with patch('student.models.cc.User.save'):
            uname = 'student'
            email = 'student@edx.org'
            password = 'test'

            # Create the student
            self.student = UserFactory(username=uname, password=password, email=email)

            # Enroll the student in the course
            CourseEnrollmentFactory(user=self.student, course_id=self.course.id)

            # Log the student in
            self.client = Client()
            assert_true(self.client.login(username=uname, password=password))

    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.active_threads')
    def test_user_profile_exception(self, mock_threads, mock_from_django_user):
        # Mock the code that makes the HTTP requests to the cs_comment_service app
        # for the profiled user's active threads
        mock_threads.return_value = [], 1, 1

        # Mock the code that makes the HTTP request to the cs_comment_service app
        # that gets the current user's info
        mock_from_django_user.return_value = Mock()

        url = reverse('django_comment_client.forum.views.user_profile',
                      kwargs={'course_id': self.course.id.to_deprecated_string(), 'user_id': '12345'})  # There is no user 12345
        self.response = self.client.get(url)
        self.assertEqual(self.response.status_code, 404)

    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.subscribed_threads')
    def test_user_followed_threads_exception(self, mock_threads, mock_from_django_user):
        # Mock the code that makes the HTTP requests to the cs_comment_service app
        # for the profiled user's active threads
        mock_threads.return_value = CommentClientPaginatedResult(collection=[], page=1, num_pages=1)

        # Mock the code that makes the HTTP request to the cs_comment_service app
        # that gets the current user's info
        mock_from_django_user.return_value = Mock()

        url = reverse('django_comment_client.forum.views.followed_threads',
                      kwargs={'course_id': self.course.id.to_deprecated_string(), 'user_id': '12345'})  # There is no user 12345
        self.response = self.client.get(url)
        self.assertEqual(self.response.status_code, 404)
def make_mock_thread_data(
        course,
        text,
        thread_id,
        num_children,
        group_id=None,
        group_name=None,
        commentable_id=None,
):
    """
    Build a dict shaped like a comments-service thread for mocked responses.

    ``text`` is used for both title and body; ``num_children`` (when not None)
    controls how many child comments are attached.
    """
    resolved_commentable_id = (
        commentable_id or course.discussion_topics.get('General', {}).get('id') or "dummy_commentable_id"
    )
    # Threads tied to a team discussion get the STANDALONE context.
    context = ThreadContext.COURSE if get_team(resolved_commentable_id) is None else ThreadContext.STANDALONE
    thread = {
        "id": thread_id,
        "type": "thread",
        "title": text,
        "body": text,
        "commentable_id": resolved_commentable_id,
        "resp_total": 42,
        "resp_skip": 25,
        "resp_limit": 5,
        "group_id": group_id,
        "context": context,
    }
    if group_id is not None:
        thread['group_name'] = group_name
    if num_children is not None:
        thread["children"] = [
            {
                "id": "dummy_comment_id_{}".format(child_index),
                "type": "comment",
                "body": text,
            }
            for child_index in range(num_children)
        ]
    return thread
def make_mock_request_impl(
        course,
        text,
        thread_id="dummy_thread_id",
        group_id=None,
        commentable_id=None,
        num_thread_responses=1,
):
    """
    Return a callable suitable as a mocked requests.request side_effect.

    The callable inspects the requested URL and fakes the corresponding
    comments-service JSON response via make_mock_thread_data; URLs it does
    not recognize get a 404 mock. Note the dispatch order below matters:
    thread-list endpoints are checked before the single-thread endpoint.
    """
    def mock_request_impl(*args, **kwargs):
        # requests.request(method, url, ...): the URL is the second positional arg.
        url = args[1]
        data = None
        if url.endswith("threads") or url.endswith("user_profile"):
            # Thread-listing endpoints wrap threads in a "collection" key.
            data = {
                "collection": [
                    make_mock_thread_data(
                        course=course,
                        text=text,
                        thread_id=thread_id,
                        num_children=None,
                        group_id=group_id,
                        commentable_id=commentable_id,
                    )
                ]
            }
        elif thread_id and url.endswith(thread_id):
            # Single-thread endpoint: include child comments.
            data = make_mock_thread_data(
                course=course,
                text=text,
                thread_id=thread_id,
                num_children=num_thread_responses,
                group_id=group_id,
                commentable_id=commentable_id
            )
        elif "/users/" in url:
            # User-info endpoint.
            data = {
                "default_sort_key": "date",
                "upvoted_ids": [],
                "downvoted_ids": [],
                "subscribed_thread_ids": [],
            }
            # comments service adds these attributes when course_id param is present
            if kwargs.get('params', {}).get('course_id'):
                data.update({
                    "threads_count": 1,
                    "comments_count": 2
                })
        if data:
            return Mock(status_code=200, text=json.dumps(data), json=Mock(return_value=data))
        return Mock(status_code=404)
    return mock_request_impl
class StringEndsWithMatcher(object):
    """
    Equality matcher: compares equal to any string that ends with ``suffix``.

    Used as an argument matcher in mock call assertions (e.g. matching a URL
    regardless of its host prefix).
    """
    def __init__(self, suffix):
        self.suffix = suffix

    def __eq__(self, other):
        return other.endswith(self.suffix)

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; define it explicitly
        # so that != stays consistent with ==.
        return not self.__eq__(other)

    def __repr__(self):
        # Readable output in mock assertion failure messages.
        return "<string ending with %r>" % (self.suffix,)
class PartialDictMatcher(object):
    """
    Equality matcher: compares equal to any dict that contains every pair in
    ``expected_values`` (the other dict may carry extra keys).
    """
    def __init__(self, expected_values):
        self.expected_values = expected_values

    def __eq__(self, other):
        # items() rather than the Python-2-only iteritems() keeps this matcher
        # behavior-identical while also working under Python 3.
        return all(
            key in other and other[key] == value
            for key, value in self.expected_values.items()
        )

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; keep != consistent.
        return not self.__eq__(other)

    def __repr__(self):
        # Readable output in mock assertion failure messages.
        return "<dict containing %r>" % (self.expected_values,)
@patch('requests.request', autospec=True)
class SingleThreadTestCase(ModuleStoreTestCase):
    """Tests for views.single_thread: AJAX rendering, forwarded params, errors."""
    def setUp(self):
        super(SingleThreadTestCase, self).setUp(create_user=False)

        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        self.student = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)

    def test_ajax(self, mock_request):
        # Happy path: AJAX GET returns the mocked thread as JSON.
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)

        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "dummy_discussion_id",
            "test_thread_id"
        )

        self.assertEquals(response.status_code, 200)
        response_data = json.loads(response.content)
        # strip_none is being used to perform the same transform that the
        # django view performs prior to writing thread data to the response
        self.assertEquals(
            response_data["content"],
            strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
        )
        # user_id 1: presumably self.student, the first user created after
        # setUp(create_user=False) — confirm if factory behavior changes.
        mock_request.assert_called_with(
            "get",
            StringEndsWithMatcher(thread_id),  # url
            data=None,
            params=PartialDictMatcher({"mark_as_read": True, "user_id": 1, "recursive": True}),
            headers=ANY,
            timeout=ANY
        )

    def test_skip_limit(self, mock_request):
        # resp_skip/resp_limit query params must be forwarded to the comments service.
        text = "dummy content"
        thread_id = "test_thread_id"
        response_skip = "45"
        response_limit = "15"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)

        request = RequestFactory().get(
            "dummy_url",
            {"resp_skip": response_skip, "resp_limit": response_limit},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "dummy_discussion_id",
            "test_thread_id"
        )
        self.assertEquals(response.status_code, 200)
        response_data = json.loads(response.content)
        # strip_none is being used to perform the same transform that the
        # django view performs prior to writing thread data to the response
        self.assertEquals(
            response_data["content"],
            strip_none(make_mock_thread_data(course=self.course, text=text, thread_id=thread_id, num_children=1))
        )
        mock_request.assert_called_with(
            "get",
            StringEndsWithMatcher(thread_id),  # url
            data=None,
            params=PartialDictMatcher({
                "mark_as_read": True,
                "user_id": 1,
                "recursive": True,
                "resp_skip": response_skip,
                "resp_limit": response_limit,
            }),
            headers=ANY,
            timeout=ANY
        )

    def test_post(self, mock_request):
        # The view only supports GET; POST must be rejected with 405.
        request = RequestFactory().post("dummy_url")
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "dummy_discussion_id",
            "dummy_thread_id"
        )
        self.assertEquals(response.status_code, 405)

    def test_not_found(self, mock_request):
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        # Mock request to return 404 for thread request
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id=None)
        self.assertRaises(
            Http404,
            views.single_thread,
            request,
            self.course.id.to_deprecated_string(),
            "test_discussion_id",
            "test_thread_id"
        )
@ddt.ddt
@patch('requests.request', autospec=True)
class SingleThreadQueryCountTestCase(ModuleStoreTestCase):
    """
    Ensures the number of modulestore queries and number of sql queries are
    independent of the number of responses retrieved for a given discussion thread.
    """
    MODULESTORE = TEST_DATA_MONGO_MODULESTORE

    @ddt.data(
        # old mongo with cache
        (ModuleStoreEnum.Type.mongo, 1, 6, 4, 16, 8),
        (ModuleStoreEnum.Type.mongo, 50, 6, 4, 16, 8),
        # split mongo: 3 queries, regardless of thread response size.
        (ModuleStoreEnum.Type.split, 1, 3, 3, 16, 8),
        (ModuleStoreEnum.Type.split, 50, 3, 3, 16, 8),
    )
    @ddt.unpack
    def test_number_of_mongo_queries(
            self,
            default_store,
            num_thread_responses,
            num_uncached_mongo_calls,
            num_cached_mongo_calls,
            num_uncached_sql_queries,
            num_cached_sql_queries,
            mock_request
    ):
        """Query counts stay fixed whether the thread has 1 or 50 responses."""
        with modulestore().default_store(default_store):
            course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})

        student = UserFactory.create()
        CourseEnrollmentFactory.create(user=student, course_id=course.id)

        test_thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=course, text="dummy content", thread_id=test_thread_id, num_thread_responses=num_thread_responses
        )
        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = student

        def call_single_thread():
            """
            Call single_thread and assert that it returns what we expect.
            """
            response = views.single_thread(
                request,
                course.id.to_deprecated_string(),
                "dummy_discussion_id",
                test_thread_id
            )
            self.assertEquals(response.status_code, 200)
            self.assertEquals(len(json.loads(response.content)["content"]["children"]), num_thread_responses)

        # Test uncached first, then cached now that the cache is warm.
        cached_calls = [
            [num_uncached_mongo_calls, num_uncached_sql_queries],
            [num_cached_mongo_calls, num_cached_sql_queries],
        ]
        for expected_mongo_calls, expected_sql_queries in cached_calls:
            with self.assertNumQueries(expected_sql_queries):
                with check_mongo_calls(expected_mongo_calls):
                    call_single_thread()
@patch('requests.request', autospec=True)
class SingleCohortedThreadTestCase(CohortedTestCase):
    """
    Tests single_thread for a thread that belongs to a cohort: the cohort's
    group id/name must appear in both the AJAX and HTML responses.
    """

    def _create_mock_cohorted_thread(self, mock_request):
        """Configure mock_request to serve a thread in the student's cohort."""
        self.mock_text = "dummy content"
        self.mock_thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.mock_text, thread_id=self.mock_thread_id, group_id=self.student_cohort.id
        )

    def test_ajax(self, mock_request):
        """AJAX response includes the cohort's group_id and group_name."""
        self._create_mock_cohorted_thread(mock_request)

        request = RequestFactory().get(
            "dummy_url",
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = self.student
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "cohorted_topic",
            self.mock_thread_id
        )

        self.assertEquals(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEquals(
            response_data["content"],
            make_mock_thread_data(
                course=self.course,
                text=self.mock_text,
                thread_id=self.mock_thread_id,
                num_children=1,
                group_id=self.student_cohort.id,
                group_name=self.student_cohort.name
            )
        )

    def test_html(self, mock_request):
        """HTML response embeds the cohort's group name."""
        self._create_mock_cohorted_thread(mock_request)

        request = RequestFactory().get("dummy_url")
        request.user = self.student
        mako_middleware_process_request(request)
        response = views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            "cohorted_topic",
            self.mock_thread_id
        )

        self.assertEquals(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        html = response.content

        # Verify that the group name is correctly included in the HTML
        self.assertRegexpMatches(html, r'"group_name": "student_cohort"')
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class SingleThreadAccessTestCase(CohortedTestCase):
    """
    Access-control tests for single_thread: students may only see threads
    in their own cohort (or global threads); moderators see everything.
    """

    def call_view(self, mock_request, commentable_id, user, group_id, thread_group_id=None, pass_group_id=True):
        """
        Invoke single_thread as `user`, serving a mocked thread whose group
        is `thread_group_id`, and return the response.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy context", thread_id=thread_id, group_id=thread_group_id
        )

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = user
        return views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            commentable_id,
            thread_id
        )

    def test_student_non_cohorted(self, mock_request):
        resp = self.call_view(mock_request, "non_cohorted_topic", self.student, self.student_cohort.id)
        self.assertEqual(resp.status_code, 200)

    def test_student_same_cohort(self, mock_request):
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            thread_group_id=self.student_cohort.id
        )
        self.assertEqual(resp.status_code, 200)

    # this test ensures that a thread response from the cs with group_id: null
    # behaves the same as a thread response without a group_id (see: TNL-444)
    def test_student_global_thread_in_cohorted_topic(self, mock_request):
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            thread_group_id=None
        )
        self.assertEqual(resp.status_code, 200)

    def test_student_different_cohort(self, mock_request):
        # A student must not see a thread belonging to another cohort.
        self.assertRaises(
            Http404,
            lambda: self.call_view(
                mock_request,
                "cohorted_topic",
                self.student,
                self.student_cohort.id,
                thread_group_id=self.moderator_cohort.id
            )
        )

    def test_moderator_non_cohorted(self, mock_request):
        resp = self.call_view(mock_request, "non_cohorted_topic", self.moderator, self.moderator_cohort.id)
        self.assertEqual(resp.status_code, 200)

    def test_moderator_same_cohort(self, mock_request):
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.moderator,
            self.moderator_cohort.id,
            thread_group_id=self.moderator_cohort.id
        )
        self.assertEqual(resp.status_code, 200)

    def test_moderator_different_cohort(self, mock_request):
        # Moderators can access threads in any cohort.
        resp = self.call_view(
            mock_request,
            "cohorted_topic",
            self.moderator,
            self.moderator_cohort.id,
            thread_group_id=self.student_cohort.id
        )
        self.assertEqual(resp.status_code, 200)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class SingleThreadGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Verifies cohort group information appears in single_thread responses,
    for both HTML and AJAX render paths (mixin supplies the assertions).
    """
    cs_endpoint = "/threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """Invoke single_thread as `user`; `is_ajax` selects the render path."""
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy context", group_id=self.student_cohort.id
        )

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"

        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            **headers
        )
        request.user = user
        mako_middleware_process_request(request)
        return views.single_thread(
            request,
            self.course.id.to_deprecated_string(),
            commentable_id,
            "dummy_thread_id"
        )

    def test_group_info_in_html_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=False
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['content']
        )
@patch('requests.request', autospec=True)
class SingleThreadContentGroupTestCase(ContentGroupTestCase):
    """
    Content-group (partition) visibility tests for single_thread: users only
    see threads in discussion modules their content group can access.
    """

    def assert_can_access(self, user, discussion_id, thread_id, should_have_access):
        """
        Verify that a user has access to a thread within a given
        discussion_id when should_have_access is True, otherwise
        verify that the user does not have access to that thread.
        """
        request = RequestFactory().get("dummy_url")
        request.user = user
        mako_middleware_process_request(request)

        def call_single_thread():
            return views.single_thread(
                request,
                unicode(self.course.id),
                discussion_id,
                thread_id
            )

        if should_have_access:
            self.assertEqual(call_single_thread().status_code, 200)
        else:
            with self.assertRaises(Http404):
                call_single_thread()

    def test_staff_user(self, mock_request):
        """
        Verify that the staff user can access threads in the alpha,
        beta, and global discussion modules.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)

        for discussion_module in [self.alpha_module, self.beta_module, self.global_module]:
            self.assert_can_access(self.staff_user, discussion_module.discussion_id, thread_id, True)

    def test_alpha_user(self, mock_request):
        """
        Verify that the alpha user can access threads in the alpha and
        global discussion modules.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)

        for discussion_module in [self.alpha_module, self.global_module]:
            self.assert_can_access(self.alpha_user, discussion_module.discussion_id, thread_id, True)

        self.assert_can_access(self.alpha_user, self.beta_module.discussion_id, thread_id, False)

    def test_beta_user(self, mock_request):
        """
        Verify that the beta user can access threads in the beta and
        global discussion modules.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)

        for discussion_module in [self.beta_module, self.global_module]:
            self.assert_can_access(self.beta_user, discussion_module.discussion_id, thread_id, True)

        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)

    def test_non_cohorted_user(self, mock_request):
        """
        Verify that the non-cohorted user can access threads in just the
        global discussion module.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy content", thread_id=thread_id)

        self.assert_can_access(self.non_cohorted_user, self.global_module.discussion_id, thread_id, True)

        self.assert_can_access(self.non_cohorted_user, self.alpha_module.discussion_id, thread_id, False)

        self.assert_can_access(self.non_cohorted_user, self.beta_module.discussion_id, thread_id, False)

    def test_course_context_respected(self, mock_request):
        """
        Verify that course threads go through discussion_category_id_access method.
        """
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", thread_id=thread_id
        )

        # Beta user does not have access to alpha_module.
        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, False)

    def test_standalone_context_respected(self, mock_request):
        """
        Verify that standalone threads don't go through discussion_category_id_access method.
        """
        # For this rather pathological test, we are assigning the alpha module discussion_id (commentable_id)
        # to a team so that we can verify that standalone threads don't go through discussion_category_id_access.
        thread_id = "test_thread_id"
        CourseTeamFactory(
            name="A team",
            course_id=self.course.id,
            topic_id='topic_id',
            discussion_topic_id=self.alpha_module.discussion_id
        )
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", thread_id=thread_id,
            commentable_id=self.alpha_module.discussion_id
        )

        # If a thread returns context other than "course", the access check is not done, and the beta user
        # can see the alpha discussion module.
        self.assert_can_access(self.beta_user, self.alpha_module.discussion_id, thread_id, True)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionContextTestCase(ModuleStoreTestCase):
    """
    Verifies that a discussion topic claimed by a course team is served with
    the STANDALONE thread context by the inline_discussion view.
    """

    def setUp(self):
        super(InlineDiscussionContextTestCase, self).setUp()
        self.course = CourseFactory.create()
        CourseEnrollmentFactory(user=self.user, course_id=self.course.id)
        self.discussion_topic_id = "dummy_topic"
        # Bind the topic to a team so threads in it get a standalone context.
        self.team = CourseTeamFactory(
            name="A team",
            course_id=self.course.id,
            topic_id='topic_id',
            discussion_topic_id=self.discussion_topic_id
        )

        self.team.add_user(self.user)  # pylint: disable=no-member

    def test_context_can_be_standalone(self, mock_request):
        mock_request.side_effect = make_mock_request_impl(
            course=self.course,
            text="dummy text",
            commentable_id=self.discussion_topic_id
        )

        request = RequestFactory().get("dummy_url")
        request.user = self.user

        response = views.inline_discussion(
            request,
            unicode(self.course.id),
            self.discussion_topic_id,
        )

        json_response = json.loads(response.content)
        self.assertEqual(json_response['discussion_data'][0]['context'], ThreadContext.STANDALONE)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionGroupIdTestCase(
        CohortedTestCase,
        CohortedTopicGroupIdTestMixin,
        NonCohortedTopicGroupIdTestMixin
):
    """
    Group-id handling tests for the inline_discussion view (assertions
    come from the cohorted/non-cohorted mixins).
    """
    cs_endpoint = "/threads"

    def setUp(self):
        super(InlineDiscussionGroupIdTestCase, self).setUp()
        self.cohorted_commentable_id = 'cohorted_topic'

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """Invoke inline_discussion as `user`, optionally passing group_id."""
        kwargs = {'commentable_id': self.cohorted_commentable_id}
        if group_id:
            # avoid causing a server error when the LMS chokes attempting
            # to find a group name for the group_id, when we're testing with
            # an invalid one.
            try:
                CourseUserGroup.objects.get(id=group_id)
                kwargs['group_id'] = group_id
            except CourseUserGroup.DoesNotExist:
                pass
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data
        )
        request.user = user
        return views.inline_discussion(
            request,
            self.course.id.to_deprecated_string(),
            commentable_id
        )

    def test_group_info_in_ajax_response(self, mock_request):
        response = self.call_view(
            mock_request,
            self.cohorted_commentable_id,
            self.student,
            self.student_cohort.id
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ForumFormDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Group-id handling tests for the forum_form_discussion view, in both
    HTML and AJAX render paths (assertions supplied by the mixin).
    """
    cs_endpoint = "/threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """Invoke forum_form_discussion as `user`; `is_ajax` picks the path."""
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"

        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            **headers
        )
        request.user = user
        mako_middleware_process_request(request)
        return views.forum_form_discussion(
            request,
            self.course.id.to_deprecated_string()
        )

    def test_group_info_in_html_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class UserProfileDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Group-id handling tests for the user_profile view: verifies when the
    cohort group_id is (and is not) forwarded to the comments service,
    depending on the requester's moderation privileges.
    """
    cs_endpoint = "/active_threads"

    def call_view_for_profiled_user(
            self, mock_request, requesting_user, profiled_user, group_id, pass_group_id, is_ajax=False
    ):
        """
        Calls "user_profile" view method on behalf of "requesting_user" to get information about
        the user "profiled_user".
        """
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        headers = {}
        if is_ajax:
            headers['HTTP_X_REQUESTED_WITH'] = "XMLHttpRequest"

        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            **headers
        )
        request.user = requesting_user
        mako_middleware_process_request(request)
        return views.user_profile(
            request,
            self.course.id.to_deprecated_string(),
            profiled_user.id
        )

    def call_view(self, mock_request, _commentable_id, user, group_id, pass_group_id=True, is_ajax=False):
        """Mixin hook: profile the requesting user themselves."""
        return self.call_view_for_profiled_user(
            mock_request, user, user, group_id, pass_group_id=pass_group_id, is_ajax=is_ajax
        )

    def test_group_info_in_html_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=False
        )
        self._assert_html_response_contains_group_info(response)

    def test_group_info_in_ajax_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id,
            is_ajax=True
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )

    def _test_group_id_passed_to_user_profile(
            self, mock_request, expect_group_id_in_request, requesting_user, profiled_user, group_id, pass_group_id
    ):
        """
        Helper method for testing whether or not group_id was passed to the user_profile request.
        """

        def get_params_from_user_info_call(for_specific_course):
            """
            Returns the request parameters for the user info call with either course_id specified or not,
            depending on value of 'for_specific_course'.
            """
            # There will be 3 calls from user_profile. One has the cs_endpoint "active_threads", and it is already
            # tested. The other 2 calls are for user info; one of those calls is for general information about the user,
            # and it does not specify a course_id. The other call does specify a course_id, and if the caller did not
            # have discussion moderator privileges, it should also contain a group_id.
            for r_call in mock_request.call_args_list:
                if not r_call[0][1].endswith(self.cs_endpoint):
                    params = r_call[1]["params"]
                    has_course_id = "course_id" in params
                    if (for_specific_course and has_course_id) or (not for_specific_course and not has_course_id):
                        return params
            # BUG FIX: the original concatenated a str with the bool
            # `for_specific_course`, raising TypeError instead of the intended
            # assertion message; convert to str so the real failure is reported.
            self.assertTrue(
                False,
                "Did not find appropriate user_profile call for 'for_specific_course'=" + str(for_specific_course)
            )

        mock_request.reset_mock()
        self.call_view_for_profiled_user(
            mock_request,
            requesting_user,
            profiled_user,
            group_id,
            pass_group_id=pass_group_id,
            is_ajax=False
        )

        # Should never have a group_id if course_id was not included in the request.
        params_without_course_id = get_params_from_user_info_call(False)
        self.assertNotIn("group_id", params_without_course_id)

        params_with_course_id = get_params_from_user_info_call(True)
        if expect_group_id_in_request:
            self.assertIn("group_id", params_with_course_id)
            self.assertEqual(group_id, params_with_course_id["group_id"])
        else:
            self.assertNotIn("group_id", params_with_course_id)

    def test_group_id_passed_to_user_profile_student(self, mock_request):
        """
        Test that the group id is always included when requesting user profile information for a particular
        course if the requester does not have discussion moderation privileges.
        """

        def verify_group_id_always_present(profiled_user, pass_group_id):
            """
            Helper method to verify that group_id is always present for student in course
            (non-privileged user).
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, True, self.student, profiled_user, self.student_cohort.id, pass_group_id
            )

        # In all these test cases, the requesting_user is the student (non-privileged user).
        # The profile returned on behalf of the student is for the profiled_user.
        verify_group_id_always_present(profiled_user=self.student, pass_group_id=True)
        verify_group_id_always_present(profiled_user=self.student, pass_group_id=False)
        verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=True)
        verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=False)

    def test_group_id_user_profile_moderator(self, mock_request):
        """
        Test that the group id is only included when a privileged user requests user profile information for a
        particular course and user if the group_id is explicitly passed in.
        """

        def verify_group_id_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
            """
            Helper method to verify that group_id is present.
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, True, self.moderator, profiled_user, requested_cohort.id, pass_group_id
            )

        def verify_group_id_not_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):
            """
            Helper method to verify that group_id is not present.
            """
            self._test_group_id_passed_to_user_profile(
                mock_request, False, self.moderator, profiled_user, requested_cohort.id, pass_group_id
            )

        # In all these test cases, the requesting_user is the moderator (privileged user).
        # If the group_id is explicitly passed, it will be present in the request.
        verify_group_id_present(profiled_user=self.student, pass_group_id=True)
        verify_group_id_present(profiled_user=self.moderator, pass_group_id=True)
        verify_group_id_present(
            profiled_user=self.student, pass_group_id=True, requested_cohort=self.student_cohort
        )

        # If the group_id is not explicitly passed, it will not be present because the requesting_user
        # has discussion moderator privileges.
        verify_group_id_not_present(profiled_user=self.student, pass_group_id=False)
        verify_group_id_not_present(profiled_user=self.moderator, pass_group_id=False)
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class FollowedThreadsDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin):
    """
    Group-id handling tests for the followed_threads view (assertions
    supplied by the cohorted-topic mixin).
    """
    cs_endpoint = "/subscribed_threads"

    def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):
        """Invoke followed_threads as `user`, optionally passing group_id."""
        kwargs = {}
        if group_id:
            kwargs['group_id'] = group_id
        mock_request.side_effect = make_mock_request_impl(self.course, "dummy content", **kwargs)

        request_data = {}
        if pass_group_id:
            request_data["group_id"] = group_id
        request = RequestFactory().get(
            "dummy_url",
            data=request_data,
            HTTP_X_REQUESTED_WITH="XMLHttpRequest"
        )
        request.user = user
        return views.followed_threads(
            request,
            self.course.id.to_deprecated_string(),
            user.id
        )

    def test_group_info_in_ajax_response(self, mock_request):
        response = self.call_view(
            mock_request,
            "cohorted_topic",
            self.student,
            self.student_cohort.id
        )
        self._assert_json_response_contains_group_info(
            response, lambda d: d['discussion_data'][0]
        )
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class InlineDiscussionTestCase(ModuleStoreTestCase):
    """
    Tests for the inline_discussion view: courseware metadata in the
    response, and the thread context when the topic belongs to a team.
    """

    def setUp(self):
        super(InlineDiscussionTestCase, self).setUp()
        self.course = CourseFactory.create(org="TestX", number="101", display_name="Test Course")
        self.student = UserFactory.create()
        CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
        self.discussion1 = ItemFactory.create(
            parent_location=self.course.location,
            category="discussion",
            discussion_id="discussion1",
            display_name='Discussion1',
            discussion_category="Chapter",
            discussion_target="Discussion1"
        )

    def send_request(self, mock_request, params=None):
        """
        Creates and returns a request with params set, and configures
        mock_request to return appropriate values.
        """
        request = RequestFactory().get("dummy_url", params if params else {})
        request.user = self.student
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text="dummy content", commentable_id=self.discussion1.discussion_id
        )
        return views.inline_discussion(
            request, self.course.id.to_deprecated_string(), self.discussion1.discussion_id
        )

    def verify_response(self, response):
        """Verifies that the response contains the appropriate courseware_url and courseware_title"""
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        expected_courseware_url = '/courses/TestX/101/Test_Course/jump_to/i4x://TestX/101/discussion/Discussion1'
        expected_courseware_title = 'Chapter / Discussion1'
        self.assertEqual(response_data['discussion_data'][0]['courseware_url'], expected_courseware_url)
        self.assertEqual(response_data["discussion_data"][0]["courseware_title"], expected_courseware_title)

    def test_courseware_data(self, mock_request):
        self.verify_response(self.send_request(mock_request))

    def test_context(self, mock_request):
        # Attaching the topic to a team should switch the thread context
        # passed to the comments service to STANDALONE.
        team = CourseTeamFactory(
            name='Team Name',
            topic_id='A topic',
            course_id=self.course.id,
            discussion_topic_id=self.discussion1.discussion_id
        )

        team.add_user(self.student)  # pylint: disable=no-member

        response = self.send_request(mock_request)
        self.assertEqual(mock_request.call_args[1]['params']['context'], ThreadContext.STANDALONE)
        self.verify_response(response)
@patch('requests.request', autospec=True)
class UserProfileTestCase(ModuleStoreTestCase):
    """
    Tests for the user_profile view: HTML and AJAX rendering, pagination,
    404s for unknown users/courses, and HTTP method restrictions.
    """

    # Fixture values expected back in the rendered responses.
    TEST_THREAD_TEXT = 'userprofile-test-text'
    TEST_THREAD_ID = 'userprofile-test-thread-id'

    def setUp(self):
        super(UserProfileTestCase, self).setUp()

        self.course = CourseFactory.create()
        self.student = UserFactory.create()
        self.profiled_user = UserFactory.create()
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)

    def get_response(self, mock_request, params, **headers):
        """
        Invoke user_profile for self.profiled_user and verify the
        active_threads call to the comments service, returning the response.
        """
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
        )
        request = RequestFactory().get("dummy_url", data=params, **headers)
        request.user = self.student

        mako_middleware_process_request(request)
        response = views.user_profile(
            request,
            self.course.id.to_deprecated_string(),
            self.profiled_user.id
        )
        mock_request.assert_any_call(
            "get",
            StringEndsWithMatcher('/users/{}/active_threads'.format(self.profiled_user.id)),
            data=None,
            params=PartialDictMatcher({
                "course_id": self.course.id.to_deprecated_string(),
                "page": params.get("page", 1),
                "per_page": views.THREADS_PER_PAGE
            }),
            headers=ANY,
            timeout=ANY
        )
        return response

    def check_html(self, mock_request, **params):
        """Assert the HTML page embeds the expected thread/pagination data."""
        response = self.get_response(mock_request, params)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
        html = response.content
        self.assertRegexpMatches(html, r'data-page="1"')
        self.assertRegexpMatches(html, r'data-num-pages="1"')
        self.assertRegexpMatches(html, r'<span>1</span> discussion started')
        self.assertRegexpMatches(html, r'<span>2</span> comments')
        self.assertRegexpMatches(html, r'"id": "{}"'.format(self.TEST_THREAD_ID))
        self.assertRegexpMatches(html, r'"title": "{}"'.format(self.TEST_THREAD_TEXT))
        self.assertRegexpMatches(html, r'"body": "{}"'.format(self.TEST_THREAD_TEXT))
        self.assertRegexpMatches(html, r'"username": "{}"'.format(self.student.username))

    def check_ajax(self, mock_request, **params):
        """Assert the AJAX response contains the expected JSON payload."""
        response = self.get_response(mock_request, params, HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json; charset=utf-8')
        response_data = json.loads(response.content)
        self.assertEqual(
            sorted(response_data.keys()),
            ["annotated_content_info", "discussion_data", "num_pages", "page"]
        )
        self.assertEqual(len(response_data['discussion_data']), 1)
        self.assertEqual(response_data["page"], 1)
        self.assertEqual(response_data["num_pages"], 1)
        self.assertEqual(response_data['discussion_data'][0]['id'], self.TEST_THREAD_ID)
        self.assertEqual(response_data['discussion_data'][0]['title'], self.TEST_THREAD_TEXT)
        self.assertEqual(response_data['discussion_data'][0]['body'], self.TEST_THREAD_TEXT)

    def test_html(self, mock_request):
        self.check_html(mock_request)

    def test_html_p2(self, mock_request):
        self.check_html(mock_request, page="2")

    def test_ajax(self, mock_request):
        self.check_ajax(mock_request)

    def test_ajax_p2(self, mock_request):
        self.check_ajax(mock_request, page="2")

    def test_404_profiled_user(self, mock_request):
        """An unknown profiled-user id raises Http404."""
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.user_profile(
                request,
                self.course.id.to_deprecated_string(),
                -999
            )

    def test_404_course(self, mock_request):
        """An unknown course id raises Http404."""
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.user_profile(
                request,
                "non/existent/course",
                self.profiled_user.id
            )

    def test_post(self, mock_request):
        """POST is not allowed on the user_profile view (405)."""
        mock_request.side_effect = make_mock_request_impl(
            course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID
        )
        request = RequestFactory().post("dummy_url")
        request.user = self.student
        response = views.user_profile(
            request,
            self.course.id.to_deprecated_string(),
            self.profiled_user.id
        )
        self.assertEqual(response.status_code, 405)
@patch('requests.request', autospec=True)
class CommentsServiceRequestHeadersTestCase(UrlResetMixin, ModuleStoreTestCase):
    """
    Verifies that every request sent to the comments service carries the
    expected HTTP headers (Accept-Language, X-Edx-Api-Key).
    """

    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        # NOTE(review): super().setUp() is invoked twice here -- once without
        # arguments and once with create_user=False to trigger UrlResetMixin
        # under the patched FEATURES flag. The duplicate first call looks
        # unintentional; confirm whether it can be removed.
        super(CommentsServiceRequestHeadersTestCase, self).setUp()

        username = "foo"
        password = "bar"

        # Invoke UrlResetMixin
        super(CommentsServiceRequestHeadersTestCase, self).setUp(create_user=False)
        self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        self.assertTrue(
            self.client.login(username=username, password=password)
        )

    def assert_all_calls_have_header(self, mock_request, key, value):
        """Assert every comments-service call carried header `key: value`."""
        expected = call(
            ANY,  # method
            ANY,  # url
            data=ANY,
            params=ANY,
            headers=PartialDictMatcher({key: value}),
            timeout=ANY
        )
        for actual in mock_request.call_args_list:
            self.assertEqual(expected, actual)

    def test_accept_language(self, mock_request):
        """The client's Accept-Language header is forwarded downstream."""
        lang = "eo"
        text = "dummy content"
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)

        self.client.get(
            reverse(
                "django_comment_client.forum.views.single_thread",
                kwargs={
                    "course_id": self.course.id.to_deprecated_string(),
                    "discussion_id": "dummy_discussion_id",
                    "thread_id": thread_id,
                }
            ),
            HTTP_ACCEPT_LANGUAGE=lang,
        )
        self.assert_all_calls_have_header(mock_request, "Accept-Language", lang)

    @override_settings(COMMENTS_SERVICE_KEY="test_api_key")
    def test_api_key(self, mock_request):
        """The configured comments-service API key is sent on every call."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text="dummy", thread_id="dummy")

        self.client.get(
            reverse(
                "django_comment_client.forum.views.forum_form_discussion",
                kwargs={"course_id": self.course.id.to_deprecated_string()}
            ),
        )
        self.assert_all_calls_have_header(mock_request, "X-Edx-Api-Key", "test_api_key")
class InlineDiscussionUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Unicode round-trip tests for the inline discussion view."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(InlineDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(InlineDiscussionUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Assert `text` survives unchanged in the view's JSON thread data (driven by UnicodeTestMixin)."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        response = views.inline_discussion(
            request, self.course.id.to_deprecated_string(), self.course.discussion_topics['General']['id']
        )
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
    def _test_invalid_course_id(self):
        """ Asserts that Http404 is raised when the course id is not valid. """
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        with self.assertRaises(Http404):
            views.inline_discussion(
                request, "/some.invalid.key/course-v1:TTT+CS01+2015_T0", self.course.discussion_topics['General']['id']
            )
class ForumFormDiscussionUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Unicode round-trip tests for the forum discussion board view."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ForumFormDiscussionUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(ForumFormDiscussionUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Assert `text` survives unchanged in the view's JSON thread data (driven by UnicodeTestMixin)."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True
        response = views.forum_form_discussion(request, self.course.id.to_deprecated_string())
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
@ddt.ddt
@patch('lms.lib.comment_client.utils.requests.request', autospec=True)
class ForumDiscussionXSSTestCase(UrlResetMixin, ModuleStoreTestCase):
    """Verify user-supplied query parameters cannot inject script into forum pages."""
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(ForumDiscussionXSSTestCase, self).setUp()
        username = "foo"
        password = "bar"
        self.course = CourseFactory.create()
        self.student = UserFactory.create(username=username, password=password)
        CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
        self.assertTrue(self.client.login(username=username, password=password))
    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    @patch('student.models.cc.User.from_django_user')
    def test_forum_discussion_xss_prevent(self, malicious_code, mock_user, mock_req):  # pylint: disable=unused-argument
        """
        Test that XSS attack is prevented
        """
        reverse_url = "%s%s" % (reverse(
            "django_comment_client.forum.views.forum_form_discussion",
            kwargs={"course_id": unicode(self.course.id)}), '/forum_form_discussion')
        # Test that malicious code does not appear in html
        url = "%s?%s=%s" % (reverse_url, 'sort_key', malicious_code)
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn(malicious_code, resp.content)
    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    @patch('student.models.cc.User.from_django_user')
    @patch('student.models.cc.User.active_threads')
    def test_forum_user_profile_xss_prevent(self, malicious_code, mock_threads, mock_from_django_user, mock_request):
        """
        Test that XSS attack is prevented
        """
        # active_threads returns (threads, page, num_pages)
        mock_threads.return_value = [], 1, 1
        mock_from_django_user.return_value = Mock()
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
        url = reverse('django_comment_client.forum.views.user_profile',
                      kwargs={'course_id': unicode(self.course.id), 'user_id': str(self.student.id)})
        # Test that malicious code does not appear in html
        url_string = "%s?%s=%s" % (url, 'page', malicious_code)
        resp = self.client.get(url_string)
        self.assertEqual(resp.status_code, 200)
        self.assertNotIn(malicious_code, resp.content)
class ForumDiscussionSearchUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Unicode round-trip tests for the ajax forum search endpoint."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(ForumDiscussionSearchUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(ForumDiscussionSearchUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Search for `text` and assert it round-trips through the comment service unchanged."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        query_params = {
            "ajax": 1,
            "text": text,
        }
        request = RequestFactory().get("dummy_url", query_params)
        request.user = self.student
        # Flag the request as ajax so the view returns JSON instead of HTML.
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"
        response = views.forum_form_discussion(request, self.course.id.to_deprecated_string())
        self.assertEqual(response.status_code, 200)
        payload = json.loads(response.content)
        first_thread = payload["discussion_data"][0]
        self.assertEqual(first_thread["title"], text)
        self.assertEqual(first_thread["body"], text)
class SingleThreadUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Unicode round-trip tests for the single-thread view."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(SingleThreadUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create(discussion_topics={'dummy_discussion_id': {'id': 'dummy_discussion_id'}})
    @classmethod
    def setUpTestData(cls):
        super(SingleThreadUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Assert `text` survives unchanged in the thread's JSON content (driven by UnicodeTestMixin)."""
        thread_id = "test_thread_id"
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True
        response = views.single_thread(request, self.course.id.to_deprecated_string(), "dummy_discussion_id", thread_id)
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["content"]["title"], text)
        self.assertEqual(response_data["content"]["body"], text)
class UserProfileUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Unicode round-trip tests for the forum user-profile view."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(UserProfileUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(UserProfileUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Assert `text` survives unchanged in the profile's JSON thread data (driven by UnicodeTestMixin)."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True
        response = views.user_profile(request, self.course.id.to_deprecated_string(), str(self.student.id))
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
class FollowedThreadsUnicodeTestCase(SharedModuleStoreTestCase, UnicodeTestMixin):
    """Unicode round-trip tests for the followed-threads view."""
    @classmethod
    def setUpClass(cls):
        # pylint: disable=super-method-not-called
        with super(FollowedThreadsUnicodeTestCase, cls).setUpClassAndTestData():
            cls.course = CourseFactory.create()
    @classmethod
    def setUpTestData(cls):
        super(FollowedThreadsUnicodeTestCase, cls).setUpTestData()
        cls.student = UserFactory.create()
        CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def _test_unicode_data(self, text, mock_request):
        """Assert `text` survives unchanged in the followed-threads JSON data (driven by UnicodeTestMixin)."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)
        request = RequestFactory().get("dummy_url")
        request.user = self.student
        request.META["HTTP_X_REQUESTED_WITH"] = "XMLHttpRequest"  # so request.is_ajax() == True
        response = views.followed_threads(request, self.course.id.to_deprecated_string(), str(self.student.id))
        self.assertEqual(response.status_code, 200)
        response_data = json.loads(response.content)
        self.assertEqual(response_data["discussion_data"][0]["title"], text)
        self.assertEqual(response_data["discussion_data"][0]["body"], text)
class EnrollmentTestCase(ModuleStoreTestCase):
    """
    Tests for the behavior of views depending on if the student is enrolled
    in the course
    """
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    def setUp(self):
        super(EnrollmentTestCase, self).setUp()
        self.course = CourseFactory.create()
        # Note: student is intentionally NOT enrolled in self.course.
        self.student = UserFactory.create()
    @patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
    @patch('lms.lib.comment_client.utils.requests.request', autospec=True)
    def test_unenrolled(self, mock_request):
        """An unenrolled user hitting the forum view raises UserNotEnrolled."""
        mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')
        request = RequestFactory().get('dummy_url')
        request.user = self.student
        with self.assertRaises(UserNotEnrolled):
            views.forum_form_discussion(request, course_id=self.course.id.to_deprecated_string())
|
Export result time
|
6c5fe8f0-2d5f-11e5-8500-b88d120fff5e
6c6b4575-2d5f-11e5-9302-b88d120fff5e
6c6b4575-2d5f-11e5-9302-b88d120fff5e |
from django import forms
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet
from django.core.validators import RegexValidator
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm
from django.templatetags.static import static
from django.utils.safestring import mark_safe
from .models import MonsterInstance, Summoner, TeamGroup, Team, RuneInstance
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Div, Layout, Field, Button, HTML, Hidden, Reset
from crispy_forms.bootstrap import FormActions, PrependedText, FieldWithButtons, StrictButton, InlineField, Alert
from captcha.fields import ReCaptchaField
import autocomplete_light
# Base static URL for the herders image assets embedded in form layouts below.
STATIC_URL_PREFIX = static('herders/images/')
# User stuff
class CrispyAuthenticationForm(AuthenticationForm):
    """Standard Django login form rendered with crispy-forms styling."""
    def __init__(self, *args, **kwargs):
        super(CrispyAuthenticationForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_method = 'post'
        helper.form_action = 'login'
        helper.layout = Layout(
            Field('username'),
            Field('password'),
            # Carry the post-login redirect target supplied by the view.
            Hidden('next', value='{{ next }}'),
            FormActions(Submit('login', 'Log In', css_class='btn-lg btn-primary btn-block')),
        )
        self.helper = helper
class CrispyPasswordChangeForm(PasswordChangeForm):
    """Standard Django password-change form rendered with crispy-forms styling."""
    def __init__(self, *args, **kwargs):
        super(CrispyPasswordChangeForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_method = 'post'
        helper.form_action = 'password_change'
        helper.layout = Layout(
            Field('old_password'),
            Field('new_password1'),
            Field('new_password2'),
            FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')),
        )
        self.helper = helper
class CrispyPasswordResetForm(PasswordResetForm):
    """Standard Django password-reset (email) form rendered with crispy-forms styling."""
    def __init__(self, *args, **kwargs):
        super(CrispyPasswordResetForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_method = 'post'
        helper.form_action = 'password_reset'
        helper.layout = Layout(
            Field('email'),
            FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')),
        )
        self.helper = helper
class CrispySetPasswordForm(SetPasswordForm):
    """Standard Django set-password form rendered with crispy-forms styling."""
    def __init__(self, *args, **kwargs):
        super(CrispySetPasswordForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_method = 'post'
        helper.layout = Layout(
            Field('new_password1'),
            Field('new_password2'),
            FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')),
        )
        self.helper = helper
class CrispyChangeUsernameForm(forms.Form):
    """Form for changing the logged-in user's username."""
    username = forms.CharField(
        label='New Username',
        required=True,
        help_text='This will change the username used to log in and the URL used to access your profile.',
        validators=[
            RegexValidator(
                regex='^[a-zA-Z0-9_]+$',
                message='Username must contain only alphanumeric characters and underscore.',
                code='invalid_username'
            ),
        ]
    )
    # Crispy helper declared at class level; layout is static so it is safe to share.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_action = 'username_change'
    helper.layout = Layout(
        Field('username', css_class='input-sm'),
        FormActions(Submit('change', 'Change', css_class='btn-lg btn-primary btn-block'))
    )
class RegisterUserForm(forms.Form):
    """Account registration form with reCAPTCHA protection."""
    username = forms.CharField(
        label='Username',
        required=True,
        help_text='Used to link to your profile to others: http://swarfarm.com/profile/<username>/',
        validators=[
            RegexValidator(
                regex='^[a-zA-Z0-9_]+$',
                message='Username must contain only alphanumeric characters and underscore.',
                code='invalid_username'
            ),
        ]
    )
    email = forms.EmailField(required=True, help_text='Your email address will only be used for password resets and account expiration notices.')
    password = forms.CharField(label="Password", required=True, widget=forms.PasswordInput)
    summoner_name = forms.CharField(label="Summoner's War Account Name", required=False, help_text='Not required. Visible to others if you make your SWARFARM account public.')
    is_public = forms.BooleanField(label='Make my SWARFARM account visible to others', required=False)
    captcha = ReCaptchaField()
    # Crispy helper declared at class level; layout is static so it is safe to share.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_action = 'herders:register'
    helper.layout = Layout(
        Field('username', css_class='input-sm'),
        Field('password', css_class='input-sm'),
        Field('email', css_class='input-sm'),
        Field('summoner_name', css_class='input-sm'),
        Field('is_public'),
        Field('captcha'),
        FormActions(Submit('register', 'Register', css_class='btn-lg btn-primary btn-block'))
    )
class EditUserForm(ModelForm):
    """Profile form exposing only the Django user's email address."""
    def __init__(self, *args, **kwargs):
        super(EditUserForm, self).__init__(*args, **kwargs)
        helper = FormHelper(self)
        # Rendered inside a larger <form> in the template, so no form tag here.
        helper.form_tag = False
        helper.layout = Layout(
            Div(
                Field('email'),
            )
        )
        self.helper = helper
    class Meta:
        model = User
        fields = (
            'email',
        )
class EditSummonerForm(ModelForm):
    """Profile form for the Summoner model's basic settings."""
    def __init__(self, *args, **kwargs):
        super(EditSummonerForm, self).__init__(*args, **kwargs)
        helper = FormHelper(self)
        # Rendered inside a larger <form> in the template, so no form tag here.
        helper.form_tag = False
        helper.layout = Layout(
            Div(
                Field('summoner_name'),
                Field('public'),
                Field('timezone'),
            ),
        )
        self.helper = helper
    class Meta:
        model = Summoner
        fields = (
            'summoner_name',
            'public',
            'timezone',
        )
        labels = {
            'summoner_name': "Summoner's War Account Name",
            'public': 'Make my SWARFARM account visible to others',
        }
class DeleteProfileForm(forms.Form):
    """Account deletion form requiring an explicit typed acknowledgement plus reCAPTCHA."""
    confirmbox = forms.BooleanField(label="I seriously do want to delete my account and all associated data", required=True)
    passcode = forms.CharField(
        label='Acknowledgement:',
        required=True,
        help_text='Enter the following text: I acknowledge everything will be permanently deleted',
        validators=[
            RegexValidator(
                regex='^I acknowledge everything will be permanently deleted$',
                message="You didn't enter the correct text.",
                code='invalid_acknowledgement'
            )
        ]
    )
    captcha = ReCaptchaField()
    # Crispy helper declared at class level; layout is static so it is safe to share.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.layout = Layout(
        Div(
            Field('confirmbox', css_class='checkbox'),
            Field('passcode', css_class='input-sm'),
            Field('captcha'),
            FormActions(
                Submit('delete', 'Delete', css_class='btn-lg btn-danger btn-block'),
            ),
            css_class='col-md-6 col-md-offset-3',
        ),
    )
# SWARFARM forms
class EditEssenceStorageForm(ModelForm):
    """
    Form for editing a summoner's awakening-essence inventory.

    The layout is a 6x3 grid — one row per element, one column per essence
    grade — with each quantity field prefixed by the matching essence icon.
    The original hand-wrote all 18 cells; the rows are structurally
    identical, so they are now generated by a single helper.
    """
    # Element rows and essence grades, in display order.
    _ELEMENTS = ('magic', 'fire', 'water', 'wind', 'light', 'dark')
    _GRADES = ('low', 'mid', 'high')

    def __init__(self, *args, **kwargs):
        super(EditEssenceStorageForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_show_labels = True
        rows = [self._essence_row(element) for element in self._ELEMENTS]
        rows.append(Div(
            FormActions(
                Submit('save', 'Save and Go Back'),
                Submit('saveandcontinue', 'Save and Continue Editing'),
            ),
            css_class='row',
        ))
        self.helper.layout = Layout(*rows)

    def _essence_row(self, element):
        """Build one layout row: the low/mid/high storage fields for `element`."""
        columns = []
        for grade in self._GRADES:
            field_name = 'storage_%s_%s' % (element, grade)
            icon = '<img src="' + STATIC_URL_PREFIX + 'essences/%s_%s.png" class="prepended-image"/>' % (element, grade)
            columns.append(Div(
                PrependedText(field_name, icon, min=0),
                css_class='col-lg-1 storage_group prepended-image-group',
            ))
        return Div(*columns, css_class='row')

    class Meta:
        model = Summoner
        fields = (
            'storage_magic_low',
            'storage_magic_mid',
            'storage_magic_high',
            'storage_fire_low',
            'storage_fire_mid',
            'storage_fire_high',
            'storage_water_low',
            'storage_water_mid',
            'storage_water_high',
            'storage_wind_low',
            'storage_wind_mid',
            'storage_wind_high',
            'storage_light_low',
            'storage_light_mid',
            'storage_light_high',
            'storage_dark_low',
            'storage_dark_mid',
            'storage_dark_high',
        )
        labels = {
            'storage_magic_low': 'Magic Low',
            'storage_magic_mid': 'Magic Mid',
            'storage_magic_high': 'Magic High',
            'storage_fire_low': 'Fire Low',
            'storage_fire_mid': 'Fire Mid',
            'storage_fire_high': 'Fire High',
            'storage_water_low': 'Water Low',
            'storage_water_mid': 'Water Mid',
            'storage_water_high': 'Water High',
            'storage_wind_low': 'Wind Low',
            'storage_wind_mid': 'Wind Mid',
            'storage_wind_high': 'Wind High',
            'storage_light_low': 'Light Low',
            'storage_light_mid': 'Light Mid',
            'storage_light_high': 'Light High',
            'storage_dark_low': 'Dark Low',
            'storage_dark_mid': 'Dark Mid',
            'storage_dark_high': 'Dark High',
        }
class AddMonsterInstanceForm(autocomplete_light.ModelForm):
    """Modal form for adding a single MonsterInstance, with autocomplete monster lookup."""
    monster = autocomplete_light.ModelChoiceField('MonsterAutocomplete')
    def __init__(self, *args, **kwargs):
        super(AddMonsterInstanceForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            # data_* attributes drive a popover plus client-side JS that
            # auto-fills stars/fodder/priority when a monster is selected.
            Field(
                'monster',
                data_toggle='popover',
                data_trigger='focus',
                data_container='body',
                title='Autocomplete Tips',
                data_content="Enter the monster's awakened or unawakened name (either will work). To further narrow results, type the element too. Example: \"Raksha water\" will list water Rakshasa and Su",
                data_stars_field=self['stars'].auto_id,
                data_fodder_field=self['fodder'].auto_id,
                data_priority_field=self['priority'].auto_id,
                data_set_stars='',
            ),
            Field('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
            FieldWithButtons(
                Field('level', value=1, min=1, max=40),
                # "Max" button sets level to the maximum allowed for the chosen star count.
                StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
            ),
            Field('fodder', css_class='checkbox'),
            Field('in_storage', css_class='checkbox'),
            Field('ignore_for_fusion', css_class='checkbox'),
            Field('priority',),
            Field('notes'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
                Button('cancel', 'Cancel', css_class='btn btn-link', data_dismiss='modal')
            ),
        )
    class Meta:
        model = MonsterInstance
        fields = ('monster', 'stars', 'level', 'fodder', 'in_storage', 'ignore_for_fusion', 'priority', 'notes')
class BulkAddMonsterInstanceFormset(BaseModelFormSet):
    """Formset for bulk-adding monsters; always starts empty (create-only)."""
    def __init__(self, *args, **kwargs):
        super(BulkAddMonsterInstanceFormset, self).__init__(*args, **kwargs)
        # No existing instances are listed — this formset only creates new rows.
        self.queryset = MonsterInstance.objects.none()
class BulkAddMonsterInstanceForm(autocomplete_light.ModelForm):
    """One table row of the bulk-add monsters page; layout emits raw <td> wrappers."""
    monster = autocomplete_light.ModelChoiceField('MonsterAutocomplete')
    def __init__(self, *args, **kwargs):
        super(BulkAddMonsterInstanceForm, self).__init__(*args, **kwargs)
        # Blank rows are allowed in the bulk formset, so monster is optional here.
        self.fields['monster'].required = False
        self.helper = FormHelper(self)
        self.helper.form_tag = False
        self.helper.form_show_labels = False
        self.helper.disable_csrf = True
        self.helper.layout = Layout(
            HTML('<td>'),
            InlineField(
                'monster',
                data_stars_field=self['stars'].auto_id,
                data_fodder_field=self['fodder'].auto_id,
                data_set_stars=''
            ),
            HTML('</td><td>'),
            InlineField('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
            HTML('</td><td>'),
            FieldWithButtons(
                Field('level', value=1, min=1, max=40),
                StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
            ),
            HTML('</td><td>'),
            Field('in_storage'),
            HTML('</td><td>'),
            Field('fodder'),
            HTML('</td>'),
        )
    class Meta:
        model = MonsterInstance
        fields = ('monster', 'stars', 'level', 'in_storage', 'fodder')
class EditMonsterInstanceForm(ModelForm):
    """Modal form for editing an existing MonsterInstance (owner/monster are fixed)."""
    def __init__(self, *args, **kwargs):
        super(EditMonsterInstanceForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            Div(
                Field('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
                FieldWithButtons(
                    Field('level', value=1, min=1, max=40),
                    # "Max" button sets level to the maximum allowed for the chosen star count.
                    StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
                ),
                Field('fodder', css_class='checkbox'),
                Field('in_storage', css_class='checkbox'),
                Field('ignore_for_fusion', css_class='checkbox'),
                'priority',
                'skill_1_level',
                'skill_2_level',
                'skill_3_level',
                'skill_4_level',
                Field('notes'),
            ),
            Div(
                FormActions(
                    Submit('save', 'Save', css_class='btn btn-primary'),
                    HTML("""<button class="btn btn-link" data-dismiss="modal">Cancel</button>"""),
                ),
            )
        )
    class Meta:
        model = MonsterInstance
        exclude = ('owner', 'monster')
class PowerUpMonsterInstanceForm(forms.Form):
    """Form for selecting material monsters to power up or evolve a monster."""
    monster = autocomplete_light.ModelMultipleChoiceField('MonsterInstanceAutocomplete')
    monster.label = 'Material Monsters'
    monster.required = False
    ignore_evolution = forms.BooleanField(
        label='Ignore evolution error checking',
        required=False,
    )
    # Crispy helper declared at class level; layout is static so it is safe to share.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_class = 'ajax-form'
    helper.layout = Layout(
        Field('monster'),
        Field('ignore_evolution'),
        FormActions(
            # Two submit buttons: the view distinguishes power-up from evolve by name.
            Submit('power_up', 'Power Up', css_class='btn btn-primary'),
            Submit('evolve', 'Evolve', css_class='btn btn-primary'),
        )
    )
class AwakenMonsterInstanceForm(forms.Form):
    """Confirmation form for awakening a monster, optionally consuming stored essences."""
    subtract_materials = forms.BooleanField(
        label='Subtract Materials from stock (Insufficient quantities will be reduced to 0)',
        required=False
    )
    # Crispy helper declared at class level; layout is static so it is safe to share.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_class = 'ajax-form'
    helper.layout = Layout(
        Div(
            # checked='' renders the checkbox pre-checked by default.
            Field('subtract_materials', css_class='checkbox', checked=''),
        ),
        Div(
            FormActions(
                Submit('awaken', 'Awaken', css_class='btn btn-primary'),
                HTML("""<a href="{{ return_path }}" class="btn btn-link">Cancel</a>"""),
            ),
        )
    )
class AddTeamGroupForm(ModelForm):
    """Modal form for creating a new TeamGroup."""
    def __init__(self, *args, **kwargs):
        super(AddTeamGroupForm, self).__init__(*args, **kwargs)
        helper = FormHelper(self)
        helper.form_method = 'post'
        # helper.form_action must be set in view
        helper.layout = Layout(
            Div(
                Field('name'),
                css_class='modal-body',
            ),
            Div(
                FormActions(
                    Submit('save', 'Save', css_class='btn btn-primary'),
                    Button('cancel', 'Cancel', css_class='btn btn-link', data_dismiss='modal')
                ),
                css_class='modal-footer',
            )
        )
        self.helper = helper
    class Meta:
        model = TeamGroup
        exclude = ('id', 'owner')
class EditTeamGroupForm(ModelForm):
    """Form for renaming a TeamGroup, with a link to its delete confirmation page."""
    def __init__(self, *args, **kwargs):
        super(EditTeamGroupForm, self).__init__(*args, **kwargs)
        helper = FormHelper(self)
        helper.form_method = 'post'
        helper.layout = Layout(
            Field('name'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
                HTML("""<a href="{{ return_path }}" class="btn btn-link">Cancel</a>"""),
                HTML("""<a href="{% url 'herders:team_group_delete' profile_name=profile_name group_id=group_id%}" class="btn btn-danger pull-right">Delete</a>"""),
            ),
        )
        self.helper = helper
    class Meta:
        model = TeamGroup
        exclude = ('id', 'owner')
class DeleteTeamGroupForm(forms.Form):
    """Delete-group confirmation form; teams can be reassigned or deleted with the group."""
    # NOTE(review): queryset should be narrowed to the current user's groups in the
    # view (e.g. form.fields['reassign_group'].queryset = ...) — confirm the view does so.
    reassign_group = forms.ModelChoiceField(
        queryset=TeamGroup.objects.all(),
        required=False,
        label="Reassign teams in this group to:"
    )
    # Crispy helper declared at class level; layout is static so it is safe to share.
    helper = FormHelper()
    helper.form_method = 'post'
    # helper.form_action must be set in view
    helper.layout = Layout(
        Field('reassign_group', css_class='input-sm'),
        FormActions(
            Submit('apply', 'Apply', css_class='btn btn-primary'),
            Submit('delete', 'Delete all teams', css_class='btn btn-danger'),
        )
    )
class EditTeamForm(ModelForm):
    """Form for creating/editing a Team, with autocomplete leader and roster pickers."""
    def __init__(self, *args, **kwargs):
        super(EditTeamForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_id = 'EditTeamForm'
        self.helper.layout = Layout(
            Div(
                Field('group'),
                Field('name'),
                Field('favorite'),
            ),
            Field('description'),
            Field('leader'),
            Field('roster'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
            ),
        )
    class Meta:
        model = Team
        exclude = ('id',)
        widgets = {
            'roster': autocomplete_light.MultipleChoiceWidget('MonsterInstanceAutocomplete'),
            'leader': autocomplete_light.ChoiceWidget('MonsterInstanceAutocomplete'),
        }
    def clean(self):
        """
        Cross-field validation: the leader may not also appear in the roster.

        Raises:
            ValidationError: if the chosen leader is also in the roster.
        """
        from django.core.exceptions import ValidationError
        # Run the parent's validation first and work from its cleaned data.
        # (The original called super().clean() last and discarded its result.)
        cleaned_data = super(EditTeamForm, self).clean()
        leader = cleaned_data.get('leader')
        roster = cleaned_data.get('roster')
        # Either field may be absent from cleaned_data if its own validation
        # failed; the original `leader in roster` raised TypeError on roster=None.
        if leader is not None and roster is not None and leader in roster:
            raise ValidationError(
                'Leader cannot be included in the roster as well',
                code='leader_in_roster'
            )
        return cleaned_data
class AddRuneInstanceForm(ModelForm):
def __init__(self, *args, **kwargs):
super(AddRuneInstanceForm, self).__init__(*args, **kwargs)
self.fields['type'].choices = self.fields['type'].choices[1:] # Remove the empty '----' option from the list
self.fields['stars'].label = False
self.fields['main_stat'].label = False
self.fields['main_stat_value'].label = False
self.fields['innate_stat'].label = False
self.fields['innate_stat_value'].label = False
self.fields['substat_1'].label = False
self.fields['substat_1_value'].label = False
self.fields['substat_2'].label = False
self.fields['substat_2_value'].label = False
self.fields['substat_3'].label = False
self.fields['substat_3_value'].label = False
self.fields['substat_4'].label = False
self.fields['substat_4_value'].label = False
self.fields['assigned_to'].label = False
self.helper = FormHelper(self)
self.helper.form_method = 'post'
self.helper.form_id = 'addRuneForm'
self.helper.form_class = 'ajax-form'
self.helper.layout = Layout(
Div(
Field('type', template="crispy/rune_button_radio_select.html"),
css_class='col-lg-3',
),
Div(
Div(
Div(Field('slot', placeholder='1-6'), css_class='col-lg-4 col-lg-offset-3'),
Div(Field('level', placeholder='0-15'), css_class='col-lg-5'),
css_class='row'
),
Div(
Div(HTML('<label>Stars</label>'), css_class='col-lg-3 text-right no-right-gutter'),
Div(Field('stars', placeholder='1-6'), css_class='col-lg-9'),
css_class='row'
),
Div(
Div(HTML('<label>Stat Type</label>'), css_class='col-lg-4 col-lg-offset-3'),
Div(HTML('<label>Stat Value</label>'), css_class='col-lg-5'),
css_class='row',
),
Div(
Div(HTML('<label>Main Stat</label>'), css_class='col-lg-3 text-right no-right-gutter'),
Field('main_stat', wrapper_class='col-lg-4'),
Field('main_stat_value', wrapper_class='col-lg-5'),
css_class='row',
),
Div(
Div(HTML('<label>Innate Stat</label>'), css_class='col-lg-3 text-right no-right-gutter'),
Div('innate_stat', css_class='col-lg-4'),
Div('innate_stat_value', css_class='col-lg-5'),
css_class='row',
),
Div(
Div(HTML('<label>Substat 1</label>'), css_class='col-lg-3 text-right no-right-gutter'),
Div('substat_1', css_class='col-lg-4'),
Div('substat_1_value', css_class='col-lg-5'),
css_class='row',
),
Div(
Div(HTML('<label>Substat 2</label>'), css_class='col-lg-3 text-right no-right-gutter'),
Div('substat_2', css_class='col-lg-4'),
Div('substat_2_value', css_class='col-lg-5'),
css_class='row',
),
Div(
Div(HTML('<label>Substat 3</label>'), css_class='col-lg-3 text-right no-right-gutter'),
Div('substat_3', css_class='col-lg-4'),
Div('substat_3_value', css_class='col-lg-5'),
css_class='row',
),
Div(
Div(HTML('<label>Substat 4</label>'), css_class='col-lg-3 text-right no-right-gutter'),
Div('substat_4', css_class='col-lg-4'),
Div('substat_4_value', css_class='col-lg-5'),
css_class='row',
),
Div(
Div(HTML('<label>Assign To</label>'), css_class='col-lg-3 text-right no-right-gutter'),
Div(
Field('assigned_to'),
css_class='col-lg-9',
),
css_class='row',
),
css_class='col-lg-9',
),
Div(css_class='clearfix'),
FormActions(
Submit('save', 'Save'),
),
)
    class Meta:
        # ModelForm configuration: which RuneInstance fields are editable here.
        model = RuneInstance
        # Explicit whitelist; owner and derived/computed columns stay server-managed.
        fields = (
            'type', 'stars', 'level', 'slot',
            'main_stat', 'main_stat_value',
            'innate_stat', 'innate_stat_value',
            'substat_1', 'substat_1_value',
            'substat_2', 'substat_2_value',
            'substat_3', 'substat_3_value',
            'substat_4', 'substat_4_value',
            'assigned_to',
        )
        widgets = {
            # Autocomplete against the user's monsters instead of a plain <select>.
            'assigned_to': autocomplete_light.ChoiceWidget('MonsterInstanceAutocomplete'),
        }
class AssignRuneForm(forms.Form):
    """Filter form shown when picking a rune to assign to a monster slot.

    Field names use Django queryset-lookup suffixes (``__gte``/``__lte``) so a
    view can feed the cleaned data almost directly into ``filter()``. Every
    field is optional; a blank field applies no filtering.
    """
    type = forms.MultipleChoiceField(
        choices=RuneInstance.TYPE_CHOICES,
        widget=forms.CheckboxSelectMultiple,
        required=False
    )
    level__gte = forms.IntegerField(
        label="Minimum Level",
        min_value=0,
        max_value=15,
        required=False,
    )
    stars__gte = forms.IntegerField(
        label="Minimum Stars",
        required=False
    )
    stars__lte = forms.IntegerField(
        label="Maximum Stars",
        required=False
    )
    slot = forms.IntegerField(
        min_value=1,
        max_value=6,
        required=False
    )

    def _tristate_filter(label):
        # Factory for the repeated yes/no/don't-care stat filters (was 8 copy-pasted
        # declarations). None (the '---' option) means "do not filter on this stat".
        return forms.NullBooleanField(
            label=label,
            required=False,
            widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))),
        )

    has_hp = _tristate_filter('Has HP')
    has_atk = _tristate_filter('Has ATK')
    has_def = _tristate_filter('Has DEF')
    has_crit_rate = _tristate_filter('Has CRI Rate')
    has_crit_dmg = _tristate_filter('Has CRI Dmg')
    has_speed = _tristate_filter('Has SPD')
    has_resist = _tristate_filter('Has RES')
    has_accuracy = _tristate_filter('Has ACC')
    del _tristate_filter  # remove the helper so it does not linger as a form method

    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_id = 'AssignRuneForm'
    helper.layout = Layout(
        FormActions(
            StrictButton('Create New', id='addNewRune', css_class='btn btn-primary btn-block'),
            Reset('Reset Form', 'Reset Filters', css_class='btn btn-danger btn-block'),
        ),
        Div(
            Div(
                Field('type', css_class='auto-submit', template='crispy/rune_button_checkbox_select_notext.html'),
                Field('has_hp', css_class='auto-submit'),
                Field('has_atk', css_class='auto-submit'),
                Field('has_def', css_class='auto-submit'),
                Field('has_crit_rate', css_class='auto-submit'),
                Field('has_crit_dmg', css_class='auto-submit'),
                Field('has_speed', css_class='auto-submit'),
                Field('has_resist', css_class='auto-submit'),
                Field('has_accuracy', css_class='auto-submit'),
                css_class='col-md-6',
            ),
            Div(
                Field('level__gte', css_class='auto-submit'),
                Field('stars__gte', css_class='rating hidden auto-submit', value=1, data_start=0, data_stop=6, data_stars=6),
                Field('stars__lte', css_class='rating hidden auto-submit', value=6, data_start=0, data_stop=6, data_stars=6),
                css_class='col-md-6',
            ),
            css_class='row',
        ),
        # Slot is dictated by the socket being filled, so keep it hidden from the user.
        Field('slot', type='hidden', css_class='auto-submit'),
    )
class FilterRuneForm(forms.Form):
    """Filter panel for the rune inventory page.

    Field names with ``__gte``/``__lte`` suffixes mirror Django queryset
    lookups so cleaned data maps straight onto ``filter()``. All fields are
    optional; blank means "no filtering on this attribute".
    """
    type = forms.MultipleChoiceField(
        choices=RuneInstance.TYPE_CHOICES,
        widget=forms.CheckboxSelectMultiple,
        required=False,
    )
    main_stat = forms.MultipleChoiceField(
        choices=RuneInstance.STAT_CHOICES,
        widget=forms.CheckboxSelectMultiple,
        required=False,
    )
    level__gte = forms.IntegerField(
        label="Minimum Level",
        min_value=0,
        max_value=15,
        required=False,
    )
    stars__gte = forms.IntegerField(
        label="Minimum Stars",
        required=False
    )
    stars__lte = forms.IntegerField(
        label="Maximum Stars",
        required=False
    )
    slot = forms.IntegerField(
        min_value=1,
        max_value=6,
        required=False
    )

    def _tristate_filter(label):
        # Factory for the repeated yes/no/don't-care filters (was 9 copy-pasted
        # declarations). None (the '---' option) means "do not filter".
        return forms.NullBooleanField(
            label=label,
            required=False,
            widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))),
        )

    assigned_to = _tristate_filter('Is Assigned')
    has_hp = _tristate_filter('Has HP')
    has_atk = _tristate_filter('Has ATK')
    has_def = _tristate_filter('Has DEF')
    has_crit_rate = _tristate_filter('Has CRI Rate')
    has_crit_dmg = _tristate_filter('Has CRI Dmg')
    has_speed = _tristate_filter('Has SPD')
    has_resist = _tristate_filter('Has RES')
    has_accuracy = _tristate_filter('Has ACC')
    del _tristate_filter  # remove the helper so it does not linger as a form method

    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_id = 'FilterInventoryForm'
    helper.layout = Layout(
        Div(
            Div(
                Field('main_stat', css_class='auto-submit'),
                css_class='col-lg-1',
            ),
            Div(
                Div(
                    Div(
                        Field('type', css_class='auto-submit', template='crispy/rune_button_checkbox_select_notext.html'),
                        css_class='col-md-12',
                    ),
                    css_class='row'
                ),
                Div(
                    Div(
                        Div(
                            Field('slot', css_class='auto-submit'),
                            css_class='pull-left condensed',
                        ),
                        Div(
                            Field('assigned_to', css_class='auto-submit'),
                            css_class='pull-left condensed',
                        ),
                        Div(
                            Field('level__gte', css_class='auto-submit'),
                            css_class='pull-left condensed',
                        ),
                        Div(
                            Field('stars__gte', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
                            css_class='pull-left condensed'
                        ),
                        Div(
                            Field('stars__lte', css_class='rating hidden', value=6, data_start=0, data_stop=6, data_stars=6),
                            css_class='pull-left condensed'
                        ),
                        css_class='col-md-12',
                    ),
                    css_class='row',
                ),
                Div(
                    Div(
                        Div(Field('has_hp', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_atk', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_def', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_crit_rate', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_crit_dmg', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_speed', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_resist', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_accuracy', css_class='auto-submit'), css_class='pull-left condensed'),
                        css_class='col-md-12',
                    ),
                    css_class='row',
                ),
                css_class='col-lg-10',
            ),
            css_class='row',
        ),
        FormActions(
            Reset('Reset Form', 'Reset Filters', css_class='btn btn-danger'),
        ),
    )
class ImportRuneForm(forms.Form):
    """Accepts a pasted JSON dump from the external rune optimizer spreadsheet."""
    json_data = forms.CharField(
        max_length=999999,
        required=True,
        label='Paste Rune Data',
        help_text=mark_safe('Data is exported from the <a href="https://b7e2310d2b970be56f8b12314a4ade9bfc3d620b-www.googledrive.com/host/0B-GpYLz2ELqgfjdzTURIVFJVcGdlbW8xLWlyQTJKVWs5V0xrZHYyWGlYTFZnMElFX09RVmc/" target="_blank">Summoners War Rune Database and Optimizer</a>'),
        widget=forms.Textarea(),
    )
    helper = FormHelper()
    helper.form_tag = False
    helper.layout = Layout(
        Alert('You can only import runes. Importing will create new runes, not update your current runes. Monsters and saved builds from the spreadsheet are ignored.', css_class='alert-warning'),
        Field('json_data'),
        FormActions(
            Submit('import', 'Import'),
        ),
    )
    def clean_json_data(self):
        """Parse the pasted text and return it as Python data.

        Raises:
            forms.ValidationError: if the pasted text is not valid JSON.
        """
        import json
        data = self.cleaned_data['json_data']
        try:
            data = json.loads(data)
        except ValueError:
            # json.loads raises ValueError (JSONDecodeError subclasses it).
            # Catching only that — instead of the previous bare `except:` —
            # avoids masking unrelated programming errors as a form error.
            raise forms.ValidationError("Error parsing JSON data.")
        return data
class ExportRuneForm(forms.Form):
    """Displays the user's runes as JSON for pasting into the optimizer spreadsheet."""
    json_data = forms.CharField(
        max_length=999999,
        label='Exported Rune Data',
        help_text=mark_safe('You can paste this data into the <a href="https://b7e2310d2b970be56f8b12314a4ade9bfc3d620b-www.googledrive.com/host/0B-GpYLz2ELqgfjdzTURIVFJVcGdlbW8xLWlyQTJKVWs5V0xrZHYyWGlYTFZnMElFX09RVmc/" target="_blank">Summoners War Rune Database and Optimizer</a>'),
        widget=forms.Textarea(),
    )
    helper = FormHelper()
    helper.form_show_labels = False
    helper.layout = Layout(
        # Fixed the garbled warning sentence ("will into the optimizer spreadsheet OVERWRITE").
        Alert('Importing this data into the optimizer spreadsheet will <strong>OVERWRITE</strong> all runes, monsters, and saved builds currently present. It is advised to back up your existing data first.', css_class='alert-danger'),
        Field('json_data'),
    )
Update column sizes in an attempt to make the filters more usable on small screens
from django import forms
from django.forms import ModelForm
from django.forms.models import BaseModelFormSet
from django.core.validators import RegexValidator
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm
from django.templatetags.static import static
from django.utils.safestring import mark_safe
from .models import MonsterInstance, Summoner, TeamGroup, Team, RuneInstance
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Div, Layout, Field, Button, HTML, Hidden, Reset
from crispy_forms.bootstrap import FormActions, PrependedText, FieldWithButtons, StrictButton, InlineField, Alert
from captcha.fields import ReCaptchaField
import autocomplete_light
# Base static URL for image assets referenced from crispy layout HTML (e.g. essence icons).
STATIC_URL_PREFIX = static('herders/images/')
# User account and authentication forms
class CrispyAuthenticationForm(AuthenticationForm):
    """Standard Django login form with crispy-forms rendering attached."""
    def __init__(self, *args, **kwargs):
        super(CrispyAuthenticationForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_method = 'post'
        helper.form_action = 'login'
        helper.layout = Layout(
            Field('username'),
            Field('password'),
            # Preserve the post-login redirect target supplied by the view.
            Hidden('next', value='{{ next }}'),
            FormActions(Submit('login', 'Log In', css_class='btn-lg btn-primary btn-block')),
        )
        self.helper = helper
class CrispyPasswordChangeForm(PasswordChangeForm):
    """Password-change form with crispy-forms rendering attached."""
    def __init__(self, *args, **kwargs):
        super(CrispyPasswordChangeForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_method = 'post'
        helper.form_action = 'password_change'
        # Render the three standard password-change fields in order, then the submit.
        password_fields = [Field(name) for name in ('old_password', 'new_password1', 'new_password2')]
        password_fields.append(FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')))
        helper.layout = Layout(*password_fields)
        self.helper = helper
class CrispyPasswordResetForm(PasswordResetForm):
    """Password-reset request form (email entry) with crispy-forms rendering."""
    def __init__(self, *args, **kwargs):
        super(CrispyPasswordResetForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_method = 'post'
        helper.form_action = 'password_reset'
        helper.layout = Layout(
            Field('email'),
            FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')),
        )
        self.helper = helper
class CrispySetPasswordForm(SetPasswordForm):
    """Set-new-password form (used from a reset link) with crispy-forms rendering."""
    def __init__(self, *args, **kwargs):
        super(CrispySetPasswordForm, self).__init__(*args, **kwargs)
        helper = FormHelper()
        helper.form_method = 'post'
        helper.layout = Layout(
            Field('new_password1'),
            Field('new_password2'),
            FormActions(Submit('submit', 'Submit', css_class='btn-lg btn-primary btn-block')),
        )
        self.helper = helper
class CrispyChangeUsernameForm(forms.Form):
    """Change the login username (which is also used in the profile URL)."""
    # Same character policy as registration: letters, digits and underscore only.
    _username_validator = RegexValidator(
        regex='^[a-zA-Z0-9_]+$',
        message='Username must contain only alphanumeric characters and underscore.',
        code='invalid_username',
    )
    username = forms.CharField(
        required=True,
        label='New Username',
        help_text='This will change the username used to log in and the URL used to access your profile.',
        validators=[_username_validator],
    )
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_action = 'username_change'
    helper.layout = Layout(
        Field('username', css_class='input-sm'),
        FormActions(Submit('change', 'Change', css_class='btn-lg btn-primary btn-block')),
    )
class RegisterUserForm(forms.Form):
    """New-account registration form with CAPTCHA."""
    # Letters, digits and underscore only — the username appears in profile URLs.
    _username_validator = RegexValidator(
        regex='^[a-zA-Z0-9_]+$',
        message='Username must contain only alphanumeric characters and underscore.',
        code='invalid_username',
    )
    username = forms.CharField(
        required=True,
        label='Username',
        help_text='Used to link to your profile to others: http://swarfarm.com/profile/<username>/',
        validators=[_username_validator],
    )
    email = forms.EmailField(required=True, help_text='Your email address will only be used for password resets and account expiration notices.')
    password = forms.CharField(label="Password", required=True, widget=forms.PasswordInput)
    summoner_name = forms.CharField(label="Summoner's War Account Name", required=False, help_text='Not required. Visible to others if you make your SWARFARM account public.')
    is_public = forms.BooleanField(label='Make my SWARFARM account visible to others', required=False)
    captcha = ReCaptchaField()
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_action = 'herders:register'
    helper.layout = Layout(
        Field('username', css_class='input-sm'),
        Field('password', css_class='input-sm'),
        Field('email', css_class='input-sm'),
        Field('summoner_name', css_class='input-sm'),
        Field('is_public'),
        Field('captcha'),
        FormActions(Submit('register', 'Register', css_class='btn-lg btn-primary btn-block'))
    )
class EditUserForm(ModelForm):
    """Edit the email address on the Django auth User record."""
    class Meta:
        model = User
        fields = ('email',)
    def __init__(self, *args, **kwargs):
        super(EditUserForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        # Rendered inside a larger page <form>, so no form tag of its own.
        self.helper.form_tag = False
        self.helper.layout = Layout(Div(Field('email')))
class EditSummonerForm(ModelForm):
    """Edit the Summoner profile: in-game account name, visibility and timezone."""
    class Meta:
        model = Summoner
        fields = ('summoner_name', 'public', 'timezone')
        labels = {
            'summoner_name': "Summoner's War Account Name",
            'public': 'Make my SWARFARM account visible to others',
        }
    def __init__(self, *args, **kwargs):
        super(EditSummonerForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        # Rendered inside a larger page <form>, so no form tag of its own.
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Div(
                Field('summoner_name'),
                Field('public'),
                Field('timezone'),
            ),
        )
class DeleteProfileForm(forms.Form):
    """Account-deletion form requiring an exact typed acknowledgement plus CAPTCHA."""
    # The user must retype this exact phrase before deletion proceeds.
    _acknowledge_validator = RegexValidator(
        regex='^I acknowledge everything will be permanently deleted$',
        message="You didn't enter the correct text.",
        code='invalid_acknowledgement',
    )
    confirmbox = forms.BooleanField(label="I seriously do want to delete my account and all associated data", required=True)
    passcode = forms.CharField(
        required=True,
        label='Acknowledgement:',
        help_text='Enter the following text: I acknowledge everything will be permanently deleted',
        validators=[_acknowledge_validator],
    )
    captcha = ReCaptchaField()
    helper = FormHelper()
    helper.form_method = 'post'
    helper.layout = Layout(
        Div(
            Field('confirmbox', css_class='checkbox'),
            Field('passcode', css_class='input-sm'),
            Field('captcha'),
            FormActions(
                Submit('delete', 'Delete', css_class='btn-lg btn-danger btn-block'),
            ),
            css_class='col-md-6 col-md-offset-3',
        ),
    )
# SWARFARM forms
class EditEssenceStorageForm(ModelForm):
    """Edit stored awakening-essence quantities on the Summoner model.

    The 18 storage fields follow the naming pattern ``storage_<element>_<quality>``
    for every combination of the six elements and three qualities below, so the
    layout, ``Meta.fields`` and ``Meta.labels`` are generated instead of being
    hand-duplicated 18 times (the previous copy-paste layout is reproduced exactly).
    """
    # Order matters: it defines both the on-screen row order and Meta.fields order.
    ELEMENTS = ('magic', 'fire', 'water', 'wind', 'light', 'dark')
    QUALITIES = ('low', 'mid', 'high')

    def __init__(self, *args, **kwargs):
        super(EditEssenceStorageForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_show_labels = True
        # One bootstrap row per element, one icon-prefixed input per quality.
        rows = []
        for element in self.ELEMENTS:
            cells = []
            for quality in self.QUALITIES:
                field_name = 'storage_%s_%s' % (element, quality)
                icon = '<img src="%sessences/%s_%s.png" class="prepended-image"/>' % (STATIC_URL_PREFIX, element, quality)
                cells.append(Div(
                    PrependedText(field_name, icon, min=0),
                    css_class='col-lg-1 storage_group prepended-image-group',
                ))
            rows.append(Div(*cells, css_class='row'))
        rows.append(Div(
            FormActions(
                Submit('save', 'Save and Go Back'),
                Submit('saveandcontinue', 'Save and Continue Editing'),
            ),
            css_class='row',
        ))
        self.helper.layout = Layout(*rows)

    class Meta:
        model = Summoner
        # Generated in the same element-major, quality-minor order as before:
        # storage_magic_low, storage_magic_mid, ..., storage_dark_high.
        # (Literal tuples are repeated here because a nested class body cannot
        # see names defined on the enclosing class.)
        fields = tuple(
            'storage_%s_%s' % (element, quality)
            for element in ('magic', 'fire', 'water', 'wind', 'light', 'dark')
            for quality in ('low', 'mid', 'high')
        )
        # e.g. 'storage_magic_low' -> 'Magic Low'
        labels = dict(
            ('storage_%s_%s' % (element, quality), '%s %s' % (element.title(), quality.title()))
            for element in ('magic', 'fire', 'water', 'wind', 'light', 'dark')
            for quality in ('low', 'mid', 'high')
        )
class AddMonsterInstanceForm(autocomplete_light.ModelForm):
    """Modal form for adding a monster to the user's collection.

    Submitted via AJAX (helper.form_class = 'ajax-form'). Several data-*
    attributes on the layout are hooks for client-side JS: the autocomplete
    pre-fills stars/fodder/priority on selection, and the "Max" button sets
    the level field from the chosen star count.
    """
    # Autocomplete against the full monster bestiary instead of a plain <select>.
    monster = autocomplete_light.ModelChoiceField('MonsterAutocomplete')
    def __init__(self, *args, **kwargs):
        super(AddMonsterInstanceForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            Field(
                'monster',
                # Bootstrap popover with usage tips, shown while the field has focus.
                data_toggle='popover',
                data_trigger='focus',
                data_container='body',
                title='Autocomplete Tips',
                data_content="Enter the monster's awakened or unawakened name (either will work). To further narrow results, type the element too. Example: \"Raksha water\" will list water Rakshasa and Su",
                # auto_id values tell the JS which inputs to populate on selection.
                data_stars_field=self['stars'].auto_id,
                data_fodder_field=self['fodder'].auto_id,
                data_priority_field=self['priority'].auto_id,
                data_set_stars='',
            ),
            # Star rating rendered by the JS 'rating' plugin; the raw input stays hidden.
            Field('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
            FieldWithButtons(
                Field('level', value=1, min=1, max=40),
                StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
            ),
            Field('fodder', css_class='checkbox'),
            Field('in_storage', css_class='checkbox'),
            Field('ignore_for_fusion', css_class='checkbox'),
            Field('priority',),
            Field('notes'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
                Button('cancel', 'Cancel', css_class='btn btn-link', data_dismiss='modal')
            ),
        )
    class Meta:
        model = MonsterInstance
        fields = ('monster', 'stars', 'level', 'fodder', 'in_storage', 'ignore_for_fusion', 'priority', 'notes')
class BulkAddMonsterInstanceFormset(BaseModelFormSet):
    """Formset for the bulk-add page; starts empty rather than listing existing instances."""
    def __init__(self, *args, **kwargs):
        super(BulkAddMonsterInstanceFormset, self).__init__(*args, **kwargs)
        # Bulk-add is create-only: never pre-populate rows from the database.
        self.queryset = MonsterInstance.objects.none()
class BulkAddMonsterInstanceForm(autocomplete_light.ModelForm):
    """One table row of the bulk-add page.

    Rendered inside an enclosing <table>/<form> supplied by the template, so
    the layout emits raw <td> wrappers and suppresses the form tag, labels
    and per-row CSRF token.
    """
    monster = autocomplete_light.ModelChoiceField('MonsterAutocomplete')
    def __init__(self, *args, **kwargs):
        super(BulkAddMonsterInstanceForm, self).__init__(*args, **kwargs)
        # Optional so that untouched blank rows in the table validate cleanly.
        self.fields['monster'].required = False
        self.helper = FormHelper(self)
        self.helper.form_tag = False
        self.helper.form_show_labels = False
        # NOTE(review): per-row CSRF disabled — presumably the enclosing form
        # template emits a single token; verify against the bulk-add template.
        self.helper.disable_csrf = True
        self.helper.layout = Layout(
            HTML('<td>'),
            InlineField(
                'monster',
                # auto_id hooks let the JS autocomplete pre-fill stars/fodder on selection.
                data_stars_field=self['stars'].auto_id,
                data_fodder_field=self['fodder'].auto_id,
                data_set_stars=''
            ),
            HTML('</td><td>'),
            # Star rating rendered by the JS 'rating' plugin; raw input stays hidden.
            InlineField('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
            HTML('</td><td>'),
            FieldWithButtons(
                Field('level', value=1, min=1, max=40),
                StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
            ),
            HTML('</td><td>'),
            Field('in_storage'),
            HTML('</td><td>'),
            Field('fodder'),
            HTML('</td>'),
        )
    class Meta:
        model = MonsterInstance
        fields = ('monster', 'stars', 'level', 'in_storage', 'fodder')
class EditMonsterInstanceForm(ModelForm):
    """Modal edit form for an owned monster: stars/level, flags, skill levels and notes.

    Submitted via AJAX (helper.form_class = 'ajax-form').
    """
    def __init__(self, *args, **kwargs):
        super(EditMonsterInstanceForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            Div(
                # Star rating rendered by the JS 'rating' plugin; raw input stays hidden.
                Field('stars', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
                FieldWithButtons(
                    Field('level', value=1, min=1, max=40),
                    # "Max" button sets the level field from the selected star count (JS hook).
                    StrictButton("Max", name="Set_Max_Level", data_stars_field=self['stars'].auto_id, data_level_field=self['level'].auto_id, data_set_max_level=''),
                ),
                Field('fodder', css_class='checkbox'),
                Field('in_storage', css_class='checkbox'),
                Field('ignore_for_fusion', css_class='checkbox'),
                'priority',
                'skill_1_level',
                'skill_2_level',
                'skill_3_level',
                'skill_4_level',
                Field('notes'),
            ),
            Div(
                FormActions(
                    Submit('save', 'Save', css_class='btn btn-primary'),
                    HTML("""<button class="btn btn-link" data-dismiss="modal">Cancel</button>"""),
                ),
            )
        )
    class Meta:
        model = MonsterInstance
        # Owner and base monster are not editable after creation.
        exclude = ('owner', 'monster')
class PowerUpMonsterInstanceForm(forms.Form):
    """Select material monsters to consume for a power-up or evolution.

    The same form serves both actions: it renders two submit buttons named
    'power_up' and 'evolve'. Submitted via AJAX (helper.form_class = 'ajax-form').
    """
    # Autocomplete multi-select against the user's own monster instances.
    monster = autocomplete_light.ModelMultipleChoiceField('MonsterInstanceAutocomplete')
    monster.label = 'Material Monsters'
    monster.required = False
    # Allows bypassing the server-side evolution validity check.
    ignore_evolution = forms.BooleanField(
        label='Ignore evolution error checking',
        required=False,
    )
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_class = 'ajax-form'
    helper.layout = Layout(
        Field('monster'),
        Field('ignore_evolution'),
        FormActions(
            Submit('power_up', 'Power Up', css_class='btn btn-primary'),
            Submit('evolve', 'Evolve', css_class='btn btn-primary'),
        )
    )
class AwakenMonsterInstanceForm(forms.Form):
    """Confirmation form for awakening a monster, submitted via AJAX."""
    # Checked by default in the layout below (checked='').
    subtract_materials = forms.BooleanField(
        required=False,
        label='Subtract Materials from stock (Insufficient quantities will be reduced to 0)',
    )
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_class = 'ajax-form'
    helper.layout = Layout(
        Div(Field('subtract_materials', css_class='checkbox', checked='')),
        Div(
            FormActions(
                Submit('awaken', 'Awaken', css_class='btn btn-primary'),
                HTML("""<a href="{{ return_path }}" class="btn btn-link">Cancel</a>"""),
            ),
        ),
    )
class AddTeamGroupForm(ModelForm):
    """Modal form for creating a new team group."""
    class Meta:
        model = TeamGroup
        exclude = ('id', 'owner')
    def __init__(self, *args, **kwargs):
        super(AddTeamGroupForm, self).__init__(*args, **kwargs)
        helper = FormHelper(self)
        helper.form_method = 'post'
        # helper.form_action must be set in view
        helper.layout = Layout(
            Div(Field('name'), css_class='modal-body'),
            Div(
                FormActions(
                    Submit('save', 'Save', css_class='btn btn-primary'),
                    Button('cancel', 'Cancel', css_class='btn btn-link', data_dismiss='modal'),
                ),
                css_class='modal-footer',
            ),
        )
        self.helper = helper
class EditTeamGroupForm(ModelForm):
    """Rename a team group, with cancel and delete links alongside the save button."""
    class Meta:
        model = TeamGroup
        exclude = ('id', 'owner')
    def __init__(self, *args, **kwargs):
        super(EditTeamGroupForm, self).__init__(*args, **kwargs)
        helper = FormHelper(self)
        helper.form_method = 'post'
        helper.layout = Layout(
            Field('name'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
                HTML("""<a href="{{ return_path }}" class="btn btn-link">Cancel</a>"""),
                HTML("""<a href="{% url 'herders:team_group_delete' profile_name=profile_name group_id=group_id%}" class="btn btn-danger pull-right">Delete</a>"""),
            ),
        )
        self.helper = helper
class DeleteTeamGroupForm(forms.Form):
    """Delete a team group, optionally moving its teams to another group first."""
    # Leaving this blank (required=False) means the group's teams are deleted too.
    reassign_group = forms.ModelChoiceField(
        queryset=TeamGroup.objects.all(),
        required=False,
        label="Reassign teams in this group to:"
    )
    helper = FormHelper()
    helper.form_method = 'post'
    # helper.form_action must be set in view
    helper.layout = Layout(
        Field('reassign_group', css_class='input-sm'),
        FormActions(
            Submit('apply', 'Apply', css_class='btn btn-primary'),
            Submit('delete', 'Delete all teams', css_class='btn btn-danger'),
        ),
    )
class EditTeamForm(ModelForm):
    """Edit a team's group, name, leader, description and roster."""
    def __init__(self, *args, **kwargs):
        super(EditTeamForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_id = 'EditTeamForm'
        self.helper.layout = Layout(
            Div(
                Field('group'),
                Field('name'),
                Field('favorite'),
            ),
            Field('description'),
            Field('leader'),
            Field('roster'),
            FormActions(
                Submit('save', 'Save', css_class='btn btn-primary'),
            ),
        )
    class Meta:
        model = Team
        exclude = ('id',)
        widgets = {
            # Autocomplete against the user's monster instances.
            'roster': autocomplete_light.MultipleChoiceWidget('MonsterInstanceAutocomplete'),
            'leader': autocomplete_light.ChoiceWidget('MonsterInstanceAutocomplete'),
        }
    def clean(self):
        """Cross-field validation: the leader must not also appear in the roster."""
        from django.core.exceptions import ValidationError
        cleaned_data = super(EditTeamForm, self).clean()
        leader = cleaned_data.get('leader')
        roster = cleaned_data.get('roster')
        # Guard against None: either key is absent from cleaned_data when its own
        # field validation failed, and `leader in None` would raise TypeError.
        if leader is not None and roster is not None and leader in roster:
            raise ValidationError(
                'Leader cannot be included in the roster as well',
                code='leader_in_roster'
            )
        return cleaned_data
class AddRuneInstanceForm(ModelForm):
    """Create/edit form for a RuneInstance.

    Layout: a rune-type radio picker in the left column (col-lg-3) and a grid
    of slot/level/stars plus main/innate/substat rows on the right (col-lg-9).
    Submitted via AJAX (helper.form_class = 'ajax-form').
    """
    def __init__(self, *args, **kwargs):
        super(AddRuneInstanceForm, self).__init__(*args, **kwargs)
        self.fields['type'].choices = self.fields['type'].choices[1:] # Remove the empty '----' option from the list
        # Field labels are suppressed because the layout renders its own <label> markup.
        self.fields['stars'].label = False
        self.fields['main_stat'].label = False
        self.fields['main_stat_value'].label = False
        self.fields['innate_stat'].label = False
        self.fields['innate_stat_value'].label = False
        self.fields['substat_1'].label = False
        self.fields['substat_1_value'].label = False
        self.fields['substat_2'].label = False
        self.fields['substat_2_value'].label = False
        self.fields['substat_3'].label = False
        self.fields['substat_3_value'].label = False
        self.fields['substat_4'].label = False
        self.fields['substat_4_value'].label = False
        self.fields['assigned_to'].label = False
        self.helper = FormHelper(self)
        self.helper.form_method = 'post'
        self.helper.form_id = 'addRuneForm'
        self.helper.form_class = 'ajax-form'
        self.helper.layout = Layout(
            Div(
                # Rune type selector rendered as image buttons by a custom template.
                Field('type', template="crispy/rune_button_radio_select.html"),
                css_class='col-lg-3',
            ),
            Div(
                Div(
                    Div(Field('slot', placeholder='1-6'), css_class='col-lg-4 col-lg-offset-3'),
                    Div(Field('level', placeholder='0-15'), css_class='col-lg-5'),
                    css_class='row'
                ),
                Div(
                    Div(HTML('<label>Stars</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div(Field('stars', placeholder='1-6'), css_class='col-lg-9'),
                    css_class='row'
                ),
                # Column headers for the stat type/value grid below.
                Div(
                    Div(HTML('<label>Stat Type</label>'), css_class='col-lg-4 col-lg-offset-3'),
                    Div(HTML('<label>Stat Value</label>'), css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Main Stat</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Field('main_stat', wrapper_class='col-lg-4'),
                    Field('main_stat_value', wrapper_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Innate Stat</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('innate_stat', css_class='col-lg-4'),
                    Div('innate_stat_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 1</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_1', css_class='col-lg-4'),
                    Div('substat_1_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 2</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_2', css_class='col-lg-4'),
                    Div('substat_2_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 3</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_3', css_class='col-lg-4'),
                    Div('substat_3_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Substat 4</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div('substat_4', css_class='col-lg-4'),
                    Div('substat_4_value', css_class='col-lg-5'),
                    css_class='row',
                ),
                Div(
                    Div(HTML('<label>Assign To</label>'), css_class='col-lg-3 text-right no-right-gutter'),
                    Div(
                        Field('assigned_to'),
                        css_class='col-lg-9',
                    ),
                    css_class='row',
                ),
                css_class='col-lg-9',
            ),
            Div(css_class='clearfix'),
            FormActions(
                Submit('save', 'Save'),
            ),
        )
    class Meta:
        model = RuneInstance
        fields = (
            'type', 'stars', 'level', 'slot',
            'main_stat', 'main_stat_value',
            'innate_stat', 'innate_stat_value',
            'substat_1', 'substat_1_value',
            'substat_2', 'substat_2_value',
            'substat_3', 'substat_3_value',
            'substat_4', 'substat_4_value',
            'assigned_to',
        )
        widgets = {
            # Autocomplete against the user's monsters instead of a plain <select>.
            'assigned_to': autocomplete_light.ChoiceWidget('MonsterInstanceAutocomplete'),
        }
class AssignRuneForm(forms.Form):
    """Filter form shown when assigning an existing rune to a monster.

    Field names use Django ORM lookup suffixes (``level__gte``,
    ``stars__lte``) so the view can feed ``cleaned_data`` straight into a
    queryset ``.filter(**...)`` call. All fields are optional; an unset
    field means "no filter on this attribute".
    """
    # Rune set types, rendered as a row of checkbox buttons (custom template below).
    type = forms.MultipleChoiceField(
        choices=RuneInstance.TYPE_CHOICES,
        widget=forms.CheckboxSelectMultiple,
        required=False
    )
    level__gte = forms.IntegerField(
        label="Minimum Level",
        min_value=0,
        max_value=15,
        required=False,
    )
    stars__gte = forms.IntegerField(
        label="Minimum Stars",
        required=False
    )
    stars__lte = forms.IntegerField(
        label="Maximum Stars",
        required=False
    )
    slot = forms.IntegerField(
        min_value=1,
        max_value=6,
        required=False
    )
    # Tri-state filters: None = don't care, True/False = rune must (not) carry the stat.
    has_hp = forms.NullBooleanField(label='Has HP', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_atk = forms.NullBooleanField(label='Has ATK', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_def = forms.NullBooleanField(label='Has DEF', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_crit_rate = forms.NullBooleanField(label='Has CRI Rate', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_crit_dmg = forms.NullBooleanField(label='Has CRI Dmg', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_speed = forms.NullBooleanField(label='Has SPD', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_resist = forms.NullBooleanField(label='Has RES', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_accuracy = forms.NullBooleanField(label='Has ACC', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))

    # crispy-forms rendering config; defined at class level, shared by instances.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_id = 'AssignRuneForm'
    helper.layout = Layout(
        FormActions(
            StrictButton('Create New', id='addNewRune', css_class='btn btn-primary btn-block'),
            Reset('Reset Form', 'Reset Filters', css_class='btn btn-danger btn-block'),
        ),
        Div(
            Div(
                # 'auto-submit' marks inputs that trigger a form submit on change
                # (presumably wired up in page JS -- confirm in the template).
                Field('type', css_class='auto-submit', template='crispy/rune_button_checkbox_select_notext.html'),
                Field('has_hp', css_class='auto-submit'),
                Field('has_atk', css_class='auto-submit'),
                Field('has_def', css_class='auto-submit'),
                Field('has_crit_rate', css_class='auto-submit'),
                Field('has_crit_dmg', css_class='auto-submit'),
                Field('has_speed', css_class='auto-submit'),
                Field('has_resist', css_class='auto-submit'),
                Field('has_accuracy', css_class='auto-submit'),
                css_class='col-md-6',
            ),
            Div(
                Field('level__gte', css_class='auto-submit'),
                # Star range rendered as a JS star-rating widget ('rating hidden').
                Field('stars__gte', css_class='rating hidden auto-submit', value=1, data_start=0, data_stop=6, data_stars=6),
                Field('stars__lte', css_class='rating hidden auto-submit', value=6, data_start=0, data_stop=6, data_stars=6),
                css_class='col-md-6',
            ),
            css_class='row',
        ),
        # Slot is fixed by the monster's empty rune slot, so it is hidden here.
        Field('slot', type='hidden', css_class='auto-submit'),
    )
class FilterRuneForm(forms.Form):
    """Inventory filter form for browsing the user's rune collection.

    Like :class:`AssignRuneForm`, field names carry ORM lookup suffixes so
    ``cleaned_data`` can be passed directly into a queryset filter. All
    fields are optional.
    """
    type = forms.MultipleChoiceField(
        choices=RuneInstance.TYPE_CHOICES,
        widget=forms.CheckboxSelectMultiple,
        required=False,
    )
    main_stat = forms.MultipleChoiceField(
        choices=RuneInstance.STAT_CHOICES,
        widget=forms.CheckboxSelectMultiple,
        required=False,
    )
    level__gte = forms.IntegerField(
        label="Minimum Level",
        min_value=0,
        max_value=15,
        required=False,
    )
    stars__gte = forms.IntegerField(
        label="Minimum Stars",
        required=False
    )
    stars__lte = forms.IntegerField(
        label="Maximum Stars",
        required=False
    )
    slot = forms.IntegerField(
        min_value=1,
        max_value=6,
        required=False
    )
    # Tri-state: None = any, True = only assigned runes, False = only unassigned.
    assigned_to = forms.NullBooleanField(
        label="Is Assigned",
        required=False,
        widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No')))
    )
    # Tri-state stat-presence filters, one per rune stat.
    has_hp = forms.NullBooleanField(label='Has HP', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_atk = forms.NullBooleanField(label='Has ATK', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_def = forms.NullBooleanField(label='Has DEF', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_crit_rate = forms.NullBooleanField(label='Has CRI Rate', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_crit_dmg = forms.NullBooleanField(label='Has CRI Dmg', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_speed = forms.NullBooleanField(label='Has SPD', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_resist = forms.NullBooleanField(label='Has RES', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))
    has_accuracy = forms.NullBooleanField(label='Has ACC', required=False, widget=forms.Select(choices=((None, '---'), (True, 'Yes'), (False, 'No'))))

    # crispy-forms rendering config; class-level, shared by all instances.
    helper = FormHelper()
    helper.form_method = 'post'
    helper.form_id = 'FilterInventoryForm'
    helper.layout = Layout(
        Div(
            # Left column: main-stat checkboxes.
            Div(
                Field('main_stat', css_class='auto-submit'),
                css_class='col-sm-1',
            ),
            Div(
                # Rune-type button row (custom checkbox template).
                Div(
                    Div(
                        Field('type', css_class='auto-submit', template='crispy/rune_button_checkbox_select_notext.html'),
                        css_class='col-sm-12',
                    ),
                    css_class='row'
                ),
                # Numeric / assignment filters laid out side by side.
                Div(
                    Div(
                        Div(
                            Field('slot', css_class='auto-submit'),
                            css_class='pull-left condensed',
                        ),
                        Div(
                            Field('assigned_to', css_class='auto-submit'),
                            css_class='pull-left condensed',
                        ),
                        Div(
                            Field('level__gte', css_class='auto-submit'),
                            css_class='pull-left condensed',
                        ),
                        Div(
                            Field('stars__gte', css_class='rating hidden', value=1, data_start=0, data_stop=6, data_stars=6),
                            css_class='pull-left condensed'
                        ),
                        Div(
                            Field('stars__lte', css_class='rating hidden', value=6, data_start=0, data_stop=6, data_stars=6),
                            css_class='pull-left condensed'
                        ),
                        css_class='col-sm-12',
                    ),
                    css_class='row',
                ),
                # Stat-presence selects in one row.
                Div(
                    Div(
                        Div(Field('has_hp', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_atk', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_def', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_crit_rate', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_crit_dmg', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_speed', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_resist', css_class='auto-submit'), css_class='pull-left condensed'),
                        Div(Field('has_accuracy', css_class='auto-submit'), css_class='pull-left condensed'),
                        css_class='col-sm-12',
                    ),
                    css_class='row',
                ),
                css_class='col-sm-10',
            ),
            css_class='row',
        ),
        FormActions(
            Reset('Reset Form', 'Reset Filters', css_class='btn btn-danger'),
        ),
    )
class ImportRuneForm(forms.Form):
    """Accept pasted JSON rune data exported from the optimizer spreadsheet.

    ``cleaned_data['json_data']`` ends up holding the *decoded* Python
    object (not the raw string) after validation.
    """
    json_data = forms.CharField(
        max_length=999999,
        required=True,
        label='Paste Rune Data',
        help_text=mark_safe('Data is exported from the <a href="https://b7e2310d2b970be56f8b12314a4ade9bfc3d620b-www.googledrive.com/host/0B-GpYLz2ELqgfjdzTURIVFJVcGdlbW8xLWlyQTJKVWs5V0xrZHYyWGlYTFZnMElFX09RVmc/" target="_blank">Summoners War Rune Database and Optimizer</a>'),
        widget=forms.Textarea(),
    )

    # crispy-forms rendering config; no <form> tag so the view's template controls it.
    helper = FormHelper()
    helper.form_tag = False
    helper.layout = Layout(
        Alert('You can only import runes. Importing will create new runes, not update your current runes. Monsters and saved builds from the spreadsheet are ignored.', css_class='alert-warning'),
        Field('json_data'),
        FormActions(
            Submit('import', 'Import'),
        ),
    )

    def clean_json_data(self):
        """Parse the pasted text as JSON.

        Raises:
            forms.ValidationError: if the text is not valid JSON.
        """
        import json
        data = self.cleaned_data['json_data']
        try:
            data = json.loads(data)
        # Fixed: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt. json.JSONDecodeError is a subclass of ValueError.
        except ValueError:
            raise forms.ValidationError("Error parsing JSON data.")
        return data
class ExportRuneForm(forms.Form):
    """Display-only form presenting the user's runes as JSON text that can be
    pasted back into the optimizer spreadsheet."""
    json_data = forms.CharField(
        max_length=999999,
        label='Exported Rune Data',
        help_text=mark_safe('You can paste this data into the <a href="https://b7e2310d2b970be56f8b12314a4ade9bfc3d620b-www.googledrive.com/host/0B-GpYLz2ELqgfjdzTURIVFJVcGdlbW8xLWlyQTJKVWs5V0xrZHYyWGlYTFZnMElFX09RVmc/" target="_blank">Summoners War Rune Database and Optimizer</a>'),
        widget=forms.Textarea(),
    )

    # crispy-forms rendering config; labels hidden, only the warning + textarea show.
    helper = FormHelper()
    helper.form_show_labels = False
    helper.layout = Layout(
        # Fixed garbled word order in the user-facing warning: was
        # "Importing this data will into the optimizer spreadsheet OVERWRITE ...".
        Alert('Importing this data into the optimizer spreadsheet will <strong>OVERWRITE</strong> all runes, monsters, and saved builds currently present. It is advised to back up your existing data first.', css_class='alert-danger'),
        Field('json_data'),
    )
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import posixpath
import time
import urlparse
from generator import items
from scrapy.linkextractors import LinkExtractor
from scrapy import spiders
class SitemapSpider(spiders.CrawlSpider):
    """Crawl an OpenStack docs site and yield one SitemapItem per page."""

    name = 'sitemap'

    rules = [
        spiders.Rule(
            LinkExtractor(
                allow=[
                    r'.*\.html',
                    r'.*\.pdf',
                    r'.*\.xml',
                    r'.*\.txt',
                    r'.*/',
                ],
                deny=[
                    r'/trunk/',
                    r'/draft/',
                    r'/api/'
                ]
            ),
            follow=True, callback='parse_item'
        )
    ]

    def __init__(self, domain='docs.openstack.org', urls='', *args, **kwargs):
        """Start from the domain's index page plus any comma-separated extra URLs."""
        super(SitemapSpider, self).__init__(*args, **kwargs)
        self.domain = domain
        self.allowed_domains = [domain]
        self.start_urls = ['http://%s/index.html' % domain]
        self.start_urls.extend(urls.split(','))

    def parse_item(self, response):
        """Build a sitemap entry (loc/priority/changefreq/lastmod) for one page."""
        entry = items.SitemapItem()
        entry['priority'] = '0.5'
        entry['changefreq'] = 'daily'
        entry['loc'] = response.url

        page_path = urlparse.urlsplit(response.url).path
        # Index pages (or bare directory URLs) get top priority.
        if posixpath.basename(page_path) in ('index.html', ''):
            entry['priority'] = '1.0'
        # Older release trees change rarely, so advertise a weekly frequency.
        if page_path.startswith(('/juno', '/icehouse', '/havana')):
            entry['changefreq'] = 'weekly'

        # Prefer the server's Last-Modified header; fall back to Date.
        if 'Last-Modified' in response.headers:
            stamp = response.headers['Last-Modified']
        else:
            stamp = response.headers['Date']
        parsed = time.strptime(stamp, "%a, %d %b %Y %H:%M:%S %Z")
        entry['lastmod'] = time.strftime("%Y-%m-%dT%H:%M:%S%z", parsed)
        return entry
[sitemap] set higher priority for files of the current release
Change-Id: I9dbaa787354582f2f766fcce58aff95766d242c7
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import urlparse
from generator import items
from scrapy.linkextractors import LinkExtractor
from scrapy import spiders
class SitemapSpider(spiders.CrawlSpider):
    """Crawl an OpenStack docs site and yield one SitemapItem per page,
    demoting pages that belong to old release trees."""

    name = 'sitemap'

    # Path prefixes ("/austin", "/bexar", ...) of past releases; pages under
    # these change rarely and get a lower sitemap priority.
    old_releases = tuple('/' + release for release in (
        'austin',
        'bexar',
        'cactus',
        'diablo',
        'essex',
        'folsom',
        'grizzly',
        'havana',
        'icehouse',
        'juno',
        'kilo',
    ))

    rules = [
        spiders.Rule(
            LinkExtractor(
                allow=[
                    r'.*\.html',
                    r'.*\.pdf',
                    r'.*\.xml',
                    r'.*\.txt',
                    r'.*/',
                ],
                deny=[
                    r'/trunk/',
                    r'/draft/',
                    r'/api/'
                ]
            ),
            follow=True, callback='parse_item'
        )
    ]

    def __init__(self, domain='docs.openstack.org', urls='', *args, **kwargs):
        """Start from the domain's index page plus any comma-separated extra URLs."""
        super(SitemapSpider, self).__init__(*args, **kwargs)
        self.domain = domain
        self.allowed_domains = [domain]
        self.start_urls = ['http://%s/index.html' % domain]
        self.start_urls.extend(urls.split(','))

    def parse_item(self, response):
        """Build a sitemap entry (loc/priority/changefreq/lastmod) for one page."""
        entry = items.SitemapItem()
        entry['loc'] = response.url

        page_path = urlparse.urlsplit(response.url).path
        if page_path.startswith(self.old_releases):
            # weekly changefrequency and lower priority for old files
            entry['priority'] = '0.5'
            entry['changefreq'] = 'weekly'
        else:
            # daily changefrequency and highest priority for current files
            entry['priority'] = '1.0'
            entry['changefreq'] = 'daily'

        # Prefer the server's Last-Modified header; fall back to Date.
        if 'Last-Modified' in response.headers:
            stamp = response.headers['Last-Modified']
        else:
            stamp = response.headers['Date']
        parsed = time.strptime(stamp, "%a, %d %b %Y %H:%M:%S %Z")
        entry['lastmod'] = time.strftime("%Y-%m-%dT%H:%M:%S%z", parsed)
        return entry
|
""" Cron-able script to store a CSV export file of answersheets on disk to be
emailed to interested recipients.
"""
import datetime
import logging
from snippetscream.csv_serializer import UnicodeWriter
from django.core.management.base import BaseCommand
from django.db.models import Count
from survey import constants
from survey.models import AnswerSheet
logger = logging.getLogger('survey_answersheet_csv_export')
class Command(BaseCommand):
    """Dump every askMAMA answer sheet to a dated CSV file in the working directory."""
    help = "Saves askMAMA answersheet results as CSV file"

    def handle(self, *args, **options):
        # Name the CSV after today's date, e.g. askMAMA_Survey_Answers_20150714.csv
        now = datetime.datetime.now()
        filedate = "%04d%02d%02d" % (now.year, now.month, now.day)
        filename = "askMAMA_Survey_Answers_%s.csv" % (filedate)
        # The widest sheet determines how many Question/Answer column pairs we emit.
        max_answers = AnswerSheet.objects.get_max_answers()
        try:
            # Fixed: dropped the redundant outfile.close() -- the 'with' block
            # already closes the file on every exit path.
            with open(filename, 'wt') as outfile:
                writer = UnicodeWriter(outfile)
                # Fixed columns first, then one Question/Answer pair per slot.
                header_line = ['User', 'Questionnaire', 'Date Submitted',
                               'Status', 'Score']
                for idx in range(max_answers):
                    header_line.append('Question %s' % (idx+1))
                    header_line.append('Answer %s' % (idx+1))
                writer.writerow(header_line)
                # One row per answer sheet, grouped by questionnaire then user.
                qs = AnswerSheet.objects.all().order_by('questionnaire', 'user')
                for sheet in qs:
                    data = [sheet.user.username, sheet.questionnaire.title,
                            "%s" % sheet.date_created,
                            sheet.get_status_text(),
                            "%s" % sheet.calculate_score()
                            ]
                    for answer in sheet.multichoiceanswer_set.all():
                        data.append(answer.question.question_text)
                        data.append(answer.chosen_option.option_text)
                    writer.writerow(data)
        except IOError as exc:
            logger.error("%s: %s", filename, exc.strerror)
Fixed pep-8 violation
""" Cron-able script to store a CSV export file of answersheets on disk to be
emailed to interested recipients.
"""
import datetime
import logging
from snippetscream.csv_serializer import UnicodeWriter
from django.core.management.base import BaseCommand
from django.db.models import Count
from survey import constants
from survey.models import AnswerSheet
logger = logging.getLogger('survey_answersheet_csv_export')
class Command(BaseCommand):
    """Dump every askMAMA answer sheet to a dated CSV file in the working directory."""
    help = "Saves askMAMA answersheet results as CSV file"

    def handle(self, *args, **options):
        # Name the CSV after today's date, e.g. askMAMA_Survey_Answers_20150714.csv
        now = datetime.datetime.now()
        filedate = "%04d%02d%02d" % (now.year, now.month, now.day)
        filename = "askMAMA_Survey_Answers_%s.csv" % (filedate)
        # The widest sheet determines how many Question/Answer column pairs we emit.
        max_answers = AnswerSheet.objects.get_max_answers()
        try:
            # Fixed: dropped the redundant outfile.close() -- the 'with' block
            # already closes the file on every exit path.
            with open(filename, 'wt') as outfile:
                writer = UnicodeWriter(outfile)
                # Fixed columns first, then one Question/Answer pair per slot.
                header_line = ['User', 'Questionnaire', 'Date Submitted',
                               'Status', 'Score']
                for idx in range(max_answers):
                    header_line.append('Question %s' % (idx+1))
                    header_line.append('Answer %s' % (idx+1))
                writer.writerow(header_line)
                # One row per answer sheet, grouped by questionnaire then user.
                qs = AnswerSheet.objects.all().order_by(
                    'questionnaire', 'user')
                for sheet in qs:
                    data = [sheet.user.username, sheet.questionnaire.title,
                            "%s" % sheet.date_created,
                            sheet.get_status_text(),
                            "%s" % sheet.calculate_score()
                            ]
                    for answer in sheet.multichoiceanswer_set.all():
                        data.append(answer.question.question_text)
                        data.append(answer.chosen_option.option_text)
                    writer.writerow(data)
        except IOError as exc:
            logger.error("%s: %s", filename, exc.strerror)
|
9c5b8c07-2d5f-11e5-90a4-b88d120fff5e
9c64d3c7-2d5f-11e5-80a4-b88d120fff5e
9c64d3c7-2d5f-11e5-80a4-b88d120fff5e |
# syft absolute
from syft.lib.python.list import List
from syft.lib.python.slice import Slice
from syft.lib.python.string import String
from syft.lib.python.tuple import Tuple
def test_slice_types() -> None:
    """Verify syft's Slice mirrors builtin slice and indexes syft containers."""
    chars = "Python"
    py_list = list(chars)
    py_tuple = tuple(chars)

    sy_string = String(chars)
    sy_tuple = Tuple(py_tuple)
    sy_list = List(py_list)

    # (builtin, syft) slice pairs covering stop-only, start/stop, and
    # start/stop/step construction.
    cases = [
        (slice(1), Slice(1)),
        (slice(1, 2), Slice(1, 2)),
        (slice(1, 2, -1), Slice(1, 2, -1)),
    ]
    for py_slice, sy_slice in cases:
        # Attribute-level and whole-object equality with the builtin.
        assert py_slice.start == sy_slice.start
        assert py_slice.stop == sy_slice.stop
        assert py_slice.step == sy_slice.step
        assert py_slice == sy_slice

        # Indexing syft containers with a syft Slice matches Python semantics.
        assert sy_string[sy_slice] == chars[py_slice]
        assert sy_tuple[sy_slice] == py_tuple[py_slice]
        assert sy_list[sy_slice] == py_list[py_slice]

    # A plain builtin slice is also accepted by syft's List.
    last_py_slice = slice(1, 2, -1)
    assert sy_list[last_py_slice] == py_list[last_py_slice]
Added tests in slice_test.py (#5513)
# stdlib
import itertools
import operator
import sys
from test import support
import unittest
import weakref
# third party
import pytest
# syft absolute
from syft.lib.python.list import List
from syft.lib.python.slice import Slice
from syft.lib.python.string import String
from syft.lib.python.tuple import Tuple
def evaluate_slice_index(arg):
    """Coerce *arg* to an int through the __index__ protocol.

    Raises TypeError with the standard slice-index message when *arg*
    does not support __index__.
    """
    if not hasattr(arg, "__index__"):
        raise TypeError(
            "slice indices must be integers or " "None or have an __index__ method"
        )
    return operator.index(arg)
def slice_indices(slice, length):
    """Pure-Python reference implementation of slice.indices(length).

    Returns the (start, stop, step) triple a builtin slice would produce
    for a sequence of the given length.
    """

    def _as_index(arg):
        # Inlined index coercion: accept anything implementing __index__.
        if not hasattr(arg, "__index__"):
            raise TypeError(
                "slice indices must be integers or " "None or have an __index__ method"
            )
        return operator.index(arg)

    # Normalise length and step first (same evaluation order as CPython's
    # reference: conversions may raise TypeError before the ValueErrors below).
    length = operator.index(length)
    step = 1 if slice.step is None else _as_index(slice.step)
    if length < 0:
        raise ValueError("length should not be negative")
    if step == 0:
        raise ValueError("slice step cannot be zero")

    # Clamping bounds depend on the iteration direction.
    backwards = step < 0
    lo = -1 if backwards else 0
    hi = length - 1 if backwards else length

    if slice.start is None:
        start = hi if backwards else lo
    else:
        start = _as_index(slice.start)
        start = max(start + length, lo) if start < 0 else min(start, hi)

    if slice.stop is None:
        stop = lo if backwards else hi
    else:
        stop = _as_index(slice.stop)
        stop = max(stop + length, lo) if stop < 0 else min(stop, hi)

    return start, stop, step
# Class providing an __index__ method. Used for testing slice.indices.
class MyIndexable(object):
    """Minimal wrapper exposing its payload via the __index__ protocol.

    Used by the slice tests to confirm that custom index types are accepted
    wherever an integer slice bound is expected.
    """

    def __init__(self, value):
        self.value = value

    def __index__(self):
        return self.value
class SliceTest(unittest.TestCase):
    """Adaptation of CPython's test_slice suite against syft's Slice wrapper.

    Expected tuples (e.g. in test_indices) are the exact values the builtin
    slice type produces, so Slice is expected to match builtin semantics.
    """

    @pytest.mark.xfail
    def test_constructor(self):
        # Builtin slice() rejects 0 or 4+ args; marked xfail, so Slice
        # apparently does not enforce this -- TODO confirm against Slice.__init__.
        self.assertRaises(TypeError, Slice)
        self.assertRaises(TypeError, Slice, 1, 2, 3, 4)

    def test_repr(self):
        # Slice uses the default object repr; only the class prefix is checked.
        self.assertEqual(repr(Slice(1, 2, 3))[:33], "<syft.lib.python.Slice object at ")

    def test_hash(self):
        # Verify clearing of SF bug #800796
        self.assertRaises(TypeError, hash, Slice(5))
        with self.assertRaises(TypeError):
            slice(5).__hash__()

    def test_cmp(self):
        # Equality is asserted through .value (the wrapped builtin slice),
        # not the Slice objects themselves.
        s1 = Slice(1, 2, 3)
        s2 = Slice(1, 2, 3)
        s3 = Slice(1, 2, 4)
        self.assertEqual(s1.value, s2.value)
        self.assertNotEqual(s1.value, s3.value)
        self.assertNotEqual(s1, None)
        self.assertNotEqual(s1.value, (1, 2, 3))
        self.assertNotEqual(s1, "")

        class Exc(Exception):
            pass

        class BadCmp(object):
            # Any equality comparison raises; used to check exception propagation.
            def __eq__(self, other):
                raise Exc

        # Identity comparison must not invoke BadCmp.__eq__; value comparison must.
        s1 = Slice(BadCmp())
        s2 = Slice(BadCmp())
        self.assertEqual(s1, s1)
        self.assertRaises(Exc, lambda: s1.value == s2.value)

        s1 = Slice(1, BadCmp())
        s2 = Slice(1, BadCmp())
        self.assertEqual(s1, s1)
        self.assertRaises(Exc, lambda: s1.value == s2.value)

        s1 = Slice(1, 2, BadCmp())
        s2 = Slice(1, 2, BadCmp())
        self.assertEqual(s1, s1)
        self.assertRaises(Exc, lambda: s1.value == s2.value)

    def test_members(self):
        # One arg is the stop (like builtin slice); two are start/stop; three add step.
        s = Slice(1)
        self.assertEqual(s.start, None)
        self.assertEqual(s.stop, 1)
        self.assertEqual(s.step, None)

        s = Slice(1, 2)
        self.assertEqual(s.start, 1)
        self.assertEqual(s.stop, 2)
        self.assertEqual(s.step, None)

        s = Slice(1, 2, 3)
        self.assertEqual(s.start, 1)
        self.assertEqual(s.stop, 2)
        self.assertEqual(s.step, 3)

        class AnyClass:
            pass

        # Arbitrary objects are stored as-is (no coercion at construction time).
        obj = AnyClass()
        s = Slice(obj)
        self.assertTrue(s.stop is obj)

    def check_indices(self, slice, length):
        # Compare Slice.indices() against the pure-Python reference
        # implementation, treating ValueError as a comparable outcome.
        try:
            actual = slice.indices(length)
        except ValueError:
            actual = "valueerror"
        try:
            expected = slice_indices(slice, length)
        except ValueError:
            expected = "valueerror"
        self.assertEqual(actual, expected)
        # Cross-check: range(*indices) must equal slicing range(length) directly.
        if length >= 0 and slice.step != 0:
            actual = range(*slice.indices(length))
            expected = range(length)[slice.value]
            self.assertEqual(actual, expected)

    def test_indices(self):
        # Expected tuples below are exactly what builtin slice.indices returns.
        self.assertEqual(Slice(None).indices(10), (0, 10, 1))
        self.assertEqual(Slice(None, None, 2).indices(10), (0, 10, 2))
        self.assertEqual(Slice(1, None, 2).indices(10), (1, 10, 2))
        self.assertEqual(Slice(None, None, -1).indices(10), (9, -1, -1))
        self.assertEqual(Slice(None, None, -2).indices(10), (9, -1, -2))
        self.assertEqual(Slice(3, None, -2).indices(10), (3, -1, -2))
        # issue 3004 tests
        self.assertEqual(Slice(None, -9).indices(10), (0, 1, 1))
        self.assertEqual(Slice(None, -10).indices(10), (0, 0, 1))
        self.assertEqual(Slice(None, -11).indices(10), (0, 0, 1))
        self.assertEqual(Slice(None, -10, -1).indices(10), (9, 0, -1))
        self.assertEqual(Slice(None, -11, -1).indices(10), (9, -1, -1))
        self.assertEqual(Slice(None, -12, -1).indices(10), (9, -1, -1))
        self.assertEqual(Slice(None, 9).indices(10), (0, 9, 1))
        self.assertEqual(Slice(None, 10).indices(10), (0, 10, 1))
        self.assertEqual(Slice(None, 11).indices(10), (0, 10, 1))
        self.assertEqual(Slice(None, 8, -1).indices(10), (9, 8, -1))
        self.assertEqual(Slice(None, 9, -1).indices(10), (9, 9, -1))
        self.assertEqual(Slice(None, 10, -1).indices(10), (9, 9, -1))

        # Out-of-range bounds clamp to the sequence limits.
        self.assertEqual(Slice(-100, 100).indices(10), Slice(None).indices(10))
        self.assertEqual(
            Slice(100, -100, -1).indices(10), Slice(None, None, -1).indices(10)
        )
        self.assertEqual(Slice(-100, 100, 2).indices(10), (0, 10, 2))

        self.assertEqual(list(range(10))[:: sys.maxsize - 1], [0])

        # Check a variety of start, stop, step and length values, including
        # values exceeding sys.maxsize (see issue #14794).
        vals = [
            None,
            -(2 ** 100),
            -(2 ** 30),
            -53,
            -7,
            -1,
            0,
            1,
            7,
            53,
            2 ** 30,
            2 ** 100,
        ]
        lengths = [0, 1, 7, 53, 2 ** 30, 2 ** 100]
        for slice_args in itertools.product(vals, repeat=3):
            s = Slice(*slice_args)
            for length in lengths:
                self.check_indices(s, length)
        self.check_indices(Slice(0, 10, 1), -3)

        # Negative length should raise ValueError
        with self.assertRaises(ValueError):
            Slice(None).indices(-1)

        # Zero step should raise ValueError
        with self.assertRaises(ValueError):
            Slice(0, 10, 0).indices(5)

        # Using a start, stop or step or length that can't be interpreted as an
        # integer should give a TypeError ...
        with self.assertRaises(TypeError):
            Slice(0.0, 10, 1).indices(5)
        with self.assertRaises(TypeError):
            Slice(0, 10.0, 1).indices(5)
        with self.assertRaises(TypeError):
            Slice(0, 10, 1.0).indices(5)
        with self.assertRaises(TypeError):
            Slice(0, 10, 1).indices(5.0)

        # ... but it should be fine to use a custom class that provides index.
        self.assertEqual(Slice(0, 10, 1).indices(5), (0, 5, 1))
        self.assertEqual(Slice(MyIndexable(0), 10, 1).indices(5), (0, 5, 1))
        self.assertEqual(Slice(0, MyIndexable(10), 1).indices(5), (0, 5, 1))
        self.assertEqual(Slice(0, 10, MyIndexable(1)).indices(5), (0, 5, 1))
        self.assertEqual(Slice(0, 10, 1).indices(MyIndexable(5)), (0, 5, 1))

    def test_setslice_without_getslice(self):
        # __setitem__ with slice syntax receives the Slice-equivalent key.
        tmp = []

        class X(object):
            def __setitem__(self, i, k):
                tmp.append((i, k))

        x = X()
        x[1:2] = 42
        self.assertEqual(tmp, [(Slice(1, 2), 42)])

    def test_cycle(self):
        # A reference cycle (object holds a Slice holding the object) must
        # still be collectable; the weakref going dead proves collection.
        class myobj:
            pass

        o = myobj()
        o.s = Slice(o)
        w = weakref.ref(o)
        o = None
        support.gc_collect()
        self.assertIsNone(w())

    def test_slice_types(self) -> None:
        # Slice must behave like builtin slice both attribute-wise and as an
        # index into syft String/Tuple/List containers.
        py_string = "Python"
        py_list = ["P", "y", "t", "h", "o", "n"]
        py_tuple = ("P", "y", "t", "h", "o", "n")

        sy_string = String(py_string)
        sy_tuple = Tuple(py_tuple)
        sy_list = List(py_list)

        py_slice1 = slice(1)
        sy_slice1 = Slice(1)
        assert py_slice1.start == sy_slice1.start
        assert py_slice1.stop == sy_slice1.stop
        assert py_slice1.step == sy_slice1.step
        assert py_slice1 == sy_slice1

        py_slice2 = slice(1, 2)
        sy_slice2 = Slice(1, 2)
        assert py_slice2 == sy_slice2
        assert py_slice2.start == sy_slice2.start
        assert py_slice2.stop == sy_slice2.stop
        assert py_slice2.step == sy_slice2.step

        py_slice3 = slice(1, 2, -1)
        sy_slice3 = Slice(1, 2, -1)
        assert py_slice3 == sy_slice3
        assert py_slice3.start == sy_slice3.start
        assert py_slice3.stop == sy_slice3.stop
        assert py_slice3.step == sy_slice3.step

        assert sy_string[sy_slice1] == py_string[py_slice1]
        assert sy_string[sy_slice2] == py_string[py_slice2]
        assert sy_string[sy_slice3] == py_string[py_slice3]

        assert sy_tuple[sy_slice1] == py_tuple[py_slice1]
        assert sy_tuple[sy_slice2] == py_tuple[py_slice2]
        assert sy_tuple[sy_slice3] == py_tuple[py_slice3]

        assert sy_list[sy_slice1] == py_list[py_slice1]
        assert sy_list[sy_slice2] == py_list[py_slice2]
        assert sy_list[sy_slice3] == py_list[py_slice3]
        # syft containers also accept plain builtin slices.
        assert sy_list[py_slice3] == py_list[py_slice3]
|
#!/usr/bin/env python
"""
owtf is an OWASP+PTES-focused try to unite great tools and facilitate pen testing
Copyright (c) 2011, Abraham Aranguren <name.surname@gmail.com> Twitter: @7a_ http://7-a.org
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright owner nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The shell module allows running arbitrary shell commands and is critical to the
framework in order to run third party tools.
"""
#import shlex
import subprocess
from framework.lib.general import *
from collections import defaultdict
class Shell:
    """Run arbitrary shell commands for OWTF plugins (Python 2 code).

    Each command's original text, expanded text, start/end times and final
    status are tracked in self.CommandInfo and persisted through the shared
    Core facade (Core.Timer / Core.Config / Core.DB / Core.Error).
    """

    def __init__(self, Core):
        self.DynamicReplacements = {} # Some settings like the plugin output dir are dynamic, config is no place for those
        self.Core = Core
        # Per-command bookkeeping; reset after each FinishCommand().
        self.CommandInfo = defaultdict(list)

    def ShellPathEscape(self, Text):
        # Backslash-escape spaces and parentheses so the path survives the shell.
        return MultipleReplace(Text, { ' ':'\ ', '(':'\(', ')':'\)' }).strip()

    def RefreshReplacements(self):
        # PLUGIN_OUTPUT_DIR changes per plugin run, so re-read it each time.
        self.DynamicReplacements['###PLUGIN_OUTPUT_DIR###'] = self.Core.Config.Get('PLUGIN_OUTPUT_DIR')

    def GetModifiedShellCommand(self, Command, PluginOutputDir):
        # Expand dynamic placeholders and prefix a cd into the plugin output dir.
        self.RefreshReplacements()
        NewCommand = "cd "+self.ShellPathEscape(PluginOutputDir)+"; "+MultipleReplace(Command, self.DynamicReplacements)
        self.StartCommand(Command, NewCommand)
        return NewCommand

    def StartCommand(self, OriginalCommand, ModifiedCommand):
        # Start the command timer and record both raw and expanded command text.
        # NOTE(review): the defaultdict created here is immediately replaced by
        # the plain dict below.
        CommandInfo = defaultdict(list)
        self.Core.Timer.StartTimer('Command')
        CommandInfo = { 'OriginalCommand' : OriginalCommand, 'ModifiedCommand' : ModifiedCommand, 'Start' : self.Core.Timer.GetStartDateTimeAsStr('Command') }
        self.CommandInfo = CommandInfo

    def FinishCommand(self, CommandInfo, WasCancelled):
        # Stamp end time, status and runtime, then persist to the command register.
        CommandInfo['End'] = self.Core.Timer.GetEndDateTimeAsStr('Command')
        Status = "Finished"
        if WasCancelled:
            Status = "Cancelled"
        CommandInfo['Status'] = Status
        CommandInfo['RunTime'] = self.Core.Timer.GetElapsedTimeAsStr('Command')
        CommandInfo['Target'] = self.Core.Config.Get('TARGET')
        self.Core.DB.CommandRegister.Add(CommandInfo)
        # Reset bookkeeping for the next command.
        self.CommandInfo = defaultdict(list)

    def CanRunCommand(self, Command):
        # Returns [PreviousTarget, False] when the command already ran against a
        # different target, else [None, True].
        Target = self.Core.DB.CommandRegister.AlreadyRegistered(Command['OriginalCommand'])
        if Target: # Command was run before
            if Target == self.Core.Config.Get('TARGET'): # Run several times against same target for grep plugins. #and self.Core.Config.Get('FORCE_OVERWRITE')
                return [ None, True ] # Can only run again if against the same target and when -f was specified
            return [Target, False ]
        return [ None, True ] # Command was not run before

    def shell_exec_monitor(self, Command):
        # Run Command via the shell, echoing output live, honouring Ctrl+C as a
        # per-command abort, and registering timing/status in the DB.
        if not self.CommandInfo:
            self.StartCommand(Command, Command)
        Target, CanRun = self.CanRunCommand(self.CommandInfo)
        if not CanRun:
            Message = "The command was already run for target: "+Target
            return Message
        cprint("\nExecuting (Control+C to abort THIS COMMAND ONLY):\n"+Command)
        cprint("")
        cprint("------> Execution Start Date/Time: "+self.Core.Timer.GetStartDateTimeAsStr('Command'))
        cprint("")
        Output = ''
        Cancelled = False
        try: # Stolen from: http://stackoverflow.com/questions/5833716/how-to-capture-output-of-a-shell-script-running-in-a-separate-process-in-a-wxpyt
            proc = subprocess.Popen(Command, shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                bufsize=1)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                # NOTE: Below MUST BE print instead of "cprint" to clearly distinguish between owtf output and tool output
                print MultipleReplace(line, { "\n":"", "\r":"" }) # Show progress on the screen too!
                Output += line # Save as much output as possible before a tool crashes! :)
        except KeyboardInterrupt:
            Cancelled = True
            self.FinishCommand(self.CommandInfo, Cancelled)
            Output += self.Core.Error.UserAbort('Command', Output) # Identify as Command Level abort
        if not Cancelled:
            self.FinishCommand(self.CommandInfo, Cancelled)
        return Output

    def shell_exec(self, Command, **kwds): # Mostly used for internal framework commands
        #Stolen from (added shell=True tweak, necessary for easy piping straight via the command line, etc):
        #http://stackoverflow.com/questions/236737/making-a-system-call-that-returns-the-stdout-output-as-a-string/236909#236909
        kwds.setdefault("stdout", subprocess.PIPE)
        kwds.setdefault("stderr", subprocess.STDOUT)
        p = subprocess.Popen(Command, shell=True, **kwds)
        # communicate() waits for completion; returns captured stdout only.
        return p.communicate()[0]
[shell] PEP8 on import
TODO: Remove import *
#!/usr/bin/env python
"""
owtf is an OWASP+PTES-focused try to unite great tools and facilitate pen testing
Copyright (c) 2011, Abraham Aranguren <name.surname@gmail.com> Twitter: @7a_ http://7-a.org
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright owner nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The shell module allows running arbitrary shell commands and is critical to the
framework in order to run third party tools.
"""
import subprocess
from collections import defaultdict
from framework.lib.general import *
class Shell(object):
    """Runs (possibly rewritten) shell commands for plugins and records each
    run -- original/modified command, timings, target and status -- in the
    framework's command register (Python 2 module)."""

    def __init__(self, Core):
        # Core gives access to Config, Timer, DB and Error facilities.
        self.DynamicReplacements = {} # Some settings like the plugin output dir are dynamic, config is no place for those
        self.Core = Core
        # Metadata for the command currently in flight (falsy when idle).
        self.CommandInfo = defaultdict(list)

    def ShellPathEscape(self, Text):
        """Backslash-escape spaces and parentheses so the path survives the shell."""
        return MultipleReplace(Text, { ' ':'\ ', '(':'\(', ')':'\)' }).strip()

    def RefreshReplacements(self):
        """Re-read dynamic config values used for placeholder substitution."""
        self.DynamicReplacements['###PLUGIN_OUTPUT_DIR###'] = self.Core.Config.Get('PLUGIN_OUTPUT_DIR')

    def GetModifiedShellCommand(self, Command, PluginOutputDir):
        """Return Command rewritten to cd into the plugin output dir with
        dynamic placeholders substituted; also marks the command as started."""
        self.RefreshReplacements()
        NewCommand = "cd "+self.ShellPathEscape(PluginOutputDir)+"; "+MultipleReplace(Command, self.DynamicReplacements)
        self.StartCommand(Command, NewCommand)
        return NewCommand

    def StartCommand(self, OriginalCommand, ModifiedCommand):
        """Start the command timer and snapshot start metadata on self.CommandInfo."""
        CommandInfo = defaultdict(list)  # NOTE(review): dead assignment -- immediately overwritten below
        self.Core.Timer.StartTimer('Command')
        CommandInfo = { 'OriginalCommand' : OriginalCommand, 'ModifiedCommand' : ModifiedCommand, 'Start' : self.Core.Timer.GetStartDateTimeAsStr('Command') }
        self.CommandInfo = CommandInfo

    def FinishCommand(self, CommandInfo, WasCancelled):
        """Stamp end time/status/target, persist the run, and reset CommandInfo."""
        CommandInfo['End'] = self.Core.Timer.GetEndDateTimeAsStr('Command')
        Status = "Finished"
        if WasCancelled:
            Status = "Cancelled"
        CommandInfo['Status'] = Status
        CommandInfo['RunTime'] = self.Core.Timer.GetElapsedTimeAsStr('Command')
        CommandInfo['Target'] = self.Core.Config.Get('TARGET')
        self.Core.DB.CommandRegister.Add(CommandInfo)
        self.CommandInfo = defaultdict(list)

    def CanRunCommand(self, Command):
        """Return [Target, CanRun]: a previously run command may re-run only
        against the same target; otherwise the old target is returned."""
        Target = self.Core.DB.CommandRegister.AlreadyRegistered(Command['OriginalCommand'])
        if Target: # Command was run before
            if Target == self.Core.Config.Get('TARGET'): # Run several times against same target for grep plugins. #and self.Core.Config.Get('FORCE_OVERWRITE'):
                return [ None, True ] # Can only run again if against the same target and when -f was specified
            return [Target, False ]
        return [ None, True ] # Command was not run before

    def shell_exec_monitor(self, Command):
        """Run Command streaming its combined stdout/stderr to the screen;
        Control+C cancels only this command (not the framework)."""
        if not self.CommandInfo:
            self.StartCommand(Command, Command)
        Target, CanRun = self.CanRunCommand(self.CommandInfo)
        if not CanRun:
            Message = "The command was already run for target: "+Target
            return Message
        cprint("\nExecuting (Control+C to abort THIS COMMAND ONLY):\n"+Command)
        cprint("")
        cprint("------> Execution Start Date/Time: "+self.Core.Timer.GetStartDateTimeAsStr('Command'))
        cprint("")
        Output = ''
        Cancelled = False
        try: # Stolen from: http://stackoverflow.com/questions/5833716/how-to-capture-output-of-a-shell-script-running-in-a-separate-process-in-a-wxpyt
            proc = subprocess.Popen(Command, shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                bufsize=1)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                # NOTE: Below MUST BE print instead of "cprint" to clearly distinguish between owtf output and tool output
                print MultipleReplace(line, { "\n":"", "\r":"" }) # Show progress on the screen too!
                Output += line # Save as much output as possible before a tool crashes! :)
        except KeyboardInterrupt:
            Cancelled = True
            self.FinishCommand(self.CommandInfo, Cancelled)
            Output += self.Core.Error.UserAbort('Command', Output) # Identify as Command Level abort
        if not Cancelled:
            self.FinishCommand(self.CommandInfo, Cancelled)
        return Output

    def shell_exec(self, Command, **kwds): # Mostly used for internal framework commands
        """Run Command and return its combined stdout+stderr as one string."""
        #Stolen from (added shell=True tweak, necessary for easy piping straight via the command line, etc):
        #http://stackoverflow.com/questions/236737/making-a-system-call-that-returns-the-stdout-output-as-a-string/236909#236909
        kwds.setdefault("stdout", subprocess.PIPE)
        kwds.setdefault("stderr", subprocess.STDOUT)
        p = subprocess.Popen(Command, shell=True, **kwds)
        return p.communicate()[0]
|
import pandas as pd
import numpy as np
from typing import Union, Tuple, Dict, Optional
from pandas.core.groupby import DataFrameGroupBy
from siuba.dply.verbs import singledispatch2, gather, var_create, var_select
def pivot_longer_spec(data,
                      spec,
                      names_repair: Optional[str] = "check_unique",
                      values_drop_na: bool = False):
    """Pivot to long format according to a spec DataFrame.

    Not implemented yet; previously this silently did nothing (`pass`
    returned None), hiding the missing feature from callers.
    """
    raise NotImplementedError("TODO: see https://github.com/machow/siuba/issues/293")
@singledispatch2(pd.DataFrame)
def pivot_longer(
        __data,
        *args,
        names_to: Union[str, Tuple[str, ...]] = "name",
        names_prefix: Optional[str] = None,
        names_sep: Optional[str] = None,
        names_pattern: Optional[str] = None,
        names_ptypes: Optional[Tuple] = None,
        names_repair: str = "check_unique",
        values_to: str = "value",
        values_drop_na: bool = False,
        values_ptypes: Optional[Union[str, Tuple[str, ...]]] = None,
        values_transform: Optional[Dict] = None,
        ):
    """Pivot a DataFrame from wide to long format.

    :param __data: DataFrame to lengthen.
    :param args: column selection (same syntax as `gather`) of columns to pivot.
    :param names_to: name(s) for the new "name" column(s).
    :param names_sep: separator to split column names on (exclusive with
        `names_pattern`).
    :param names_pattern: regex to split column names on (exclusive with
        `names_sep`).
    :param values_to: name of the new "value" column; ignored when
        `names_sep`/`names_pattern` is given (as in tidyr).
    :param values_drop_na: drop missing values from the result.
    :param values_transform: mapping of column name -> callable applied to the
        stacked values. Defaults to no transformation.
    :raises ValueError: if both `names_sep` and `names_pattern` are given, no
        columns are selected, or splitting yields inconsistent lengths.
    """
    # Fix: the old signature used a shared mutable default (`dict()`).
    if values_transform is None:
        values_transform = {}

    if names_sep is not None and names_pattern is not None:
        raise ValueError("You may only use either `names_sep` or "
                         "`names_pattern`.")

    if isinstance(names_to, str):
        names_to = (names_to,)

    # Copied selection over from gather, maybe this can be compartmentalised?
    var_list = var_create(*args)
    od = var_select(__data.columns, *var_list)
    value_vars = list(od) or None
    id_vars = [col for col in __data.columns if col not in od]
    keep_data = __data.loc[:, id_vars]

    if value_vars is None:
        # While stack works in this case, it will later on merge in to the
        # original dataframe. To copy tidyr behaviour, we need to raise a
        # ValueError
        raise ValueError("Please provide at least 1 column or all columns "
                         "(shorthand: _[:]).")
    elif names_sep is not None or names_pattern is not None:
        to_stack = __data.loc[:, value_vars]
        column_index = (
            to_stack.columns.str.split(names_sep).map(tuple)
            if names_sep is not None
            # Split by names_pattern, and remove empty strings using filter
            else to_stack.columns.str.split(names_pattern).map(
                lambda x: tuple(list(filter(None, x)))
            )
        )
        split_lengths = np.array(column_index.map(len))
        if not np.all(split_lengths == split_lengths[0]):
            # Fix: the format string has two placeholders but was given only
            # one argument, so raising this error crashed with an IndexError.
            raise ValueError(
                "Splitting by {} leads to unequal lengths ({}).".format(
                    names_sep if names_sep is not None else names_pattern,
                    list(split_lengths),
                )
            )
        if split_lengths[0] != len(names_to):
            raise ValueError("Splitting provided more values than provided in "
                             "`names_to`")
        # TODO: To set names for the new index, we need to feed in a list.
        # There's no particular reason to use a tuples as input in the first
        # place, might be worth reconsidering the choice of input format?
        # TODO: What if we don't use '_value' in the tuple? Need to check tidyr
        stack_idx = (
            [i for i, x in enumerate(list(names_to)) if x != "_value"]
            if names_to != ('_value',)
            else -1
        )
        names_to = [x if x != "_value" else None for x in names_to]
        column_index = column_index.set_names(names_to)
        to_stack.columns = column_index
        stacked = to_stack.stack(stack_idx)
        stacked = stacked.reset_index(level=stacked.index.nlevels - 1)
        if stack_idx == -1:
            stacked = stacked.drop(columns='level_1')
        if np.nan in names_to:
            stacked = stacked.drop(columns=[np.nan])
        if values_drop_na:
            # NOTE(review): axis=1 drops *columns* containing NAs; tidyr's
            # values_drop_na drops rows -- confirm this is intentional.
            stacked = stacked.dropna(axis=1)
    else:
        stacked = __data.loc[:, value_vars].stack(dropna=values_drop_na)

        # Set column names for stack
        # As in tidyr `values_to` is ignored if `names_sep` or `names_pattern`
        # is provided.
        stacked.index.rename(names_to[0], level=1, inplace=True)
        stacked.name = values_to

    # values_transform was introduced in tidyr 1.1.0
    if values_to in values_transform:
        # TODO: error handling -- this won't work for dictionaries
        # list needs special handling, as it can only be applied to iterables,
        # not integers.
        if values_transform[values_to] == list:
            stacked = stacked.apply(lambda x: [x])
        else:
            stacked = stacked.apply(lambda x: values_transform[values_to](x))

    stacked_df = (
        # if `names_sep` or `names_pattern` are not provided `stacked` will
        # be a pd.Series and needs its index reset.
        stacked.reset_index(1)
        if names_sep is None and names_pattern is None
        else stacked
    )

    # If we want to pivot all but one, we are left with a `pd.Series`.
    # This needs to be converted to a DataFrame to serve as left element in a
    # merge
    if isinstance(keep_data, pd.Series):
        output_df = keep_data.to_frame().merge(stacked_df, left_index=True, right_index=True)
    elif keep_data.empty:
        output_df = stacked_df
    else:
        output_df = keep_data.merge(stacked_df, left_index=True, right_index=True)

    return output_df
@pivot_longer.register(DataFrameGroupBy)
def _pivot_longer_gdf(__data, *args, **kwargs):
    """Grouped-data dispatch: pivot the underlying frame, then re-group."""
    # TODO: consolidate all verbs that punt to DataFrame version (#118)
    group_cols = [g.name for g in __data.grouper.groupings]
    pivoted = pivot_longer(__data.obj, *args, **kwargs)
    lost = set(group_cols) - set(pivoted.columns)
    if lost:
        msg = ("When using pivot_longer on grouped data, the result must contain "
               "original grouping columns. Missing group columns: %s" % lost)
        raise ValueError(msg)
    return pivoted.groupby(group_cols)
chore: have pivot_longer_spec raise not implemented for now
import pandas as pd
import numpy as np
from typing import Union, Tuple, Dict, Optional
from pandas.core.groupby import DataFrameGroupBy
from siuba.dply.verbs import singledispatch2, gather, var_create, var_select
def pivot_longer_spec(data,
                      spec,
                      names_repair: Optional[str] = "check_unique",
                      values_drop_na: bool = False):
    """Pivot to long format according to a spec DataFrame (not implemented)."""
    todo = "TODO: see https://github.com/machow/siuba/issues/293"
    raise NotImplementedError(todo)
@singledispatch2(pd.DataFrame)
def pivot_longer(
        __data,
        *args,
        names_to: Union[str, Tuple[str, ...]] = "name",
        names_prefix: Optional[str] = None,
        names_sep: Optional[str] = None,
        names_pattern: Optional[str] = None,
        names_ptypes: Optional[Tuple] = None,
        names_repair: str = "check_unique",
        values_to: str = "value",
        values_drop_na: bool = False,
        values_ptypes: Optional[Union[str, Tuple[str, ...]]] = None,
        values_transform: Optional[Dict] = None,
        ):
    """Pivot a DataFrame from wide to long format.

    :param __data: DataFrame to lengthen.
    :param args: column selection (same syntax as `gather`) of columns to pivot.
    :param names_to: name(s) for the new "name" column(s).
    :param names_sep: separator to split column names on (exclusive with
        `names_pattern`).
    :param names_pattern: regex to split column names on (exclusive with
        `names_sep`).
    :param values_to: name of the new "value" column; ignored when
        `names_sep`/`names_pattern` is given (as in tidyr).
    :param values_drop_na: drop missing values from the result.
    :param values_transform: mapping of column name -> callable applied to the
        stacked values. Defaults to no transformation.
    :raises ValueError: if both `names_sep` and `names_pattern` are given, no
        columns are selected, or splitting yields inconsistent lengths.
    """
    # Fix: the old signature used a shared mutable default (`dict()`).
    if values_transform is None:
        values_transform = {}

    if names_sep is not None and names_pattern is not None:
        raise ValueError("You may only use either `names_sep` or "
                         "`names_pattern`.")

    if isinstance(names_to, str):
        names_to = (names_to,)

    # Copied selection over from gather, maybe this can be compartmentalised?
    var_list = var_create(*args)
    od = var_select(__data.columns, *var_list)
    value_vars = list(od) or None
    id_vars = [col for col in __data.columns if col not in od]
    keep_data = __data.loc[:, id_vars]

    if value_vars is None:
        # While stack works in this case, it will later on merge in to the
        # original dataframe. To copy tidyr behaviour, we need to raise a
        # ValueError
        raise ValueError("Please provide at least 1 column or all columns "
                         "(shorthand: _[:]).")
    elif names_sep is not None or names_pattern is not None:
        to_stack = __data.loc[:, value_vars]
        column_index = (
            to_stack.columns.str.split(names_sep).map(tuple)
            if names_sep is not None
            # Split by names_pattern, and remove empty strings using filter
            else to_stack.columns.str.split(names_pattern).map(
                lambda x: tuple(list(filter(None, x)))
            )
        )
        split_lengths = np.array(column_index.map(len))
        if not np.all(split_lengths == split_lengths[0]):
            # Fix: the format string has two placeholders but was given only
            # one argument, so raising this error crashed with an IndexError.
            raise ValueError(
                "Splitting by {} leads to unequal lengths ({}).".format(
                    names_sep if names_sep is not None else names_pattern,
                    list(split_lengths),
                )
            )
        if split_lengths[0] != len(names_to):
            raise ValueError("Splitting provided more values than provided in "
                             "`names_to`")
        # TODO: To set names for the new index, we need to feed in a list.
        # There's no particular reason to use a tuples as input in the first
        # place, might be worth reconsidering the choice of input format?
        # TODO: What if we don't use '_value' in the tuple? Need to check tidyr
        stack_idx = (
            [i for i, x in enumerate(list(names_to)) if x != "_value"]
            if names_to != ('_value',)
            else -1
        )
        names_to = [x if x != "_value" else None for x in names_to]
        column_index = column_index.set_names(names_to)
        to_stack.columns = column_index
        stacked = to_stack.stack(stack_idx)
        stacked = stacked.reset_index(level=stacked.index.nlevels - 1)
        if stack_idx == -1:
            stacked = stacked.drop(columns='level_1')
        if np.nan in names_to:
            stacked = stacked.drop(columns=[np.nan])
        if values_drop_na:
            # NOTE(review): axis=1 drops *columns* containing NAs; tidyr's
            # values_drop_na drops rows -- confirm this is intentional.
            stacked = stacked.dropna(axis=1)
    else:
        stacked = __data.loc[:, value_vars].stack(dropna=values_drop_na)

        # Set column names for stack
        # As in tidyr `values_to` is ignored if `names_sep` or `names_pattern`
        # is provided.
        stacked.index.rename(names_to[0], level=1, inplace=True)
        stacked.name = values_to

    # values_transform was introduced in tidyr 1.1.0
    if values_to in values_transform:
        # TODO: error handling -- this won't work for dictionaries
        # list needs special handling, as it can only be applied to iterables,
        # not integers.
        if values_transform[values_to] == list:
            stacked = stacked.apply(lambda x: [x])
        else:
            stacked = stacked.apply(lambda x: values_transform[values_to](x))

    stacked_df = (
        # if `names_sep` or `names_pattern` are not provided `stacked` will
        # be a pd.Series and needs its index reset.
        stacked.reset_index(1)
        if names_sep is None and names_pattern is None
        else stacked
    )

    # If we want to pivot all but one, we are left with a `pd.Series`.
    # This needs to be converted to a DataFrame to serve as left element in a
    # merge
    if isinstance(keep_data, pd.Series):
        output_df = keep_data.to_frame().merge(stacked_df, left_index=True, right_index=True)
    elif keep_data.empty:
        output_df = stacked_df
    else:
        output_df = keep_data.merge(stacked_df, left_index=True, right_index=True)

    return output_df
@pivot_longer.register(DataFrameGroupBy)
def _pivot_longer_gdf(__data, *args, **kwargs):
    """Grouped-data dispatch: pivot the plain frame and restore the groups."""
    # TODO: consolidate all verbs that punt to DataFrame version (#118)
    groupings = __data.grouper.groupings
    prior = [grouping.name for grouping in groupings]
    result = pivot_longer(__data.obj, *args, **kwargs)
    dropped = set(prior).difference(result.columns)
    if dropped:
        raise ValueError(
            "When using pivot_longer on grouped data, the result must contain "
            "original grouping columns. Missing group columns: %s" % dropped
        )
    return result.groupby(prior)
|
from tastypie.resources import ModelResource
from tastypie import fields
from models import (Province, District, Zone, School)
from tastypie.resources import ALL, ALL_WITH_RELATIONS
from django.conf.urls import url
class ProvinceResource(ModelResource):
    """
    Read endpoint for Province rows.

    NOTE(review): unlike the original docstring claimed, no resource_name is
    set here; tastypie will derive the endpoint name from the class name.
    """
    class Meta:
        queryset = Province.objects.all()
class DistrictResource(ModelResource):
    """
    Read endpoint for District rows; each district embeds its parent
    Province data (full=True) instead of just a resource URI.
    """
    province = fields.ForeignKey(ProvinceResource, 'province', full=True)

    class Meta:
        queryset = District.objects.all()
class ZoneResource(ModelResource):
    """
    Read endpoint for Zone rows; each zone embeds its parent District
    (which in turn embeds its Province), giving the full chain upward.
    """
    district = fields.ForeignKey(DistrictResource, 'district', full=True)

    class Meta:
        queryset = Zone.objects.all()
class SchoolResource(ModelResource):
    """
    Read-only endpoint (resource_name "hierarchy") returning a school's EMIS
    code, name and its full zone -> district -> province chain.

    Detail lookups are done by EMIS code rather than primary key.
    """
    zone = fields.ForeignKey(ZoneResource, 'zone', full=True)

    class Meta:
        resource_name = "hierarchy"
        allowed_methods = ['get']  # read-only API
        include_resource_uri = False
        queryset = School.objects.all()
        fields = ['EMIS', 'name', 'zone']
        filtering = {
            'EMIS': ALL}

    def prepend_urls(self):
        # Route /hierarchy/EMIS/<code>/ to the detail view so schools are
        # addressed by their EMIS code instead of the database pk.
        return [
            url(r"^(?P<resource_name>%s)/EMIS/(?P<EMIS>[\w\d_.-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
        ]
changed hierarchy resource to school
from tastypie.resources import ModelResource
from tastypie import fields
from models import (Province, District, Zone, School)
from tastypie.resources import ALL, ALL_WITH_RELATIONS
from django.conf.urls import url
class ProvinceResource(ModelResource):
    """
    Read endpoint for Province rows.

    NOTE(review): unlike the original docstring claimed, no resource_name is
    set here; tastypie will derive the endpoint name from the class name.
    """
    class Meta:
        queryset = Province.objects.all()
class DistrictResource(ModelResource):
    """
    Read endpoint for District rows; each district embeds its parent
    Province data (full=True) instead of just a resource URI.
    """
    province = fields.ForeignKey(ProvinceResource, 'province', full=True)

    class Meta:
        queryset = District.objects.all()
class ZoneResource(ModelResource):
    """
    Read endpoint for Zone rows; each zone embeds its parent District
    (which in turn embeds its Province), giving the full chain upward.
    """
    district = fields.ForeignKey(DistrictResource, 'district', full=True)

    class Meta:
        queryset = Zone.objects.all()
class SchoolResource(ModelResource):
    """
    Read-only endpoint (resource_name "school") returning a school's emis
    code, name and its full zone -> district -> province chain.

    Detail lookups are done by emis code rather than primary key.
    """
    zone = fields.ForeignKey(ZoneResource, 'zone', full=True)

    class Meta:
        resource_name = "school"
        allowed_methods = ['get']  # read-only API
        include_resource_uri = False
        queryset = School.objects.all()
        fields = ['emis', 'name', 'zone']
        filtering = {
            'emis': ALL}

    def prepend_urls(self):
        # Route /school/EMIS/<code>/ to the detail view, looking schools up
        # by emis code. NOTE(review): the path segment is still the literal
        # uppercase "EMIS" while the field/kwarg is lowercase -- confirm the
        # public URL is intended to keep the old casing.
        return [
            url(r"^(?P<resource_name>%s)/EMIS/(?P<emis>[\w\d_.-]+)/$" % self._meta.resource_name, self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
        ]
|
a2f0c194-2d5f-11e5-acec-b88d120fff5e
a2fa580a-2d5f-11e5-8e5b-b88d120fff5e
a2fa580a-2d5f-11e5-8e5b-b88d120fff5e |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals, print_function
from six import iteritems, text_type
from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json
# public
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template, get_email_from_template
__version__ = '8.7.3'
__title__ = "Frappe Framework"

# Thread/greenlet-local storage; all per-request/per-site state hangs off this.
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
def _(msg, lang=None):
    """Return the translation of *msg* for the current (or given) language.

    Falls back to *msg* itself when no translation entry exists.
    """
    from frappe.translate import get_full_dict

    # Make sure the thread-local carries a language before we read it.
    if not hasattr(local, 'lang'):
        local.lang = lang or 'en'
    effective_lang = lang if lang else local.lang
    # Translation tables are keyed on stripped unicode source strings.
    key = as_unicode(msg).strip()
    translated = get_full_dict(effective_lang).get(key)
    return translated or key
def as_unicode(text, encoding='utf-8'):
    '''Convert to unicode if required.

    :param text: Value to convert; ``None`` becomes the empty string.
    :param encoding: Codec used to decode byte strings (default utf-8).
    '''
    if isinstance(text, text_type):
        return text
    elif text is None:  # fix: was `text==None`; identity test is the correct idiom
        return ''
    elif isinstance(text, basestring):
        # Python 2 byte string -> decode with the given encoding.
        return text_type(text, encoding)
    else:
        return text_type(text)
def get_lang_dict(fortype, name=None):
    """Returns the translated language dict for the given type and name.

    :param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
    :param name: name of the document for which assets are to be returned."""
    from frappe.translate import get_dict
    return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
    """Guess and set user language for the session. `frappe.local.lang`

    NOTE(review): `user_language` is accepted but never used here -- the
    language always comes from the user's settings via get_user_lang.
    Confirm whether callers expect it to act as an override."""
    from frappe.translate import get_user_lang
    local.lang = get_user_lang(user)
# local-globals
# Lazy proxies into the werkzeug `local` object above. Each resolves to
# `local.<name>` at access time, so these module attributes always reflect
# the state of the current thread's request/site.
db = local("db")                        # database connection (set in connect())
conf = local("conf")                    # site configuration (site_config.json)
form = form_dict = local("form_dict")   # parsed request parameters
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None, new_site=False):
    """Initialize frappe for the current site. Reset thread locals `frappe.local`"""
    if getattr(local, "initialised", None):
        # Already initialised on this thread -- never re-run (would wipe state).
        return

    if not sites_path:
        sites_path = '.'

    # Per-request log buffers.
    local.error_log = []
    local.message_log = []
    local.debug_log = []
    local.realtime_log = []
    # Framework-wide runtime flags consulted throughout the codebase.
    local.flags = _dict({
        "ran_schedulers": [],
        "currently_saving": [],
        "redirect_location": "",
        "in_install_db": False,
        "in_install_app": False,
        "in_import": False,
        "in_test": False,
        "mute_messages": False,
        "ignore_links": False,
        "mute_emails": False,
        "has_dataurl": False,
        "new_site": new_site
    })
    local.rollback_observers = []
    local.test_objects = {}

    # Site identity and filesystem layout.
    local.site = site
    local.sites_path = sites_path
    local.site_path = os.path.join(sites_path, site)

    local.request_ip = None
    local.response = _dict({"docs":[]})
    local.task_id = None

    # Configuration and language.
    local.conf = _dict(get_site_config())
    local.lang = local.conf.lang or "en"
    local.lang_full_dict = None

    local.module_app = None
    local.app_modules = None
    local.system_settings = _dict()

    # User/session state -- filled in later by connect()/set_user().
    local.user = None
    local.user_perms = None
    local.session = None
    local.role_permissions = {}
    local.valid_columns = {}
    local.new_doc_templates = {}
    local.link_count = {}

    # Jinja environment and per-thread caches.
    local.jenv = None
    local.jloader = None
    local.cache = {}
    local.meta_cache = {}

    local.form_dict = _dict()
    local.session = _dict()

    setup_module_map()

    local.initialised = True
def connect(site=None, db_name=None):
    """Connect to site database instance.

    :param site: If site is given, calls `frappe.init`.
    :param db_name: Optional. Will use from `site_config.json`."""
    # NOTE: implicit relative import (Python 2) -- resolves to frappe/database.py
    from database import Database
    if site:
        init(site)
    local.db = Database(user=db_name or local.conf.db_name)
    # New connections start with full rights; callers downgrade via set_user().
    set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
    """Returns `site_config.json` combined with `sites/common_site_config.json`.

    `site_config` is a set of site wide settings like database name, password,
    email etc. Site-specific values override the common ones.
    """
    merged = {}
    sites_path = sites_path or getattr(local, "sites_path", None)
    site_path = site_path or getattr(local, "site_path", None)

    if sites_path:
        common_path = os.path.join(sites_path, "common_site_config.json")
        if os.path.exists(common_path):
            merged.update(get_file_json(common_path))

    if site_path:
        config_path = os.path.join(site_path, "site_config.json")
        if os.path.exists(config_path):
            merged.update(get_file_json(config_path))
        elif local.site and not local.flags.new_site:
            # A named, pre-existing site without a config file is a hard error.
            print("{0} does not exist".format(local.site))
            sys.exit(1)

    return _dict(merged)
def get_conf(site=None):
    """Return the active site config, initialising a temporary site if needed."""
    if hasattr(local, 'conf'):
        return local.conf
    # No initialised site yet: spin one up just long enough to read its config
    # (with site=None this loads only common_site_config.json).
    with init_site(site):
        return local.conf
class init_site:
    """Context manager: `init` the given site on enter, `destroy` on exit."""

    def __init__(self, site=None):
        # site=None initializes the empty site ('') so that only
        # common_site_config.json is loaded.
        self.site = site if site else ''

    def __enter__(self):
        init(self.site)
        return local

    def __exit__(self, exc_type, exc_value, exc_tb):
        destroy()
def destroy():
    """Closes connection and releases werkzeug local."""
    if db:
        db.close()
    # Drop all thread-local state so the worker can serve another site/request.
    release_local(local)
# memcache -- process-wide, shared across sites/requests.
redis_server = None

def cache():
    """Return the lazily created shared Redis connection."""
    global redis_server
    if not redis_server:
        from frappe.utils.redis_wrapper import RedisWrapper
        url = conf.get('redis_cache') or "redis://localhost:11311"
        redis_server = RedisWrapper.from_url(url)
    return redis_server
def get_traceback():
    """Returns error traceback."""
    # NOTE: implicit relative import (Python 2) -- resolves to frappe/utils
    import utils
    return utils.get_traceback()
def errprint(msg):
    """Record an error message; it is sent back as `exc` in the response.

    :param msg: Message."""
    msg = as_unicode(msg)
    # Echo to stdout when running outside a request handler or in developer mode.
    should_echo = (not request) or ("cmd" not in local.form_dict) or conf.developer_mode
    if should_echo:
        print(msg.encode('utf-8'))
    error_log.append(msg)
def log(msg):
    """Append *msg* to the per-request `debug_log`.

    :param msg: Message."""
    # Outside a web request, optionally echo to stdout when logging is enabled.
    if not request and conf.get("logging"):
        print(repr(msg))
    debug_log.append(as_unicode(msg))
def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False):
    """Print a message to the user (via HTTP response).

    Messages are sent in the `__server_messages` property in the
    response JSON and shown in a pop-up / modal.

    :param msg: Message.
    :param title: [optional] Message title.
    :param raise_exception: [optional] Raise given exception and show message.
    :param as_table: [optional] If `msg` is a list of lists, render as HTML table.
    :param indicator: [optional] Indicator color; forced to 'red' when raising.
    :param alert: [optional] Flag the message as a passing alert.
    """
    from utils import encode
    out = _dict(message=msg)

    def _raise_exception():
        # Only raises when the caller asked for it; optionally rolls back first.
        if raise_exception:
            if flags.rollback_on_exception:
                db.rollback()
            import inspect
            if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
                raise raise_exception(encode(msg))
            else:
                # Any non-exception truthy value falls back to ValidationError.
                raise ValidationError(encode(msg))

    if flags.mute_messages:
        # Suppress user-visible output but still honour raise_exception.
        _raise_exception()
        return

    if as_table and type(msg) in (list, tuple):
        out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'

    if flags.print_messages and out.msg:
        print("Message: " + repr(out.msg).encode("utf-8"))

    if title:
        out.title = title

    if not indicator and raise_exception:
        indicator = 'red'

    if indicator:
        out.indicator = indicator

    if alert:
        out.alert = 1

    # Queue for the client, then raise (if requested).
    message_log.append(json.dumps(out))
    _raise_exception()
def clear_messages():
    # Reset the per-request queue of messages destined for the client.
    local.message_log = []
def throw(msg, exc=ValidationError, title=None):
    """Raise *exc* and queue *msg* for display to the user (via `msgprint`).

    :param msg: Message to show.
    :param exc: Exception class. Default `frappe.ValidationError`.
    :param title: [optional] Message title.
    """
    msgprint(msg, title=title, indicator='red', raise_exception=exc)
def emit_js(js, user=False, **kwargs):
    """Broadcast a javascript snippet for the client to eval via realtime.

    :param js: Javascript source to run client-side.
    :param user: Target user; default ``False`` means the current session user.
    """
    # NOTE: Python 2 module -- `async` was not yet a reserved keyword.
    from frappe.async import publish_realtime
    # NOTE(review): `== False` (not `is False`) also matches 0 -- presumably
    # an intentional sentinel check; confirm before changing.
    if user == False:
        user = session.user
    publish_realtime('eval_js', js, user=user, **kwargs)
def create_folder(path, with_init=False):
    """Create a folder at *path*, optionally adding an `__init__.py` file.

    :param path: Folder path.
    :param with_init: Create `__init__.py` in the newly created folder."""
    from frappe.utils import touch_file
    if os.path.exists(path):
        # Nothing to do -- existing folders are left untouched.
        return
    os.makedirs(path)
    if with_init:
        touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
    """Set current user.

    :param username: **User** name to set as current user."""
    local.session.user = username
    local.session.sid = username
    # Invalidate every per-user cache so nothing leaks between users.
    local.cache = {}
    local.form_dict = _dict()
    local.jenv = None
    local.session.data = _dict()
    local.role_permissions = {}
    local.new_doc_templates = {}
    local.user_perms = None
def get_user():
    # Return the memoized UserPermissions wrapper for the session user;
    # built lazily on first access, reset by set_user()/clear_cache().
    from frappe.utils.user import UserPermissions
    if not local.user_perms:
        local.user_perms = UserPermissions(local.session.user)
    return local.user_perms
def get_roles(username=None):
    """Return the list of roles for *username* (default: current user)."""
    if not local.session:
        # No session bootstrapped yet -- everyone is a Guest.
        return ["Guest"]
    if not username:
        return get_user().get_roles()
    import frappe.permissions
    return frappe.permissions.get_roles(username)
def get_request_header(key, default=None):
    """Return HTTP request header.

    :param key: HTTP header key.
    :param default: Default value."""
    return request.headers.get(key, default)
def sendmail(recipients=[], sender="", subject="No Subject", message="No Message",
        as_markdown=False, delayed=True, reference_doctype=None, reference_name=None,
        unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
        attachments=None, content=None, doctype=None, name=None, reply_to=None,
        cc=[], message_id=None, in_reply_to=None, send_after=None, expose_recipients=None,
        send_priority=1, communication=None, retry=1, now=None, read_receipt=None, is_notification=False,
        inline_images=None, template=None, args=None, header=None):
    """Send email using user's default **Email Account** or global default **Email Account**.

    :param recipients: List of recipients.
    :param sender: Email sender. Default is current user.
    :param subject: Email Subject.
    :param message: (or `content`) Email Content.
    :param as_markdown: Convert content markdown to HTML.
    :param delayed: Send via scheduled email sender **Email Queue**. Don't send immediately. Default is true
    :param send_priority: Priority for Email Queue, default 1.
    :param reference_doctype: (or `doctype`) Append as communication to this DocType.
    :param reference_name: (or `name`) Append as communication to this document name.
    :param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
    :param unsubscribe_params: Unsubscribe paramaters to be loaded on the unsubscribe_method [optional] (dict).
    :param attachments: List of attachments.
    :param reply_to: Reply-To Email Address.
    :param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
    :param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
    :param send_after: Send after the given datetime.
    :param expose_recipients: Display all recipients in the footer message - "This email was sent to"
    :param communication: Communication link to be set in Email Queue record
    :param inline_images: List of inline images as {"filename", "filecontent"}. All src properties will be replaced with random Content-Id
    :param template: Name of html template from templates/emails folder
    :param args: Arguments for rendering the template
    :param header: Append header in email
    """
    # NOTE(review): `recipients=[]` and `cc=[]` are mutable defaults; safe only
    # as long as they are never mutated here (they are only passed through).
    text_content = None
    if template:
        message, text_content = get_email_from_template(template, args)

    message = content or message

    if as_markdown:
        from markdown2 import markdown
        message = markdown(message)

    if not delayed:
        now = True

    # NOTE: implicit relative import (Python 2) -- this is frappe/email/queue.py,
    # NOT the stdlib `email` package.
    import email.queue
    email.queue.send(recipients=recipients, sender=sender,
        subject=subject, message=message, text_content=text_content,
        reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
        unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
        attachments=attachments, reply_to=reply_to, cc=cc, message_id=message_id, in_reply_to=in_reply_to,
        send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority,
        communication=communication, now=now, read_receipt=read_receipt, is_notification=is_notification,
        inline_images=inline_images, header=header)
# Registries of HTTP-callable functions, populated by @whitelist.
whitelisted = []
guest_methods = []
xss_safe_methods = []

def whitelist(allow_guest=False, xss_safe=False):
    """Decorator that registers a function as callable over HTTP.

    A whitelisted function is reachable as `/api/method/[path.to.method]`.

    :param allow_guest: Allow non logged-in (Guest) users to call this method.
    :param xss_safe: Also record the method in the XSS-safe registry.

    Use as::

        @frappe.whitelist()
        def myfunc(param1, param2):
            pass
    """
    def register(fn):
        # Appending mutates the module-level lists; no `global` needed.
        whitelisted.append(fn)
        if allow_guest:
            guest_methods.append(fn)
        if xss_safe:
            xss_safe_methods.append(fn)
        return fn
    return register
def only_for(roles):
    """Raise `frappe.PermissionError` unless the user has one of *roles*.

    :param roles: Role name or tuple/list of role names to check."""
    if local.flags.in_test:
        # Permission checks are skipped entirely while running tests.
        return
    if isinstance(roles, (tuple, list)):
        required = set(roles)
    else:
        required = {roles}
    if required.isdisjoint(get_roles()):
        raise PermissionError
def clear_cache(user=None, doctype=None):
    """Clear **User**, **DocType** or global cache.

    :param user: If user is given, only user cache is cleared.
    :param doctype: If doctype is given, only DocType cache is cleared."""
    import frappe.sessions
    if doctype:
        # Narrow clear: just this DocType's metadata.
        import frappe.model.meta
        frappe.model.meta.clear_cache(doctype)
        reset_metadata_version()
    elif user:
        # Narrow clear: just this user's session cache.
        frappe.sessions.clear_cache(user)
    else: # everything
        # NOTE: implicit relative import (Python 2) -- frappe/translate.py
        import translate
        frappe.sessions.clear_cache()
        translate.clear_cache()
        reset_metadata_version()
        clear_domainification_cache()
    # Always reset in-process (thread-local) caches and run app hooks.
    local.cache = {}
    local.new_doc_templates = {}
    for fn in get_hooks("clear_cache"):
        get_attr(fn)()
    local.role_permissions = {}
def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False):
    """Check (and optionally enforce) a permission for a doctype or document.

    :param doctype: DocType for which permission is to be checked.
    :param ptype: Permission type (`read`, `write`, `create`, `submit`,
        `cancel`, `amend`). Default: `read`.
    :param doc: [optional] Check User permissions for this specific doc.
    :param user: [optional] Check for given user. Default: current user.
    :param throw: Raise via `frappe.throw` instead of returning False.
    """
    import frappe.permissions
    if doc and not doctype:
        doctype = doc.doctype
    allowed = frappe.permissions.has_permission(
        doctype, ptype, doc=doc, verbose=verbose, user=user)
    if not allowed and throw:
        subject = (doc.doctype + " " + doc.name) if doc else doctype
        frappe.throw(_("No permission for {0}").format(subject))
    return allowed
def has_website_permission(doc=None, ptype='read', user=None, verbose=False, doctype=None):
"""Raises `frappe.PermissionError` if not permitted.
:param doctype: DocType for which permission is to be check.
:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
:param doc: Checks User permissions for given doc.
:param user: [optional] Check for given user. Default: current user."""
if not user:
user = session.user
if doc:
if isinstance(doc, basestring):
doc = get_doc(doctype, doc)
doctype = doc.doctype
if doc.flags.ignore_permissions:
return True
# check permission in controller
if hasattr(doc, 'has_website_permission'):
return doc.has_website_permission(ptype, verbose=verbose)
hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
if hooks:
for method in hooks:
result = call(method, doc=doc, ptype=ptype, user=user, verbose=verbose)
# if even a single permission check is Falsy
if not result:
return False
# else it is Truthy
return True
else:
return False
def is_table(doctype):
"""Returns True if `istable` property (indicating child Table) is set for given DocType."""
def get_tables():
return db.sql_list("select name from tabDocType where istable=1")
tables = cache().get_value("is_table", get_tables)
return doctype in tables
def get_precision(doctype, fieldname, currency=None, doc=None):
"""Get precision for a given field"""
from frappe.model.meta import get_field_precision
return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency)
def generate_hash(txt=None, length=None):
	"""Generate a random SHA224 hex digest from the given text, the
	current timestamp and a random string.

	:param txt: [optional] Text to mix into the hash.
	:param length: [optional] Truncate the digest to this many characters.
	"""
	import hashlib, time
	from .utils import random_string
	# encode before hashing: hashlib requires bytes on Python 3
	data = (txt or "") + repr(time.time()) + repr(random_string(8))
	digest = hashlib.sha224(data.encode()).hexdigest()
	if length:
		digest = digest[:length]
	return digest
def reset_metadata_version():
"""Reset `metadata_version` (Client (Javascript) build ID) hash."""
v = generate_hash()
cache().set_value("metadata_version", v)
return v
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
"""Returns a new document of the given DocType with defaults set.
:param doctype: DocType of the new document.
:param parent_doc: [optional] add to parent document.
:param parentfield: [optional] add against this `parentfield`."""
from frappe.model.create_new import get_new_doc
return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value=None):
"""Set document value. Calls `frappe.client.set_value`"""
import frappe.client
return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(arg1, arg2=None):
"""Return a `frappe.model.document.Document` object of the given type and name.
:param arg1: DocType name as string **or** document JSON.
:param arg2: [optional] Document name as string.
Examples:
# insert a new document
todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
tood.insert()
# open an existing document
todo = frappe.get_doc("ToDo", "TD0001")
"""
import frappe.model.document
return frappe.model.document.get_doc(arg1, arg2)
def get_last_doc(doctype):
"""Get last created document of this type."""
d = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
if d:
return get_doc(doctype, d[0].name)
else:
raise DoesNotExistError
def get_single(doctype):
"""Return a `frappe.model.document.Document` object of the given Single doctype."""
return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
"""Get `frappe.model.meta.Meta` instance of given doctype name."""
import frappe.model.meta
return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
import frappe.modules
return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
ignore_permissions=False, flags=None):
"""Delete a document. Calls `frappe.model.delete_doc.delete_doc`.
:param doctype: DocType of document to be delete.
:param name: Name of document to be delete.
:param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
:param ignore_doctypes: Ignore if child table is one of these.
:param for_reload: Call `before_reload` trigger before deleting.
:param ignore_permissions: Ignore user permissions."""
import frappe.model.delete_doc
frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload,
ignore_permissions, flags)
def delete_doc_if_exists(doctype, name, force=0):
"""Delete document if exists."""
if db.exists(doctype, name):
delete_doc(doctype, name, force=force)
def reload_doctype(doctype, force=False, reset_permissions=False):
"""Reload DocType from model (`[module]/[doctype]/[name]/[name].json`) files."""
reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype),
force=force, reset_permissions=reset_permissions)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.
:param module: Module name.
:param dt: DocType name.
:param dn: Document name.
:param force: Reload even if `modified` timestamp matches.
"""
import frappe.modules
return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions)
def rename_doc(*args, **kwargs):
"""Rename a document. Calls `frappe.model.rename_doc.rename_doc`"""
from frappe.model.rename_doc import rename_doc
return rename_doc(*args, **kwargs)
def get_module(modulename):
"""Returns a module object for given Python module name using `importlib.import_module`."""
return importlib.import_module(modulename)
def scrub(txt):
	"""Return a sluggified version of *txt*: spaces and hyphens become
	underscores, result is lower-cased. e.g. `Sales Order` -> `sales_order`."""
	return txt.lower().replace(' ', '_').replace('-', '_')
def unscrub(txt):
	"""Return a titlified version of *txt*: underscores and hyphens become
	spaces, each word capitalised. e.g. `sales_order` -> `Sales Order`."""
	for separator in ('_', '-'):
		txt = txt.replace(separator, ' ')
	return txt.title()
def get_module_path(module, *joins):
"""Get the path of the given module name.
:param module: Module name.
:param *joins: Join additional path elements using `os.path.join`."""
module = scrub(module)
return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
"""Return path of given app.
:param app: App name.
:param *joins: Join additional path elements using `os.path.join`."""
return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
"""Return path of current site.
:param *joins: Join additional path elements using `os.path.join`."""
return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
	"""Return path of given Python module name.

	:param modulename: Python module name.
	:param *joins: Join additional path elements using `os.path.join`."""
	if "public" not in joins:
		# scrub path parts (e.g. "My Module" -> "my_module"); public asset
		# paths keep their original names
		joins = [scrub(part) for part in joins]
	return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins)
def get_module_list(app_name):
"""Get list of modules for given all via `app/modules.txt`."""
return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_internal_apps=True, sites_path=None):
	"""Get list of all apps via `sites/apps.txt`.

	:param with_internal_apps: Also include apps listed in the site's own `apps.txt`.
	:param sites_path: [optional] Path of the sites folder; defaults to `local.sites_path`."""
	if not sites_path:
		sites_path = local.sites_path
	apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)
	if with_internal_apps:
		for app in get_file_items(os.path.join(local.site_path, "apps.txt")):
			if app not in apps:
				apps.append(app)
	# "frappe" must always be first in the app list
	if "frappe" in apps:
		apps.remove("frappe")
	apps.insert(0, 'frappe')
	return apps
def get_installed_apps(sort=False, frappe_last=False):
	"""Get list of installed apps in current site.

	:param sort: Order the apps as in `get_all_apps` (install order).
	:param frappe_last: Move "frappe" to the end of the list."""
	# NOTE(review): the default True means an empty list is returned whenever
	# the flag is absent (e.g. flags not yet initialised) -- confirm intended
	if getattr(flags, "in_install_db", True):
		return []
	if not db:
		connect()
	installed = json.loads(db.get_global("installed_apps") or "[]")
	if sort:
		installed = [app for app in get_all_apps(True) if app in installed]
	if frappe_last:
		if 'frappe' in installed:
			installed.remove('frappe')
		installed.append('frappe')
	return installed
def get_doc_hooks():
	'''Returns hooked methods for given doc. It will expand the dict tuple if required.

	Result is memoized on `local.doc_events_hooks` for the request.'''
	if not hasattr(local, 'doc_events_hooks'):
		hooks = get_hooks('doc_events', {})
		out = {}
		for key, value in iteritems(hooks):
			if isinstance(key, tuple):
				# a tuple key applies the same hooks to several doctypes
				for doctype in key:
					append_hook(out, doctype, value)
			else:
				append_hook(out, key, value)
		local.doc_events_hooks = out
	return local.doc_events_hooks
def get_hooks(hook=None, default=None, app_name=None):
	"""Get hooks via `app/hooks.py`

	:param hook: Name of the hook. Will gather all hooks for this name and return as a list.
	:param default: Default if no hook found.
	:param app_name: Filter by app."""
	def load_app_hooks(app_name=None):
		# collect hook values from every installed app's hooks module
		hooks = {}
		for app in [app_name] if app_name else get_installed_apps(sort=True):
			app = "frappe" if app=="webnotes" else app
			try:
				app_hooks = get_module(app + ".hooks")
			except ImportError:
				if local.flags.in_install_app:
					# if app is not installed while restoring
					# ignore it
					pass
				print('Could not find app "{0}"'.format(app_name))
				if not request:
					# console/CLI context: exit hard instead of raising
					sys.exit(1)
				raise
			# every public attribute of the hooks module is a hook
			for key in dir(app_hooks):
				if not key.startswith("_"):
					append_hook(hooks, key, getattr(app_hooks, key))
		return hooks
	if app_name:
		hooks = _dict(load_app_hooks(app_name))
	else:
		# merged hooks of all apps are cached in redis
		hooks = _dict(cache().get_value("app_hooks", load_app_hooks))
	if hook:
		return hooks.get(hook) or (default if default is not None else [])
	else:
		return hooks
def append_hook(target, key, value):
	'''Append a hook *value* to *target* under *key*.

	Dict values (like ``doc_events``) are merged recursively, collecting
	each inner value into a list; scalar/list values are appended to a
	list kept against the key.
	'''
	if isinstance(value, dict):
		# merge nested hooks key by key
		inner_target = target.setdefault(key, {})
		for inner_key in value:
			append_hook(inner_target, inner_key, value[inner_key])
	else:
		bucket = target.setdefault(key, [])
		bucket.extend(value if isinstance(value, list) else [value])
def setup_module_map():
"""Rebuild map of all modules (internal)."""
_cache = cache()
if conf.db_name:
local.app_modules = _cache.get_value("app_modules")
local.module_app = _cache.get_value("module_app")
if not (local.app_modules and local.module_app):
local.module_app, local.app_modules = {}, {}
for app in get_all_apps(True):
if app=="webnotes": app="frappe"
local.app_modules.setdefault(app, [])
for module in get_module_list(app):
module = scrub(module)
local.module_app[module] = app
local.app_modules[app].append(module)
if conf.db_name:
_cache.set_value("app_modules", local.app_modules)
_cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
"""Returns items from text file as a list. Ignores empty lines."""
import frappe.utils
content = read_file(path, raise_not_found=raise_not_found)
if content:
content = frappe.utils.strip(content)
return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
else:
return []
def get_file_json(path):
"""Read a file and return parsed JSON object."""
with open(path, 'r') as f:
return json.load(f)
def read_file(path, raise_not_found=False):
	"""Open a file and return its content as Unicode.

	:param path: Path of the file to read.
	:param raise_not_found: Raise `IOError` if the file does not exist;
		otherwise return None."""
	if isinstance(path, text_type):
		# file-system calls expect encoded byte paths on Python 2
		path = path.encode("utf-8")
	if os.path.exists(path):
		with open(path, "r") as f:
			return as_unicode(f.read())
	elif raise_not_found:
		raise IOError("{} Not Found".format(path))
	else:
		return None
def get_attr(method_string):
"""Get python method object from its name."""
app_name = method_string.split(".")[0]
if not local.flags.in_install and app_name not in get_installed_apps():
throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)
modulename = '.'.join(method_string.split('.')[:-1])
methodname = method_string.split('.')[-1]
return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
	"""Call a function, passing only the keyword arguments it accepts.

	:param fn: Function object, or dotted-path string resolved via `get_attr`.
	"""
	if isinstance(fn, basestring):
		fn = get_attr(fn)
	if hasattr(fn, 'fnargs'):
		# whitelisted methods carry their accepted argument names in `fnargs`
		fnargs = fn.fnargs
		# fix: `varkw` must be bound here too, else an unexpected kwarg
		# raised NameError in the filter loop below
		varkw = None
	else:
		fnargs, varargs, varkw, defaults = inspect.getargspec(fn)
	newargs = {}
	for a in kwargs:
		# pass a keyword only if the function accepts it (or takes **kwargs)
		if (a in fnargs) or varkw:
			newargs[a] = kwargs.get(a)
	# `flags` is frappe-internal and never forwarded
	if "flags" in newargs:
		del newargs["flags"]
	return fn(*args, **newargs)
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
"""Create a new **Property Setter** (for overriding DocType and DocField properties).
If doctype is not specified, it will create a property setter for all fields with the
given fieldname"""
args = _dict(args)
if not args.doctype_or_field:
args.doctype_or_field = 'DocField'
if not args.property_type:
args.property_type = db.get_value('DocField',
{'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data'
if not args.doctype:
doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname)
else:
doctype_list = [args.doctype]
for doctype in doctype_list:
if not args.property_type:
args.property_type = db.get_value('DocField',
{'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data'
ps = get_doc({
'doctype': "Property Setter",
'doctype_or_field': args.doctype_or_field,
'doc_type': doctype,
'field_name': args.fieldname,
'property': args.property,
'value': args.value,
'property_type': args.property_type or "Data",
'__islocal': 1
})
ps.flags.ignore_validate = ignore_validate
ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
ps.validate_fieldtype_change()
ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
"""Import a file using Data Import Tool."""
from frappe.core.page.data_import_tool import data_import_tool
data_import_tool.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
	"""Return a copy of the given document, reset as a new (unsaved) document.

	:param doc: Document object (or dict) to copy.
	:param ignore_no_copy: If True (default), fields marked `no_copy` also get copied."""
	import copy
	def remove_no_copy_fields(d):
		# blank out every field flagged `no_copy` in the doctype meta
		for df in d.meta.get("fields", {"no_copy": 1}):
			if hasattr(d, df.fieldname):
				d.set(df.fieldname, None)
	fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by']
	if not local.flags.in_test:
		fields_to_clear.append("docstatus")
	if not isinstance(doc, dict):
		d = doc.as_dict()
	else:
		d = doc
	newdoc = get_doc(copy.deepcopy(d))
	newdoc.set("__islocal", 1)
	for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']):
		newdoc.set(fieldname, None)
	if not ignore_no_copy:
		remove_no_copy_fields(newdoc)
	# reset child rows as new documents too
	for i, d in enumerate(newdoc.get_all_children()):
		d.set("__islocal", 1)
		for fieldname in fields_to_clear:
			d.set(fieldname, None)
		if not ignore_no_copy:
			remove_no_copy_fields(d)
	return newdoc
def compare(val1, condition, val2):
"""Compare two values using `frappe.utils.compare`
`condition` could be:
- "^"
- "in"
- "not in"
- "="
- "!="
- ">"
- "<"
- ">="
- "<="
- "not None"
- "None"
"""
import frappe.utils
return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None,
context=None, indicator_color=None, primary_action='/', primary_label = None, fullpage=False):
"""Send response as a web page with a message rather than JSON. Used to show permission errors etc.
:param title: Page title and heading.
:param message: Message to be shown.
:param success: Alert message.
:param http_status_code: HTTP status code
:param context: web template context
:param indicator_color: color of indicator in title
:param primary_action: route on primary button (default is `/`)
:param primary_label: label on primary button (defaut is "Home")
:param fullpage: hide header / footer"""
local.message_title = title
local.message = html
local.response['type'] = 'page'
local.response['route'] = 'message'
if http_status_code:
local.response['http_status_code'] = http_status_code
if not context:
context = {}
if not indicator_color:
if success:
indicator_color = 'green'
elif http_status_code and http_status_code > 300:
indicator_color = 'red'
else:
indicator_color = 'blue'
context['indicator_color'] = indicator_color
context['primary_label'] = primary_label
context['primary_action'] = primary_action
context['error_code'] = http_status_code
context['fullpage'] = fullpage
local.response['context'] = context
def redirect_to_message(title, html, http_status_code=None, context=None, indicator_color=None):
"""Redirects to /message?id=random
Similar to respond_as_web_page, but used to 'redirect' and show message pages like success, failure, etc. with a detailed message
:param title: Page title and heading.
:param message: Message to be shown.
:param http_status_code: HTTP status code.
Example Usage:
frappe.redirect_to_message(_('Thank you'), "<div><p>You will receive an email at test@example.com</p></div>")
"""
message_id = generate_hash(length=8)
message = {
'context': context or {},
'http_status_code': http_status_code or 200
}
message['context'].update({
'header': title,
'title': title,
'message': html
})
if indicator_color:
message['context'].update({
"indicator_color": indicator_color
})
cache().set_value("message_id:{0}".format(message_id), message, expires_in_sec=60)
location = '/message?id={0}'.format(message_id)
if not getattr(local, 'is_ajax', False):
local.response["type"] = "redirect"
local.response["location"] = location
else:
return location
def build_match_conditions(doctype, as_condition=True):
"""Return match (User permissions) for given doctype as list or SQL."""
import frappe.desk.reportview
return frappe.desk.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, *args, **kwargs):
"""List database query via `frappe.model.db_query`. Will also check for permissions.
:param doctype: DocType on which query is to be made.
:param fields: List of fields or `*`.
:param filters: List of filters (see example).
:param order_by: Order By e.g. `modified desc`.
:param limit_page_start: Start results at record #. Default 0.
:param limit_poge_length: No of records in the page. Default 20.
Example usage:
# simple dict filter
frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})
# filter as a list of lists
frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])
# filter as a list of dicts
frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
"""
import frappe.model.db_query
return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
"""List database query via `frappe.model.db_query`. Will **not** check for conditions.
Parameters are same as `frappe.get_list`
:param doctype: DocType on which query is to be made.
:param fields: List of fields or `*`. Default is: `["name"]`.
:param filters: List of filters (see example).
:param order_by: Order By e.g. `modified desc`.
:param limit_page_start: Start results at record #. Default 0.
:param limit_poge_length: No of records in the page. Default 20.
Example usage:
# simple dict filter
frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})
# filter as a list of lists
frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])
# filter as a list of dicts
frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
"""
kwargs["ignore_permissions"] = True
if not "limit_page_length" in kwargs:
kwargs["limit_page_length"] = 0
return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
"""Returns a document property or list of properties.
Alias for `frappe.db.get_value`
:param doctype: DocType name.
:param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
:param fieldname: Column name.
:param ignore: Don't raise exception if table, column is missing.
:param as_dict: Return values as dict.
:param debug: Print query in error log.
"""
return db.get_value(*args, **kwargs)
def as_json(obj, indent=1):
from frappe.utils.response import json_handler
return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler)
def are_emails_muted():
from utils import cint
return flags.mute_emails or cint(conf.get("mute_emails") or 0) or False
def get_test_records(doctype):
"""Returns list of objects from `test_records.json` in the given doctype's folder."""
from frappe.modules import get_doctype_module, get_module_path
path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json")
if os.path.exists(path):
with open(path, "r") as f:
return json.loads(f.read())
else:
return []
def format_value(*args, **kwargs):
"""Format value with given field properties.
:param value: Value to be formatted.
:param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
import frappe.utils.formatters
return frappe.utils.formatters.format_value(*args, **kwargs)
def format(*args, **kwargs):
"""Format value with given field properties.
:param value: Value to be formatted.
:param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
import frappe.utils.formatters
return frappe.utils.formatters.format_value(*args, **kwargs)
def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output = None):
"""Get Print Format for given document.
:param doctype: DocType of document.
:param name: Name of document.
:param print_format: Print Format name. Default 'Standard',
:param style: Print Format style.
:param as_pdf: Return as PDF. Default False."""
from frappe.website.render import build_page
from frappe.utils.pdf import get_pdf
local.form_dict.doctype = doctype
local.form_dict.name = name
local.form_dict.format = print_format
local.form_dict.style = style
local.form_dict.doc = doc
if not html:
html = build_page("printview")
if as_pdf:
return get_pdf(html, output = output)
else:
return html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None):
from frappe.utils import scrub_urls
if not file_name: file_name = name
file_name = file_name.replace(' ','').replace('/','-')
print_settings = db.get_singles_dict("Print Settings")
local.flags.ignore_print_permissions = True
if int(print_settings.send_print_as_pdf or 0):
out = {
"fname": file_name + ".pdf",
"fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc)
}
else:
out = {
"fname": file_name + ".html",
"fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc)).encode("utf-8")
}
local.flags.ignore_print_permissions = False
return out
def publish_progress(*args, **kwargs):
"""Show the user progress for a long request
:param percent: Percent progress
:param title: Title
:param doctype: Optional, for DocType
:param name: Optional, for Document name
"""
import frappe.async
return frappe.async.publish_progress(*args, **kwargs)
def publish_realtime(*args, **kwargs):
"""Publish real-time updates
:param event: Event name, like `task_progress` etc.
:param message: JSON message object. For async must contain `task_id`
:param room: Room in which to publish update (default entire site)
:param user: Transmit to user
:param doctype: Transmit to doctype, docname
:param docname: Transmit to doctype, docname
:param after_commit: (default False) will emit after current transaction is committed
"""
import frappe.async
return frappe.async.publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
	"""A key-value store for caching within a single request.

	:param namespace: `frappe.local.cache[namespace]`
	:param key: `frappe.local.cache[namespace][key]` used to retrieve value
	:param generator: method to generate a value if not found in store
	:param regenerate_if_none: re-run `generator` when the cached value is None
	"""
	if namespace not in local.cache:
		local.cache[namespace] = {}
	if key not in local.cache[namespace]:
		local.cache[namespace][key] = generator()
	elif local.cache[namespace][key] is None and regenerate_if_none:
		# key exists but the previous result was None -- try again
		local.cache[namespace][key] = generator()
	return local.cache[namespace][key]
def enqueue(*args, **kwargs):
'''
Enqueue method to be executed using a background worker
:param method: method string or method object
:param queue: (optional) should be either long, default or short
:param timeout: (optional) should be set according to the functions
:param event: this is passed to enable clearing of jobs from queues
:param async: (optional) if async=False, the method is executed immediately, else via a worker
:param job_name: (optional) can be used to name an enqueue call, which can be used to prevent duplicate calls
:param kwargs: keyword arguments to be passed to the method
'''
import frappe.utils.background_jobs
return frappe.utils.background_jobs.enqueue(*args, **kwargs)
def get_doctype_app(doctype):
def _get_doctype_app():
doctype_module = local.db.get_value("DocType", doctype, "module")
return local.module_app[scrub(doctype_module)]
return local_cache("doctype_app", doctype, generator=_get_doctype_app)
loggers = {}
log_level = None
def logger(module=None, with_more_info=True):
'''Returns a python logger that uses StreamHandler'''
from frappe.utils.logger import get_logger
return get_logger(module or 'default', with_more_info=with_more_info)
def log_error(message=None, title=None):
'''Log error to Error Log'''
get_doc(dict(doctype='Error Log', error=str(message or get_traceback()),
method=title)).insert(ignore_permissions=True)
def get_desk_link(doctype, name):
return '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'.format(doctype, name, _(doctype))
def bold(text):
	"""Wrap the given text in HTML ``<b>`` tags."""
	return "<b>%s</b>" % (text,)
def safe_eval(code, eval_globals=None, eval_locals=None):
	'''A safer `eval`: builtins are stripped and only a few numeric
	helpers are whitelisted.

	NOTE(review): this is not a real sandbox -- `eval` on untrusted input
	can still be dangerous even with `__builtins__` emptied; the "__"
	check below is only a crude guard against dunder escapes like
	`__class__`/`__subclasses__`.

	:param code: Expression string to evaluate.
	:param eval_globals: [optional] Globals dict made available to the expression.
	:param eval_locals: [optional] Locals dict made available to the expression.
	'''
	whitelisted_globals = {
		"int": int,
		"float": float,
		"long": long,
		"round": round
	}
	if '__' in code:
		throw('Illegal rule {0}. Cannot use "__"'.format(bold(code)))
	if not eval_globals:
		eval_globals = {}
	eval_globals['__builtins__'] = {}
	eval_globals.update(whitelisted_globals)
	return eval(code, eval_globals, eval_locals)
def get_active_domains():
""" get the domains set in the Domain Settings as active domain """
active_domains = cache().hget("domains", "active_domains") or None
if active_domains is None:
domains = get_all("Has Domain", filters={ "parent": "Domain Settings" },
fields=["domain"], distinct=True)
active_domains = [row.get("domain") for row in domains]
active_domains.append("")
cache().hset("domains", "active_domains", active_domains)
return active_domains
def get_active_modules():
""" get the active modules from Module Def"""
active_modules = cache().hget("modules", "active_modules") or None
if active_modules is None:
domains = get_active_domains()
modules = get_all("Module Def", filters={"restrict_to_domain": ("in", domains)})
active_modules = [module.name for module in modules]
cache().hset("modules", "active_modules", active_modules)
return active_modules
def clear_domainification_cache():
_cache = cache()
_cache.delete_key("domains", "active_domains")
_cache.delete_key("modules", "active_modules")
def get_system_settings(key):
	"""Return a **System Settings** value, cached on `local.system_settings`.

	:param key: Fieldname of the System Settings value."""
	# `dict.has_key` was removed in Python 3; use the `in` operator instead
	if key not in local.system_settings:
		local.system_settings.update({key: db.get_single_value('System Settings', key)})
	return local.system_settings.get(key)
# Encode string before passing to hashlib.sha224
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals, print_function
from six import iteritems, text_type
from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json
# public
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template, get_email_from_template
__version__ = '8.7.3'
__title__ = "Frappe Framework"
local = Local()
class _dict(dict):
	"""A ``dict`` subclass that also exposes its keys as attributes.

	Missing keys read as ``None`` via attribute access; dunder lookups
	raise ``AttributeError`` so protocols like copy/pickle keep working."""

	def __getattr__(self, key):
		value = self.get(key)
		if key.startswith("__") and not value:
			raise AttributeError()
		return value

	def __setattr__(self, key, value):
		self[key] = value

	def __getstate__(self):
		# pickle the mapping itself
		return self

	def __setstate__(self, d):
		self.update(d)

	def update(self, d):
		"""Update and return self -- the missing dict feature in python."""
		super(_dict, self).update(d)
		return self

	def copy(self):
		# shallow copy that stays a _dict
		return _dict(self)
def _(msg, lang=None):
"""Returns translated string in current lang, if exists."""
from frappe.translate import get_full_dict
if not hasattr(local, 'lang'):
local.lang = lang or 'en'
if not lang:
lang = local.lang
# msg should always be unicode
msg = as_unicode(msg).strip()
# return lang_full_dict according to lang passed parameter
return get_full_dict(lang).get(msg) or msg
def as_unicode(text, encoding='utf-8'):
	'''Convert to unicode if required.

	:param text: Value to coerce; None becomes an empty string.
	:param encoding: Encoding used to decode byte strings.
	'''
	if isinstance(text, text_type):
		return text
	elif text is None:
		# use identity check, not ==, when comparing against None
		return ''
	elif isinstance(text, basestring):
		return text_type(text, encoding)
	else:
		return text_type(text)
def get_lang_dict(fortype, name=None):
"""Returns the translated language dict for the given type and name.
:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
:param name: name of the document for which assets are to be returned."""
from frappe.translate import get_dict
return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
"""Guess and set user language for the session. `frappe.local.lang`"""
from frappe.translate import get_user_lang
local.lang = get_user_lang(user)
# local-globals
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None, new_site=False):
"""Initialize frappe for the current site. Reset thread locals `frappe.local`"""
if getattr(local, "initialised", None):
return
if not sites_path:
sites_path = '.'
local.error_log = []
local.message_log = []
local.debug_log = []
local.realtime_log = []
local.flags = _dict({
"ran_schedulers": [],
"currently_saving": [],
"redirect_location": "",
"in_install_db": False,
"in_install_app": False,
"in_import": False,
"in_test": False,
"mute_messages": False,
"ignore_links": False,
"mute_emails": False,
"has_dataurl": False,
"new_site": new_site
})
local.rollback_observers = []
local.test_objects = {}
local.site = site
local.sites_path = sites_path
local.site_path = os.path.join(sites_path, site)
local.request_ip = None
local.response = _dict({"docs":[]})
local.task_id = None
local.conf = _dict(get_site_config())
local.lang = local.conf.lang or "en"
local.lang_full_dict = None
local.module_app = None
local.app_modules = None
local.system_settings = _dict()
local.user = None
local.user_perms = None
local.session = None
local.role_permissions = {}
local.valid_columns = {}
local.new_doc_templates = {}
local.link_count = {}
local.jenv = None
local.jloader =None
local.cache = {}
local.meta_cache = {}
local.form_dict = _dict()
local.session = _dict()
setup_module_map()
local.initialised = True
def connect(site=None, db_name=None):
"""Connect to site database instance.
:param site: If site is given, calls `frappe.init`.
:param db_name: Optional. Will use from `site_config.json`."""
from database import Database
if site:
init(site)
local.db = Database(user=db_name or local.conf.db_name)
set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
	"""Returns `site_config.json` combined with `sites/common_site_config.json`.
	`site_config` is a set of site wide settings like database name, password, email etc.

	:param sites_path: [optional] Path of the sites folder; defaults to `local.sites_path`.
	:param site_path: [optional] Path of this site's folder; defaults to `local.site_path`."""
	config = {}

	sites_path = sites_path or getattr(local, "sites_path", None)
	site_path = site_path or getattr(local, "site_path", None)

	if sites_path:
		# common config is applied first so the site-specific file can override it
		common_site_config = os.path.join(sites_path, "common_site_config.json")
		if os.path.exists(common_site_config):
			config.update(get_file_json(common_site_config))

	if site_path:
		site_config = os.path.join(site_path, "site_config.json")
		if os.path.exists(site_config):
			config.update(get_file_json(site_config))
		elif local.site and not local.flags.new_site:
			# a named site without a site_config.json is fatal (unless we are creating it)
			print("{0} does not exist".format(local.site))
			sys.exit(1)
			#raise IncorrectSitePath, "{0} does not exist".format(site_config)

	return _dict(config)
def get_conf(site=None):
	"""Return the active site configuration.

	Uses the already-initialised `local.conf` when present; otherwise
	bootstraps a temporary site context (possibly the empty site, which
	loads only `common_site_config.json`)."""
	if hasattr(local, 'conf'):
		return local.conf

	# no site initialised yet: spin one up just long enough to read its config
	with init_site(site):
		return local.conf
class init_site:
	"""Context manager that initialises a site context on entry and destroys it on exit."""

	def __init__(self, site=None):
		'''If site==None, initialize it for empty site ('') to load common_site_config.json'''
		self.site = site or ''

	def __enter__(self):
		# sets up frappe.local for this site and hands the local back to the caller
		init(self.site)
		return local

	def __exit__(self, type, value, traceback):
		# always tear down, even when the body raised
		destroy()
def destroy():
	"""Closes connection and releases werkzeug local."""
	if db:
		db.close()

	# drop all request-local state (frappe.local)
	release_local(local)
# memcache
redis_server = None

def cache():
	"""Returns memcache connection.

	The connection is created lazily on first use and reused afterwards."""
	global redis_server
	if redis_server is None:
		from frappe.utils.redis_wrapper import RedisWrapper
		url = conf.get('redis_cache') or "redis://localhost:11311"
		redis_server = RedisWrapper.from_url(url)
	return redis_server
def get_traceback():
	"""Return the current error traceback as a string (delegates to `utils.get_traceback`)."""
	from utils import get_traceback as _get_traceback
	return _get_traceback()
def errprint(msg):
	"""Log error. This is sent back as `exc` in response.

	:param msg: Message."""
	msg = as_unicode(msg)
	# echo to stdout outside web requests, for bare (non-"cmd") calls, or in developer mode
	if not request or (not "cmd" in local.form_dict) or conf.developer_mode:
		print(msg.encode('utf-8'))  # NOTE(review): on Python 3 this prints a bytes repr — confirm intended
	error_log.append(msg)
def log(msg):
	"""Add to `debug_log` (and echo to stdout when site logging is enabled).

	:param msg: Message."""
	if not request and (conf.get("logging") or False):
		print(repr(msg))

	debug_log.append(as_unicode(msg))
def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False):
	"""Print a message to the user (via HTTP response).

	Messages are sent in the `__server_messages` property in the
	response JSON and shown in a pop-up / modal.

	:param msg: Message.
	:param title: [optional] Message title.
	:param raise_exception: [optional] Raise given exception and show message.
	:param as_table: [optional] If `msg` is a list of lists, render as HTML table.
	:param indicator: [optional] Indicator color shown with the message.
	:param alert: [optional] Show as a floating alert instead of a modal.
	"""
	from utils import encode

	out = _dict(message=msg)

	def _raise_exception():
		if raise_exception:
			if flags.rollback_on_exception:
				db.rollback()
			import inspect
			# raise_exception may be a truthy flag or an Exception subclass
			if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
				raise raise_exception(encode(msg))
			else:
				raise ValidationError(encode(msg))

	if flags.mute_messages:
		# output suppressed (e.g. during install/migrate); still honour raise_exception
		_raise_exception()
		return

	if as_table and type(msg) in (list, tuple):
		out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'

	if flags.print_messages and out.msg:
		# FIX: repr() already returns str; the previous `.encode("utf-8")` made
		# this a str + bytes concatenation (TypeError on Python 3). repr output
		# is ASCII-safe, so dropping the encode is identical on Python 2.
		print("Message: " + repr(out.msg))

	if title:
		out.title = title

	if not indicator and raise_exception:
		indicator = 'red'

	if indicator:
		out.indicator = indicator

	if alert:
		out.alert = 1

	message_log.append(json.dumps(out))
	_raise_exception()
def clear_messages():
	"""Reset the request-local server message log."""
	local.message_log = []
def throw(msg, exc=ValidationError, title=None):
	"""Throw exception and show message (`msgprint`).

	:param msg: Message.
	:param exc: Exception class. Default `frappe.ValidationError`
	:param title: [optional] Message title."""
	msgprint(msg, raise_exception=exc, title=title, indicator='red')
def emit_js(js, user=False, **kwargs):
	"""Send a javascript snippet to the client for evaluation via realtime.

	:param js: Javascript source to evaluate on the client.
	:param user: Target user; defaults to the current session user."""
	from frappe.async import publish_realtime
	if user == False:
		# NOTE(review): `== False` (not `is False`) — 0 would also match; confirm intended
		user = session.user

	publish_realtime('eval_js', js, user=user, **kwargs)
def create_folder(path, with_init=False):
	"""Create a folder in the given path and add an `__init__.py` file (optional).

	:param path: Folder path.
	:param with_init: Create `__init__.py` in the new folder."""
	from frappe.utils import touch_file

	if os.path.exists(path):
		return

	os.makedirs(path)
	if with_init:
		touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
	"""Set current user and drop all per-user request-local state.

	:param username: **User** name to set as current user."""
	local.session.user = username
	local.session.sid = username
	local.session.data = _dict()

	# everything cached for the previous user is now stale
	local.cache = {}
	local.form_dict = _dict()
	local.jenv = None
	local.role_permissions = {}
	local.new_doc_templates = {}
	local.user_perms = None
def get_user():
	"""Return (and lazily build) the `UserPermissions` object for the session user."""
	from frappe.utils.user import UserPermissions
	if not local.user_perms:
		# cached on local for the rest of the request
		local.user_perms = UserPermissions(local.session.user)
	return local.user_perms
def get_roles(username=None):
	"""Returns roles of the given user (default: current session user).

	:param username: [optional] User to look up; without a session, returns ["Guest"]."""
	if not local.session:
		return ["Guest"]

	if not username:
		return get_user().get_roles()

	import frappe.permissions
	return frappe.permissions.get_roles(username)
def get_request_header(key, default=None):
	"""Return HTTP request header.

	:param key: HTTP header key.
	:param default: Default value."""
	return request.headers.get(key, default)
def sendmail(recipients=None, sender="", subject="No Subject", message="No Message",
		as_markdown=False, delayed=True, reference_doctype=None, reference_name=None,
		unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
		attachments=None, content=None, doctype=None, name=None, reply_to=None,
		cc=None, message_id=None, in_reply_to=None, send_after=None, expose_recipients=None,
		send_priority=1, communication=None, retry=1, now=None, read_receipt=None, is_notification=False,
		inline_images=None, template=None, args=None, header=None):
	"""Send email using user's default **Email Account** or global default **Email Account**.

	:param recipients: List of recipients.
	:param sender: Email sender. Default is current user.
	:param subject: Email Subject.
	:param message: (or `content`) Email Content.
	:param as_markdown: Convert content markdown to HTML.
	:param delayed: Send via scheduled email sender **Email Queue**. Don't send immediately. Default is true
	:param send_priority: Priority for Email Queue, default 1.
	:param reference_doctype: (or `doctype`) Append as communication to this DocType.
	:param reference_name: (or `name`) Append as communication to this document name.
	:param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
	:param unsubscribe_params: Unsubscribe paramaters to be loaded on the unsubscribe_method [optional] (dict).
	:param attachments: List of attachments.
	:param reply_to: Reply-To Email Address.
	:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
	:param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
	:param send_after: Send after the given datetime.
	:param expose_recipients: Display all recipients in the footer message - "This email was sent to"
	:param communication: Communication link to be set in Email Queue record
	:param inline_images: List of inline images as {"filename", "filecontent"}. All src properties will be replaced with random Content-Id
	:param template: Name of html template from templates/emails folder
	:param args: Arguments for rendering the template
	:param header: Append header in email
	"""
	# FIX: the old defaults `recipients=[]` / `cc=[]` were shared mutable
	# objects across all calls; use None sentinels and normalise here.
	if recipients is None:
		recipients = []
	if cc is None:
		cc = []

	text_content = None
	if template:
		message, text_content = get_email_from_template(template, args)

	message = content or message

	if as_markdown:
		from markdown2 import markdown
		message = markdown(message)

	if not delayed:
		now = True

	import email.queue
	email.queue.send(recipients=recipients, sender=sender,
		subject=subject, message=message, text_content=text_content,
		reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
		unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
		attachments=attachments, reply_to=reply_to, cc=cc, message_id=message_id, in_reply_to=in_reply_to,
		send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority,
		communication=communication, now=now, read_receipt=read_receipt, is_notification=is_notification,
		inline_images=inline_images, header=header)
whitelisted = []
guest_methods = []
xss_safe_methods = []

def whitelist(allow_guest=False, xss_safe=False):
	"""
	Decorator for whitelisting a function and making it accessible via HTTP.
	Standard request will be `/api/method/[path.to.method]`

	:param allow_guest: Allow non logged-in user to access this method.
	:param xss_safe: Skip XSS sanitisation of string arguments for this method.

	Use as:

		@frappe.whitelist()
		def myfunc(param1, param2):
			pass
	"""
	def innerfn(fn):
		# module-level registries are only mutated (appended to), so no
		# `global` statement is required
		whitelisted.append(fn)
		if allow_guest:
			guest_methods.append(fn)
		if xss_safe:
			xss_safe_methods.append(fn)
		return fn

	return innerfn
def only_for(roles):
	"""Raise `frappe.PermissionError` if the user does not have any of the given **Roles**.

	:param roles: Role name or list/tuple of role names to check."""
	if local.flags.in_test:
		return

	if isinstance(roles, (tuple, list)):
		required = set(roles)
	else:
		required = {roles}

	if required.isdisjoint(get_roles()):
		raise PermissionError
def clear_cache(user=None, doctype=None):
	"""Clear **User**, **DocType** or global cache.

	:param user: If user is given, only user cache is cleared.
	:param doctype: If doctype is given, only DocType cache is cleared."""
	import frappe.sessions
	if doctype:
		import frappe.model.meta
		frappe.model.meta.clear_cache(doctype)
		# bump the client-side build id so browsers refetch metadata
		reset_metadata_version()
	elif user:
		frappe.sessions.clear_cache(user)
	else: # everything
		import translate  # implicit-relative import (Python 2 style)
		frappe.sessions.clear_cache()
		translate.clear_cache()
		reset_metadata_version()
		clear_domainification_cache()
		local.cache = {}
		local.new_doc_templates = {}
		# let installed apps flush their own caches
		for fn in get_hooks("clear_cache"):
			get_attr(fn)()
	# role permissions are recomputed lazily, so always drop them
	local.role_permissions = {}
def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False):
	"""Return whether the user is permitted; optionally raise `frappe.PermissionError`.

	:param doctype: DocType for which permission is to be checked.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param doc: [optional] Checks User permissions for given doc.
	:param user: [optional] Check for given user. Default: current user.
	:param throw: [optional] Raise (via `frappe.throw`) instead of returning False."""
	import frappe.permissions

	if doc and not doctype:
		doctype = doc.doctype

	allowed = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)

	if throw and not allowed:
		target = (doc.doctype + " " + doc.name) if doc else doctype
		frappe.throw(_("No permission for {0}").format(target))

	return allowed
def has_website_permission(doc=None, ptype='read', user=None, verbose=False, doctype=None):
	"""Returns True if the user has website (portal) permission on the document.

	:param doc: Document object or document name (with `doctype`) to check.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param user: [optional] Check for given user. Default: current user.
	:param doctype: DocType of `doc` when `doc` is passed as a name."""
	if not user:
		user = session.user

	if doc:
		if isinstance(doc, basestring):
			# doc given as a name: load the document first
			doc = get_doc(doctype, doc)

		doctype = doc.doctype

		if doc.flags.ignore_permissions:
			return True

		# check permission in controller
		if hasattr(doc, 'has_website_permission'):
			return doc.has_website_permission(ptype, verbose=verbose)

	hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
	if hooks:
		for method in hooks:
			result = call(method, doc=doc, ptype=ptype, user=user, verbose=verbose)
			# if even a single permission check is Falsy
			if not result:
				return False

		# else it is Truthy
		return True

	else:
		# no hooks registered for this doctype: deny by default
		return False
def is_table(doctype):
	"""Returns True if `istable` property (indicating child Table) is set for given DocType."""
	tables = cache().get_value("is_table",
		lambda: db.sql_list("select name from tabDocType where istable=1"))
	return doctype in tables
def get_precision(doctype, fieldname, currency=None, doc=None):
	"""Get precision for a given field.

	:param doctype: DocType of the field.
	:param fieldname: Field name.
	:param currency: [optional] Currency used to resolve currency-field precision.
	:param doc: [optional] Document, used to resolve dynamic precision."""
	from frappe.model.meta import get_field_precision
	return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency)
def generate_hash(txt=None, length=None):
	"""Generates random hash for given text + current timestamp + random bytes.

	:param txt: [optional] Text mixed into the hash.
	:param length: [optional] Truncate the hex digest to this many characters."""
	import hashlib, time, os
	# FIX: use os.urandom (OS CSPRNG, standard library, works on Py2 and Py3)
	# instead of frappe's non-cryptographic random_string helper
	digest = hashlib.sha224(((txt or "") + repr(time.time()) + repr(os.urandom(8))).encode()).hexdigest()
	if length:
		digest = digest[:length]
	return digest
def reset_metadata_version():
	"""Reset `metadata_version` (Client (Javascript) build ID) hash.

	Returns the newly generated version string."""
	v = generate_hash()
	cache().set_value("metadata_version", v)
	return v
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
	"""Returns a new document of the given DocType with defaults set.

	:param doctype: DocType of the new document.
	:param parent_doc: [optional] add to parent document.
	:param parentfield: [optional] add against this `parentfield`.
	:param as_dict: [optional] return as a dict instead of a Document object."""
	from frappe.model.create_new import get_new_doc
	return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value=None):
	"""Set document value. Calls `frappe.client.set_value`.

	:param doctype: DocType of the document.
	:param docname: Name of the document.
	:param fieldname: Field name (or a dict of fieldname/value pairs).
	:param value: Value to set."""
	import frappe.client
	return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(arg1, arg2=None):
	"""Return a `frappe.model.document.Document` object of the given type and name.

	:param arg1: DocType name as string **or** document JSON.
	:param arg2: [optional] Document name as string.

	Examples:

		# insert a new document
		todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
		todo.insert()

		# open an existing document
		todo = frappe.get_doc("ToDo", "TD0001")

	"""
	import frappe.model.document
	return frappe.model.document.get_doc(arg1, arg2)
def get_last_doc(doctype):
	"""Get last created document of this type.

	:param doctype: DocType to fetch.
	:raises DoesNotExistError: when no document of this type exists."""
	rows = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
	if not rows:
		raise DoesNotExistError
	return get_doc(doctype, rows[0].name)
def get_single(doctype):
	"""Return a `frappe.model.document.Document` object of the given Single doctype.

	:param doctype: Name of the Single DocType (name == doctype for singles)."""
	return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
	"""Get `frappe.model.meta.Meta` instance of given doctype name.

	:param doctype: DocType name.
	:param cached: [optional] Use the cached Meta when available. Default True."""
	import frappe.model.meta
	return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
	"""Return the python module that defines the given doctype's controller."""
	import frappe.modules
	return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
	ignore_permissions=False, flags=None):
	"""Delete a document. Calls `frappe.model.delete_doc.delete_doc`.

	:param doctype: DocType of document to be deleted.
	:param name: Name of document to be deleted.
	:param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
	:param ignore_doctypes: Ignore if child table is one of these.
	:param for_reload: Call `before_reload` trigger before deleting.
	:param ignore_permissions: Ignore user permissions.
	:param flags: [optional] Extra flags passed through to the delete call."""
	import frappe.model.delete_doc
	frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload,
		ignore_permissions, flags)
def delete_doc_if_exists(doctype, name, force=0):
	"""Delete document if exists.

	:param doctype: DocType of the document.
	:param name: Name of the document.
	:param force: Allow deletion even if the document is linked."""
	if not db.exists(doctype, name):
		return
	delete_doc(doctype, name, force=force)
def reload_doctype(doctype, force=False, reset_permissions=False):
	"""Reload DocType from model (`[module]/[doctype]/[name]/[name].json`) files.

	:param doctype: DocType name.
	:param force: Reload even if `modified` timestamp matches.
	:param reset_permissions: Reset the DocType's permissions from the model files."""
	# the module folder is derived from the DocType's `module` field
	reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype),
		force=force, reset_permissions=reset_permissions)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
	"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.

	:param module: Module name.
	:param dt: DocType name.
	:param dn: Document name.
	:param force: Reload even if `modified` timestamp matches.
	:param reset_permissions: Reset the document's permissions from the model files.
	"""
	import frappe.modules
	return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions)
def rename_doc(*args, **kwargs):
	"""Rename a document. Calls `frappe.model.rename_doc.rename_doc`"""
	from frappe.model.rename_doc import rename_doc
	return rename_doc(*args, **kwargs)
def get_module(modulename):
	"""Returns a module object for given Python module name using `importlib.import_module`."""
	return importlib.import_module(modulename)
def scrub(txt):
	"""Returns sluggified string: spaces and hyphens become underscores, lowercased.
	e.g. `Sales Order` becomes `sales_order`."""
	return txt.lower().replace(' ', '_').replace('-', '_')
def unscrub(txt):
	"""Returns titlified string: underscores and hyphens become spaces, Title Cased.
	e.g. `sales_order` becomes `Sales Order`."""
	for separator in ('_', '-'):
		txt = txt.replace(separator, ' ')
	return txt.title()
def get_module_path(module, *joins):
	"""Get the path of the given module name.

	:param module: Module name.
	:param *joins: Join additional path elements using `os.path.join`."""
	module = scrub(module)
	# module_app maps a scrubbed module name to its owning app
	return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
	"""Return path of given app.

	:param app_name: App name.
	:param *joins: Join additional path elements using `os.path.join`."""
	return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
	"""Return path of current site.

	:param *joins: Join additional path elements using `os.path.join`."""
	return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
	"""Return path of given Python module name.

	:param modulename: Python module name.
	:param *joins: Join additional path elements using `os.path.join`."""
	if not "public" in joins:
		# scrub each path part unless we are inside the app's public assets folder
		joins = [scrub(part) for part in joins]
	return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins)
def get_module_list(app_name):
	"""Get list of modules for the given app via `app/modules.txt`."""
	return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_internal_apps=True, sites_path=None):
	"""Get list of all apps via `sites/apps.txt`.

	:param with_internal_apps: Also include apps listed in the site's own `apps.txt`.
	:param sites_path: [optional] Sites folder; defaults to `local.sites_path`."""
	sites_path = sites_path or local.sites_path

	apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)

	if with_internal_apps:
		for extra_app in get_file_items(os.path.join(local.site_path, "apps.txt")):
			if extra_app not in apps:
				apps.append(extra_app)

	# 'frappe' always comes first
	if "frappe" in apps:
		apps.remove("frappe")
	apps.insert(0, 'frappe')

	return apps
def get_installed_apps(sort=False, frappe_last=False):
	"""Get list of installed apps in current site.

	:param sort: Order apps as in `get_all_apps` (apps.txt order).
	:param frappe_last: Move 'frappe' to the end of the list."""
	if getattr(flags, "in_install_db", True):
		# NOTE(review): the default True looks inverted, but `flags` appears to be a
		# _dict whose attribute lookup returns None for missing keys, so the getattr
		# default may never apply — confirm before changing
		return []

	if not db:
		connect()

	# the installed-apps list is stored as a JSON global in the database
	installed = json.loads(db.get_global("installed_apps") or "[]")

	if sort:
		installed = [app for app in get_all_apps(True) if app in installed]

	if frappe_last:
		if 'frappe' in installed:
			installed.remove('frappe')
		installed.append('frappe')

	return installed
def get_doc_hooks():
	'''Returns hooked methods for given doc. It will expand the dict tuple if required.'''
	if not hasattr(local, 'doc_events_hooks'):
		expanded = {}
		for key, value in iteritems(get_hooks('doc_events', {})):
			# a tuple key applies the same hooks to several doctypes
			doctypes = key if isinstance(key, tuple) else (key,)
			for doctype in doctypes:
				append_hook(expanded, doctype, value)
		local.doc_events_hooks = expanded

	return local.doc_events_hooks
def get_hooks(hook=None, default=None, app_name=None):
	"""Get hooks via `app/hooks.py`

	:param hook: Name of the hook. Will gather all hooks for this name and return as a list.
	:param default: Default if no hook found.
	:param app_name: Filter by app."""
	def load_app_hooks(app_name=None):
		hooks = {}
		for app in [app_name] if app_name else get_installed_apps(sort=True):
			app = "frappe" if app=="webnotes" else app
			try:
				app_hooks = get_module(app + ".hooks")
			except ImportError:
				if local.flags.in_install_app:
					# if app is not installed while restoring, ignore it
					# FIX: was `pass`, which fell through to the error
					# path below instead of skipping the app
					continue
				# FIX: report the app that actually failed to import
				# (`app_name` is None when iterating installed apps)
				print('Could not find app "{0}"'.format(app))
				if not request:
					sys.exit(1)
				raise
			for key in dir(app_hooks):
				if not key.startswith("_"):
					append_hook(hooks, key, getattr(app_hooks, key))
		return hooks

	if app_name:
		hooks = _dict(load_app_hooks(app_name))
	else:
		# hooks for all installed apps are cached in redis
		hooks = _dict(cache().get_value("app_hooks", load_app_hooks))

	if hook:
		return hooks.get(hook) or (default if default is not None else [])
	else:
		return hooks
def append_hook(target, key, value):
	'''Merge *value* into ``target[key]``.

	Dict values (like doc_events) recurse per inner key; every other value
	is collected into a list under the key.
	'''
	if isinstance(value, dict):
		# dict? make a list of values against each key
		inner = target.setdefault(key, {})
		for inkey, inval in value.items():
			append_hook(inner, inkey, inval)
	else:
		# make a list
		bucket = target.setdefault(key, [])
		bucket.extend(value if isinstance(value, list) else [value])
def setup_module_map():
	"""Rebuild map of all modules (internal).

	Populates `local.app_modules` (app -> [modules]) and `local.module_app`
	(module -> app), using the cache when a database is configured."""
	_cache = cache()

	if conf.db_name:
		# try the cached maps first; only rebuild on a miss
		local.app_modules = _cache.get_value("app_modules")
		local.module_app = _cache.get_value("module_app")

	if not (local.app_modules and local.module_app):
		local.module_app, local.app_modules = {}, {}
		for app in get_all_apps(True):
			if app=="webnotes": app="frappe"  # legacy app name
			local.app_modules.setdefault(app, [])
			for module in get_module_list(app):
				module = scrub(module)
				local.module_app[module] = app
				local.app_modules[app].append(module)

		if conf.db_name:
			_cache.set_value("app_modules", local.app_modules)
			_cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
	"""Returns items from text file as a list.

	:param path: File path.
	:param raise_not_found: Raise `IOError` when the file is missing.
	:param ignore_empty_lines: When True (default), blank lines and `#` comment lines are dropped."""
	import frappe.utils

	content = read_file(path, raise_not_found=raise_not_found)
	if content:
		content = frappe.utils.strip(content)

		# NOTE(review): with ignore_empty_lines=False, `#` comment lines are kept too — confirm intended
		return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
	else:
		return []
def get_file_json(path):
	"""Read the file at *path* and return its parsed JSON content."""
	with open(path, 'r') as json_file:
		return json.load(json_file)
def read_file(path, raise_not_found=False):
	"""Open a file and return its content as Unicode.

	:param path: File path.
	:param raise_not_found: Raise `IOError` when the file does not exist (default: return None)."""
	if isinstance(path, text_type):
		# Python 2 compat: pass byte paths to the filesystem — TODO confirm still wanted on Py3
		path = path.encode("utf-8")

	if os.path.exists(path):
		with open(path, "r") as f:
			return as_unicode(f.read())
	elif raise_not_found:
		raise IOError("{} Not Found".format(path))
	else:
		return None
def get_attr(method_string):
	"""Resolve a dotted-path string to the python attribute (method/object) it names.

	:param method_string: e.g. `"app.module.function"`.
	:raises AppNotInstalledError: when the leading app is not installed."""
	app_name = method_string.split(".")[0]
	if not local.flags.in_install and app_name not in get_installed_apps():
		throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)

	parts = method_string.split('.')
	modulename = '.'.join(parts[:-1])
	return getattr(get_module(modulename), parts[-1])
def call(fn, *args, **kwargs):
	"""Call a function after filtering kwargs down to the arguments it accepts.

	:param fn: Function object or dotted-path string (resolved via `get_attr`)."""
	if isinstance(fn, basestring):
		fn = get_attr(fn)

	if hasattr(fn, 'fnargs'):
		# pre-declared argument list (e.g. attached by a decorator)
		# NOTE(review): on this branch `varkw` is never assigned, so a kwarg not in
		# fnargs would raise NameError below — confirm whether fnargs callers rely on this
		fnargs = fn.fnargs
	else:
		fnargs, varargs, varkw, defaults = inspect.getargspec(fn)

	newargs = {}
	for a in kwargs:
		# keep only kwargs the function accepts (or everything, if it takes **kwargs)
		if (a in fnargs) or varkw:
			newargs[a] = kwargs.get(a)

	if "flags" in newargs:
		# "flags" is reserved for internal use and never forwarded
		del newargs["flags"]

	return fn(*args, **newargs)
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
	"""Create a new **Property Setter** (for overriding DocType and DocField properties).

	If doctype is not specified, it will create a property setter for all fields with the
	given fieldname.

	:param args: dict with keys like `doctype`, `fieldname`, `property`, `value`, ...
	:param ignore_validate: Skip Property Setter validation on insert.
	:param validate_fields_for_doctype: Validate that the field exists on the doctype."""
	args = _dict(args)
	if not args.doctype_or_field:
		args.doctype_or_field = 'DocField'
		if not args.property_type:
			# NOTE(review): this lookup filters on parent='DocField' (the DocField
			# meta itself), unlike the per-doctype lookup in the loop — confirm intended
			args.property_type = db.get_value('DocField',
				{'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data'

	if not args.doctype:
		# apply to every doctype that has this fieldname
		doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname)
	else:
		doctype_list = [args.doctype]

	for doctype in doctype_list:
		if not args.property_type:
			args.property_type = db.get_value('DocField',
				{'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data'

		ps = get_doc({
			'doctype': "Property Setter",
			'doctype_or_field': args.doctype_or_field,
			'doc_type': doctype,
			'field_name': args.fieldname,
			'property': args.property,
			'value': args.value,
			'property_type': args.property_type or "Data",
			'__islocal': 1
		})
		ps.flags.ignore_validate = ignore_validate
		ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
		ps.validate_fieldtype_change()
		ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
	"""Import a file using Data Import Tool.

	:param path: Path of the file to import.
	:param ignore_links: Do not validate Link fields during import.
	:param ignore_insert: Update existing documents only.
	:param insert: Force insert (treat records as new)."""
	from frappe.core.page.data_import_tool import data_import_tool
	data_import_tool.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
	"""Return a deep copy of the given document, reset to an unsaved (local) state.

	:param doc: Document object or dict to copy.
	:param ignore_no_copy: When True (default), values of `no_copy` fields are copied too;
		when False they are cleared on the copy."""
	import copy

	def remove_no_copy_fields(d):
		# blank out every field flagged no_copy in the doctype meta
		for df in d.meta.get("fields", {"no_copy": 1}):
			if hasattr(d, df.fieldname):
				d.set(df.fieldname, None)

	fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by']

	if not local.flags.in_test:
		fields_to_clear.append("docstatus")

	if not isinstance(doc, dict):
		d = doc.as_dict()
	else:
		d = doc

	newdoc = get_doc(copy.deepcopy(d))
	newdoc.set("__islocal", 1)
	# amendment linkage never carries over to a copy
	for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']):
		newdoc.set(fieldname, None)

	if not ignore_no_copy:
		remove_no_copy_fields(newdoc)

	# children are reset the same way (minus the amendment fields)
	for i, d in enumerate(newdoc.get_all_children()):
		d.set("__islocal", 1)
		for fieldname in fields_to_clear:
			d.set(fieldname, None)
		if not ignore_no_copy:
			remove_no_copy_fields(d)

	return newdoc
def compare(val1, condition, val2):
	"""Compare two values using `frappe.utils.compare`

	`condition` could be:
	- "^"
	- "in"
	- "not in"
	- "="
	- "!="
	- ">"
	- "<"
	- ">="
	- "<="
	- "not None"
	- "None"
	"""
	import frappe.utils
	return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None,
	context=None, indicator_color=None, primary_action='/', primary_label = None, fullpage=False):
	"""Send response as a web page with a message rather than JSON. Used to show permission errors etc.

	:param title: Page title and heading.
	:param html: Message body to be shown.
	:param success: Alert message.
	:param http_status_code: HTTP status code
	:param context: web template context
	:param indicator_color: color of indicator in title
	:param primary_action: route on primary button (default is `/`)
	:param primary_label: label on primary button (defaut is "Home")
	:param fullpage: hide header / footer"""
	local.message_title = title
	local.message = html
	local.response['type'] = 'page'
	local.response['route'] = 'message'
	if http_status_code:
		local.response['http_status_code'] = http_status_code

	context = context or {}

	if not indicator_color:
		if success:
			indicator_color = 'green'
		elif http_status_code and http_status_code > 300:
			indicator_color = 'red'
		else:
			indicator_color = 'blue'

	context.update({
		'indicator_color': indicator_color,
		'primary_label': primary_label,
		'primary_action': primary_action,
		'error_code': http_status_code,
		'fullpage': fullpage,
	})

	local.response['context'] = context
def redirect_to_message(title, html, http_status_code=None, context=None, indicator_color=None):
	"""Redirects to /message?id=random
	Similar to respond_as_web_page, but used to 'redirect' and show message pages like success, failure, etc. with a detailed message

	:param title: Page title and heading.
	:param html: Message body to be shown.
	:param http_status_code: HTTP status code.
	:param context: [optional] extra web template context.
	:param indicator_color: [optional] color of indicator in title.

	Example Usage:
		frappe.redirect_to_message(_('Thank you'), "<div><p>You will receive an email at test@example.com</p></div>")

	"""
	message_id = generate_hash(length=8)
	message = {
		'context': context or {},
		'http_status_code': http_status_code or 200
	}
	message['context'].update({
		'header': title,
		'title': title,
		'message': html
	})

	if indicator_color:
		message['context'].update({
			"indicator_color": indicator_color
		})

	# the /message page picks the payload up from cache by id; expires after a minute
	cache().set_value("message_id:{0}".format(message_id), message, expires_in_sec=60)
	location = '/message?id={0}'.format(message_id)

	if not getattr(local, 'is_ajax', False):
		local.response["type"] = "redirect"
		local.response["location"] = location
	else:
		# ajax callers perform the redirect themselves
		return location
def build_match_conditions(doctype, as_condition=True):
	"""Return match (User permissions) for given doctype as list or SQL.

	:param doctype: DocType for which conditions are built.
	:param as_condition: Return a SQL condition string (default) instead of a list."""
	import frappe.desk.reportview
	return frappe.desk.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, *args, **kwargs):
	"""List database query via `frappe.model.db_query`. Will also check for permissions.

	:param doctype: DocType on which query is to be made.
	:param fields: List of fields or `*`.
	:param filters: List of filters (see example).
	:param order_by: Order By e.g. `modified desc`.
	:param limit_page_start: Start results at record #. Default 0.
	:param limit_page_length: No of records in the page. Default 20.

	Example usage:

		# simple dict filter
		frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

		# filter as a list of lists
		frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])

		# filter as a list of dicts
		frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
	"""
	from frappe.model.db_query import DatabaseQuery
	return DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
	"""List database query via `frappe.model.db_query`. Will **not** check for permissions.
	Parameters are same as `frappe.get_list`

	:param doctype: DocType on which query is to be made.
	:param fields: List of fields or `*`. Default is: `["name"]`.
	:param filters: List of filters (see example).
	:param order_by: Order By e.g. `modified desc`.
	:param limit_page_start: Start results at record #. Default 0.
	:param limit_page_length: No of records in the page. Default 20.

	Example usage:

		# simple dict filter
		frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

		# filter as a list of lists
		frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])

		# filter as a list of dicts
		frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
	"""
	kwargs["ignore_permissions"] = True
	# unlike get_list, get_all is unpaginated by default
	kwargs.setdefault("limit_page_length", 0)
	return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
	"""Returns a document property or list of properties.

	Alias for `frappe.db.get_value`

	:param doctype: DocType name.
	:param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
	:param fieldname: Column name.
	:param ignore: Don't raise exception if table, column is missing.
	:param as_dict: Return values as dict.
	:param debug: Print query in error log.
	"""
	return db.get_value(*args, **kwargs)
def as_json(obj, indent=1):
	"""Serialize *obj* to sorted, indented JSON, using frappe's `json_handler` for non-JSON types."""
	from frappe.utils.response import json_handler
	return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler)
def are_emails_muted():
	"""Return truthy when outgoing email is disabled via the `mute_emails` flag or site config."""
	from utils import cint
	return flags.mute_emails or cint(conf.get("mute_emails") or 0) or False
def get_test_records(doctype):
	"""Returns list of objects from `test_records.json` in the given doctype's folder
	(empty list when the file does not exist)."""
	from frappe.modules import get_doctype_module, get_module_path

	path = os.path.join(get_module_path(get_doctype_module(doctype)),
		"doctype", scrub(doctype), "test_records.json")

	if not os.path.exists(path):
		return []

	with open(path, "r") as f:
		return json.loads(f.read())
def format_value(*args, **kwargs):
	"""Format value with given field properties.

	:param value: Value to be formatted.
	:param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
	import frappe.utils.formatters
	return frappe.utils.formatters.format_value(*args, **kwargs)
def format(*args, **kwargs):
	"""Format value with given field properties (alias of `format_value`).

	NOTE: intentionally shadows the `format` builtin as part of the public frappe API.

	:param value: Value to be formatted.
	:param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
	import frappe.utils.formatters
	return frappe.utils.formatters.format_value(*args, **kwargs)
def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output = None):
	"""Get Print Format for given document.

	:param doctype: DocType of document.
	:param name: Name of document.
	:param print_format: Print Format name. Default 'Standard',
	:param style: Print Format style.
	:param html: [optional] Pre-rendered HTML; skips rendering the printview page.
	:param as_pdf: Return as PDF. Default False.
	:param doc: [optional] Document object to render instead of loading by name.
	:param output: [optional] Writer object passed through to `get_pdf`."""
	from frappe.website.render import build_page
	from frappe.utils.pdf import get_pdf

	# the printview page reads its inputs from the request form_dict
	local.form_dict.doctype = doctype
	local.form_dict.name = name
	local.form_dict.format = print_format
	local.form_dict.style = style
	local.form_dict.doc = doc

	if not html:
		html = build_page("printview")

	if as_pdf:
		return get_pdf(html, output = output)
	else:
		return html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None):
	"""Build an email attachment dict (`fname`/`fcontent`) from a document's print format.

	Produces a PDF or HTML attachment depending on the "Print Settings"
	`send_print_as_pdf` option.

	:param doctype: DocType of document.
	:param name: Name of document.
	:param file_name: [optional] Attachment file name; defaults to the document name."""
	from frappe.utils import scrub_urls

	if not file_name: file_name = name
	file_name = file_name.replace(' ','').replace('/','-')

	print_settings = db.get_singles_dict("Print Settings")

	# printing for attachment should not be blocked by print permissions
	local.flags.ignore_print_permissions = True

	if int(print_settings.send_print_as_pdf or 0):
		out = {
			"fname": file_name + ".pdf",
			"fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc)
		}
	else:
		out = {
			"fname": file_name + ".html",
			"fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc)).encode("utf-8")
		}

	local.flags.ignore_print_permissions = False

	return out
def publish_progress(*args, **kwargs):
"""Show the user progress for a long request
:param percent: Percent progress
:param title: Title
:param doctype: Optional, for DocType
:param name: Optional, for Document name
"""
import frappe.async
return frappe.async.publish_progress(*args, **kwargs)
def publish_realtime(*args, **kwargs):
"""Publish real-time updates
:param event: Event name, like `task_progress` etc.
:param message: JSON message object. For async must contain `task_id`
:param room: Room in which to publish update (default entire site)
:param user: Transmit to user
:param doctype: Transmit to doctype, docname
:param docname: Transmit to doctype, docname
:param after_commit: (default False) will emit after current transaction is committed
"""
import frappe.async
return frappe.async.publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
    """A key value store for caching within a request.

    :param namespace: frappe.local.cache[namespace]
    :param key: frappe.local.cache[namespace][key] used to retrieve value
    :param generator: method to generate a value if not found in store
    :param regenerate_if_none: if True, re-run ``generator`` when the
        cached value is None
    """
    if namespace not in local.cache:
        local.cache[namespace] = {}

    if key not in local.cache[namespace]:
        local.cache[namespace][key] = generator()
    # BUGFIX: use identity comparison with None (was `== None`, which can
    # be hijacked by a cached object's custom __eq__)
    elif local.cache[namespace][key] is None and regenerate_if_none:
        # key exists but the previous result was None -- regenerate
        local.cache[namespace][key] = generator()

    return local.cache[namespace][key]
def enqueue(*args, **kwargs):
    '''
    Enqueue method to be executed using a background worker

    :param method: method string or method object
    :param queue: (optional) should be either long, default or short
    :param timeout: (optional) should be set according to the functions
    :param event: this is passed to enable clearing of jobs from queues
    :param async: (optional) if async=False, the method is executed immediately, else via a worker
    :param job_name: (optional) can be used to name an enqueue call, which can be used to prevent duplicate calls
    :param kwargs: keyword arguments to be passed to the method
    '''
    import frappe.utils.background_jobs
    background_enqueue = frappe.utils.background_jobs.enqueue
    return background_enqueue(*args, **kwargs)
def get_doctype_app(doctype):
    """Return the app that the given DocType's module belongs to.

    The lookup is cached per-request under the "doctype_app" namespace."""
    def _lookup_app():
        # resolve DocType -> module -> app
        module_name = local.db.get_value("DocType", doctype, "module")
        return local.module_app[scrub(module_name)]
    return local_cache("doctype_app", doctype, generator=_lookup_app)
# Module-level logging state. NOTE(review): neither name is consumed in
# this chunk -- presumably read/written by frappe.utils.logger; confirm.
loggers = {}
log_level = None
def logger(module=None, with_more_info=True):
    '''Return a python logger that uses StreamHandler
    (delegates to frappe.utils.logger.get_logger).'''
    from frappe.utils.logger import get_logger
    logger_name = module if module else 'default'
    return get_logger(logger_name, with_more_info=with_more_info)
def log_error(message=None, title=None):
    '''Log error to Error Log (falls back to the current traceback
    when no message is given).'''
    error_text = str(message or get_traceback())
    error_doc = get_doc(dict(doctype='Error Log', error=error_text,
        method=title))
    error_doc.insert(ignore_permissions=True)
def get_desk_link(doctype, name):
    """Return a bold HTML anchor pointing at the desk form for the document."""
    template = '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'
    return template.format(doctype, name, _(doctype))
def bold(text):
    """Wrap the given text in HTML ``<b>`` tags."""
    template = '<b>{0}</b>'
    return template.format(text)
def safe_eval(code, eval_globals=None, eval_locals=None):
    '''A safer `eval`

    Evaluates ``code`` with ``__builtins__`` cleared, a small whitelist of
    callables (int, float, long, round) merged into the globals, and any
    code containing a double underscore rejected via ``throw``.

    NOTE(review): blocking "__" is a weak sandbox; eval of untrusted input
    remains risky -- confirm callers only pass trusted/validated rules.

    :param code: expression string to evaluate
    :param eval_globals: (Optional) globals dict; mutated in place
    :param eval_locals: (Optional) locals dict
    '''
    whitelisted_globals = {
        "int": int,
        "float": float,
        "long": long,  # Python 2 only; `long` does not exist on Python 3
        "round": round
    }
    if '__' in code:
        throw('Illegal rule {0}. Cannot use "__"'.format(bold(code)))

    if not eval_globals:
        eval_globals = {}
    # remove builtins, then expose only the whitelisted callables
    eval_globals['__builtins__'] = {}
    eval_globals.update(whitelisted_globals)
    return eval(code, eval_globals, eval_locals)
def get_active_domains():
    """Return the domains set in Domain Settings as active domains.

    The result is cached under ("domains", "active_domains"); an empty
    string is always appended to the list before caching."""
    cached = cache().hget("domains", "active_domains") or None
    if cached is not None:
        return cached
    rows = get_all("Has Domain", filters={ "parent": "Domain Settings" },
        fields=["domain"], distinct=True)
    active_domains = [row.get("domain") for row in rows]
    active_domains.append("")
    cache().hset("domains", "active_domains", active_domains)
    return active_domains
def get_active_modules():
    """Return names of Module Def records whose ``restrict_to_domain`` is
    one of the active domains (result cached under "modules")."""
    cached = cache().hget("modules", "active_modules") or None
    if cached is not None:
        return cached
    domains = get_active_domains()
    modules = get_all("Module Def", filters={"restrict_to_domain": ("in", domains)})
    active_modules = [module.name for module in modules]
    cache().hset("modules", "active_modules", active_modules)
    return active_modules
def clear_domainification_cache():
    """Drop the cached active-domain and active-module lists."""
    cache_store = cache()
    cache_store.delete_key("domains", "active_domains")
    cache_store.delete_key("modules", "active_modules")
def get_system_settings(key):
    """Return the value of ``key`` from System Settings, caching it on
    ``local.system_settings`` for the duration of the request.

    :param key: System Settings fieldname to fetch.
    """
    # BUGFIX/modernization: `dict.has_key` is deprecated and removed in
    # Python 3 -- use the `in` operator instead
    if key not in local.system_settings:
        local.system_settings.update({key: db.get_single_value('System Settings', key)})
    return local.system_settings.get(key)
# ---
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import pandas as pd
import numpy as np
import numpy.testing as npt
from skbio import DistanceMatrix, TreeNode
from skbio.io._fileobject import StringIO
from skbio.util._testing import assert_series_almost_equal
from skbio.diversity import (alpha_diversity, beta_diversity,
get_alpha_diversity_metrics,
get_beta_diversity_metrics)
from skbio.diversity.alpha import faith_pd, observed_otus
from skbio.diversity.beta import unweighted_unifrac, weighted_unifrac
from skbio.tree import DuplicateNodeError, MissingNodeError
class AlphaDiversityTests(TestCase):
    """Tests for the ``alpha_diversity`` driver function."""

    def setUp(self):
        # 4 samples x 5 OTUs count table
        self.table1 = np.array([[1, 3, 0, 1, 0],
                                [0, 2, 0, 4, 4],
                                [0, 0, 6, 2, 1],
                                [0, 0, 1, 1, 1]])
        self.sids1 = list('ABCD')
        self.oids1 = ['OTU%d' % i for i in range(1, 6)]
        self.tree1 = TreeNode.read(StringIO(
            u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
            u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
        # 3 samples x 2 OTUs count table
        self.table2 = np.array([[1, 3],
                                [0, 2],
                                [0, 0]])
        self.sids2 = list('xyz')
        self.oids2 = ['OTU1', 'OTU5']
        self.tree2 = TreeNode.read(StringIO(
            u'(((((OTU1:42.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
            u'0.0,(OTU4:0.75,OTU5:0.0001):1.25):0.0)root;'))

    def test_invalid_input(self):
        """Malformed non-phylogenetic input raises ValueError/TypeError."""
        # number of ids doesn't match the number of samples
        self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
                          self.table1, list('ABC'))
        # unknown metric provided
        self.assertRaises(ValueError, alpha_diversity, 'not-a-metric',
                          self.table1)
        # 3-D list provided as input
        self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
                          [[[43]]])
        # negative counts
        self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
                          [0, 3, -12, 42])
        # additional kwargs
        self.assertRaises(TypeError, alpha_diversity, 'observed_otus',
                          [0, 1], not_a_real_kwarg=42.0)
        self.assertRaises(TypeError, alpha_diversity, 'faith_pd',
                          [0, 1], tree=self.tree1, otu_ids=['OTU1', 'OTU2'],
                          not_a_real_kwarg=42.0)
        self.assertRaises(TypeError, alpha_diversity, faith_pd,
                          [0, 1], tree=self.tree1, otu_ids=['OTU1', 'OTU2'],
                          not_a_real_kwarg=42.0)

    def test_invalid_input_phylogenetic(self):
        """Malformed tree/otu_ids input raises the appropriate error."""
        # otu_ids not provided
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd', self.table1,
                          list('ABC'), tree=self.tree1)
        # tree not provided
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd', self.table1,
                          list('ABC'), otu_ids=self.oids1)
        # tree has duplicated tip ids
        t = TreeNode.read(
            StringIO(u'(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(DuplicateNodeError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # unrooted tree as input
        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
                                   u'OTU4:0.7);'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # otu_ids has duplicated ids
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU2']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # count and OTU vectors are not equal length
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # tree with no branch lengths
        t = TreeNode.read(
            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # tree missing some branch lengths
        t = TreeNode.read(
            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # some otu_ids not present in tree
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU42']
        self.assertRaises(MissingNodeError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)

    def test_empty(self):
        """Empty count vectors yield zero diversity, not an error."""
        # empty vector
        actual = alpha_diversity('observed_otus', np.array([], dtype=np.int64))
        expected = pd.Series([0])
        assert_series_almost_equal(actual, expected)
        # array of empty vector
        actual = alpha_diversity('observed_otus',
                                 np.array([[]], dtype=np.int64))
        expected = pd.Series([0])
        assert_series_almost_equal(actual, expected)
        # array of empty vectors
        actual = alpha_diversity('observed_otus',
                                 np.array([[], []], dtype=np.int64))
        expected = pd.Series([0, 0])
        assert_series_almost_equal(actual, expected)
        # empty vector
        actual = alpha_diversity('faith_pd', np.array([], dtype=np.int64),
                                 tree=self.tree1, otu_ids=[])
        expected = pd.Series([0.])
        assert_series_almost_equal(actual, expected)
        # array of empty vector
        actual = alpha_diversity('faith_pd',
                                 np.array([[]], dtype=np.int64),
                                 tree=self.tree1, otu_ids=[])
        expected = pd.Series([0.])
        assert_series_almost_equal(actual, expected)
        # array of empty vectors
        actual = alpha_diversity('faith_pd',
                                 np.array([[], []], dtype=np.int64),
                                 tree=self.tree1, otu_ids=[])
        expected = pd.Series([0., 0.])
        assert_series_almost_equal(actual, expected)

    def test_single_count_vector(self):
        """A 1-D vector is accepted and treated as one sample."""
        actual = alpha_diversity('observed_otus', np.array([1, 0, 2]))
        expected = pd.Series([2])
        assert_series_almost_equal(actual, expected)
        actual = alpha_diversity('faith_pd', np.array([1, 3, 0, 1, 0]),
                                 tree=self.tree1, otu_ids=self.oids1)
        self.assertAlmostEqual(actual[0], 4.5)

    def test_input_types(self):
        """Lists and arrays give identical results."""
        list_result = alpha_diversity('observed_otus', [1, 3, 0, 1, 0])
        array_result = alpha_diversity('observed_otus',
                                       np.array([1, 3, 0, 1, 0]))
        self.assertAlmostEqual(list_result[0], 3)
        assert_series_almost_equal(list_result, array_result)
        list_result = alpha_diversity('faith_pd', [1, 3, 0, 1, 0],
                                      tree=self.tree1, otu_ids=self.oids1)
        array_result = alpha_diversity('faith_pd', np.array([1, 3, 0, 1, 0]),
                                       tree=self.tree1, otu_ids=self.oids1)
        self.assertAlmostEqual(list_result[0], 4.5)
        assert_series_almost_equal(list_result, array_result)

    def test_observed_otus(self):
        # expected values hand-calculated
        expected = pd.Series([3, 3, 3, 3], index=self.sids1)
        actual = alpha_diversity('observed_otus', self.table1, self.sids1)
        assert_series_almost_equal(actual, expected)
        # function passed instead of string
        actual = alpha_diversity(observed_otus, self.table1, self.sids1)
        assert_series_almost_equal(actual, expected)
        # alt input table
        expected = pd.Series([2, 1, 0], index=self.sids2)
        actual = alpha_diversity('observed_otus', self.table2, self.sids2)
        assert_series_almost_equal(actual, expected)

    def test_faith_pd(self):
        # calling faith_pd through alpha_diversity gives same results as
        # calling it directly
        expected = []
        for e in self.table1:
            expected.append(faith_pd(e, tree=self.tree1, otu_ids=self.oids1))
        expected = pd.Series(expected)
        actual = alpha_diversity('faith_pd', self.table1, tree=self.tree1,
                                 otu_ids=self.oids1)
        assert_series_almost_equal(actual, expected)
        # alt input table and tree
        expected = []
        for e in self.table2:
            expected.append(faith_pd(e, tree=self.tree2, otu_ids=self.oids2))
        expected = pd.Series(expected)
        actual = alpha_diversity('faith_pd', self.table2, tree=self.tree2,
                                 otu_ids=self.oids2)
        assert_series_almost_equal(actual, expected)

    def test_no_ids(self):
        """Omitting ids yields a default integer index."""
        # expected values hand-calculated
        expected = pd.Series([3, 3, 3, 3])
        actual = alpha_diversity('observed_otus', self.table1)
        assert_series_almost_equal(actual, expected)

    def test_optimized(self):
        # calling optimized faith_pd gives same results as calling unoptimized
        # version
        optimized = alpha_diversity('faith_pd', self.table1, tree=self.tree1,
                                    otu_ids=self.oids1)
        unoptimized = alpha_diversity(faith_pd, self.table1, tree=self.tree1,
                                      otu_ids=self.oids1)
        assert_series_almost_equal(optimized, unoptimized)
class BetaDiversityTests(TestCase):
    """Tests for the ``beta_diversity`` driver function."""

    def setUp(self):
        # 3 samples x 2 OTUs count table
        self.table1 = [[1, 5],
                       [2, 3],
                       [0, 1]]
        self.sids1 = list('ABC')
        self.tree1 = TreeNode.read(StringIO(
            '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))
        self.oids1 = ['O1', 'O2']
        # 6 samples x 7 OTUs count table
        self.table2 = [[23, 64, 14, 0, 0, 3, 1],
                       [0, 3, 35, 42, 0, 12, 1],
                       [0, 5, 5, 0, 40, 40, 0],
                       [44, 35, 9, 0, 1, 0, 0],
                       [0, 2, 8, 0, 35, 45, 1],
                       [0, 0, 25, 35, 0, 19, 0]]
        self.sids2 = list('ABCDEF')

    def test_invalid_input(self):
        """Malformed non-phylogenetic input raises ValueError/TypeError."""
        # number of ids doesn't match the number of samples
        # NOTE(review): unlike every other call in this class, the metric
        # string is passed LAST here ('euclidean' lands in the ids slot,
        # self.table1 in the metric slot) -- confirm this ordering is
        # intentional.
        self.assertRaises(ValueError, beta_diversity, self.table1, list('AB'),
                          'euclidean')
        # unknown metric provided
        self.assertRaises(ValueError, beta_diversity, 'not-a-metric',
                          self.table1)
        # 3-D list provided as input
        self.assertRaises(ValueError, beta_diversity, 'euclidean',
                          [[[43]]])
        # negative counts
        self.assertRaises(ValueError, beta_diversity, 'euclidean',
                          [[0, 1, 3, 4], [0, 3, -12, 42]])
        self.assertRaises(ValueError, beta_diversity, 'euclidean',
                          [[0, 1, 3, -4], [0, 3, 12, 42]])
        # additional kwargs
        self.assertRaises(TypeError, beta_diversity, 'euclidean',
                          [[0, 1, 3], [0, 3, 12]],
                          not_a_real_kwarg=42.0)
        self.assertRaises(TypeError, beta_diversity, 'unweighted_unifrac',
                          [[0, 1, 3], [0, 3, 12]],
                          not_a_real_kwarg=42.0, tree=self.tree1,
                          otu_ids=['O1', 'O2', 'O3'])
        self.assertRaises(TypeError, beta_diversity, 'weighted_unifrac',
                          [[0, 1, 3], [0, 3, 12]],
                          not_a_real_kwarg=42.0, tree=self.tree1,
                          otu_ids=['O1', 'O2', 'O3'])
        self.assertRaises(TypeError, beta_diversity, weighted_unifrac,
                          [[0, 1, 3], [0, 3, 12]],
                          not_a_real_kwarg=42.0, tree=self.tree1,
                          otu_ids=['O1', 'O2', 'O3'])

    def test_invalid_input_phylogenetic(self):
        """Malformed tree/otu_ids input raises the appropriate error for
        both UniFrac variants."""
        # otu_ids not provided
        self.assertRaises(ValueError, beta_diversity, 'weighted_unifrac',
                          self.table1, list('ABC'), tree=self.tree1)
        self.assertRaises(ValueError, beta_diversity, 'unweighted_unifrac',
                          self.table1, list('ABC'), tree=self.tree1)
        # tree not provided
        self.assertRaises(ValueError, beta_diversity, 'weighted_unifrac',
                          self.table1, list('ABC'), otu_ids=self.oids1)
        self.assertRaises(ValueError, beta_diversity, 'unweighted_unifrac',
                          self.table1, list('ABC'), otu_ids=self.oids1)
        # tree has duplicated tip ids
        t = TreeNode.read(
            StringIO(u'(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(DuplicateNodeError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(DuplicateNodeError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # unrooted tree as input
        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
                                   u'OTU4:0.7);'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # otu_ids has duplicated ids
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU2']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # count and OTU vectors are not equal length
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # tree with no branch lengths
        t = TreeNode.read(
            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # tree missing some branch lengths
        t = TreeNode.read(
            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # some otu_ids not present in tree
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU42']
        self.assertRaises(MissingNodeError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(MissingNodeError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)

    def test_empty(self):
        """Empty count vectors yield an all-zero distance matrix."""
        # array of empty vectors
        actual = beta_diversity('euclidean',
                                np.array([[], []], dtype=np.int64),
                                ids=['a', 'b'])
        expected_dm = DistanceMatrix([[0.0, 0.0], [0.0, 0.0]], ['a', 'b'])
        npt.assert_array_equal(actual, expected_dm)
        actual = beta_diversity('unweighted_unifrac',
                                np.array([[], []], dtype=np.int64),
                                ids=['a', 'b'], tree=self.tree1, otu_ids=[])
        expected_dm = DistanceMatrix([[0.0, 0.0], [0.0, 0.0]], ['a', 'b'])
        self.assertEqual(actual, expected_dm)

    def test_input_types(self):
        """Lists and arrays give identical results."""
        actual_array = beta_diversity('euclidean',
                                      np.array([[1, 5], [2, 3]]),
                                      ids=['a', 'b'])
        actual_list = beta_diversity('euclidean',
                                     [[1, 5], [2, 3]], ids=['a', 'b'])
        self.assertEqual(actual_array, actual_list)

    def test_euclidean(self):
        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
        # near-equality testing when that support is available
        actual_dm = beta_diversity('euclidean', self.table1, self.sids1)
        self.assertEqual(actual_dm.shape, (3, 3))
        npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
        npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
        npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
        npt.assert_almost_equal(actual_dm['A', 'B'], 2.23606798)
        npt.assert_almost_equal(actual_dm['B', 'A'], 2.23606798)
        npt.assert_almost_equal(actual_dm['A', 'C'], 4.12310563)
        npt.assert_almost_equal(actual_dm['C', 'A'], 4.12310563)
        npt.assert_almost_equal(actual_dm['B', 'C'], 2.82842712)
        npt.assert_almost_equal(actual_dm['C', 'B'], 2.82842712)
        actual_dm = beta_diversity('euclidean', self.table2, self.sids2)
        expected_data = [
            [0., 80.8455317, 84.0297566, 36.3042697, 86.0116271, 78.9176786],
            [80.8455317, 0., 71.0844568, 74.4714710, 69.3397433, 14.422205],
            [84.0297566, 71.0844568, 0., 77.2851861, 8.3066238, 60.7536007],
            [36.3042697, 74.4714710, 77.2851861, 0., 78.7908624, 70.7389567],
            [86.0116271, 69.3397433, 8.3066238, 78.7908624, 0., 58.4807660],
            [78.9176786, 14.422205, 60.7536007, 70.7389567, 58.4807660, 0.]]
        expected_dm = DistanceMatrix(expected_data, self.sids2)
        for id1 in self.sids2:
            for id2 in self.sids2:
                npt.assert_almost_equal(actual_dm[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_braycurtis(self):
        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
        # near-equality testing when that support is available
        actual_dm = beta_diversity('braycurtis', self.table1, self.sids1)
        self.assertEqual(actual_dm.shape, (3, 3))
        npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
        npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
        npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
        npt.assert_almost_equal(actual_dm['A', 'B'], 0.27272727)
        npt.assert_almost_equal(actual_dm['B', 'A'], 0.27272727)
        npt.assert_almost_equal(actual_dm['A', 'C'], 0.71428571)
        npt.assert_almost_equal(actual_dm['C', 'A'], 0.71428571)
        npt.assert_almost_equal(actual_dm['B', 'C'], 0.66666667)
        npt.assert_almost_equal(actual_dm['C', 'B'], 0.66666667)
        actual_dm = beta_diversity('braycurtis', self.table2, self.sids2)
        expected_data = [
            [0., 0.78787879, 0.86666667, 0.30927835, 0.85714286, 0.81521739],
            [0.78787879, 0., 0.78142077, 0.86813187, 0.75, 0.1627907],
            [0.86666667, 0.78142077, 0., 0.87709497, 0.09392265, 0.71597633],
            [0.30927835, 0.86813187, 0.87709497, 0., 0.87777778, 0.89285714],
            [0.85714286, 0.75, 0.09392265, 0.87777778, 0., 0.68235294],
            [0.81521739, 0.1627907, 0.71597633, 0.89285714, 0.68235294, 0.]]
        expected_dm = DistanceMatrix(expected_data, self.sids2)
        for id1 in self.sids2:
            for id2 in self.sids2:
                npt.assert_almost_equal(actual_dm[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_unweighted_unifrac(self):
        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
        # near-equality testing when that support is available
        # expected values calculated by hand
        dm1 = beta_diversity('unweighted_unifrac', self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1)
        dm2 = beta_diversity(unweighted_unifrac, self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1)
        self.assertEqual(dm1.shape, (3, 3))
        self.assertEqual(dm1, dm2)
        expected_data = [[0.0, 0.0, 0.25],
                         [0.0, 0.0, 0.25],
                         [0.25, 0.25, 0.0]]
        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
        for id1 in self.sids1:
            for id2 in self.sids1:
                npt.assert_almost_equal(dm1[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_weighted_unifrac(self):
        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
        # near-equality testing when that support is available
        # expected values calculated by hand
        dm1 = beta_diversity('weighted_unifrac', self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1)
        dm2 = beta_diversity(weighted_unifrac, self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1)
        self.assertEqual(dm1.shape, (3, 3))
        self.assertEqual(dm1, dm2)
        expected_data = [
            [0.0, 0.1750000, 0.12499999],
            [0.1750000, 0.0, 0.3000000],
            [0.12499999, 0.3000000, 0.0]]
        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
        for id1 in self.sids1:
            for id2 in self.sids1:
                npt.assert_almost_equal(dm1[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_weighted_unifrac_normalized(self):
        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
        # near-equality testing when that support is available
        # expected values calculated by hand
        dm1 = beta_diversity('weighted_unifrac', self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1,
                             normalized=True)
        dm2 = beta_diversity(weighted_unifrac, self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1,
                             normalized=True)
        self.assertEqual(dm1.shape, (3, 3))
        self.assertEqual(dm1, dm2)
        expected_data = [
            [0.0, 0.128834, 0.085714],
            [0.128834, 0.0, 0.2142857],
            [0.085714, 0.2142857, 0.0]]
        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
        for id1 in self.sids1:
            for id2 in self.sids1:
                npt.assert_almost_equal(dm1[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_scipy_kwargs(self):
        # confirm that p can be passed to SciPy's minkowski, and that it
        # gives a different result than not passing it (the off-diagonal
        # entries are not equal).
        dm1 = beta_diversity('minkowski', self.table1, self.sids1)
        dm2 = beta_diversity('minkowski', self.table1, self.sids1, p=42.0)
        for id1 in self.sids1:
            for id2 in self.sids1:
                if id1 != id2:
                    self.assertNotEqual(dm1[id1, id2], dm2[id1, id2])

    def test_alt_pdist_f(self):
        # confirm that pdist_f is actually being used
        def not_a_real_pdist(counts, metric):
            return [[0.0, 42.0], [42.0, 0.0]]
        dm1 = beta_diversity('unweighted_unifrac', self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist)
        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity('weighted_unifrac', self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist)
        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity(unweighted_unifrac, self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist)
        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity("euclidean", self.table1,
                             pdist_f=not_a_real_pdist)
        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
        self.assertEqual(dm1, expected)

    def test_alt_pdist_f_uses_kwargs(self):
        # confirm that pdist_kwargs is actually being used
        def not_a_real_pdist(counts, metric, value):
            return [[0.0, value], [value, 0.0]]
        dm1 = beta_diversity('unweighted_unifrac', self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist,
                             pdist_kwargs={'value': 99.9})
        expected = DistanceMatrix([[0.0, 99.9], [99.9, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity('unweighted_unifrac', self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist,
                             pdist_kwargs={'value': 97.9})
        expected = DistanceMatrix([[0.0, 97.9], [97.9, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity('weighted_unifrac', self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist,
                             pdist_kwargs={'value': 99.9})
        expected = DistanceMatrix([[0.0, 99.9], [99.9, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity(unweighted_unifrac, self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist,
                             pdist_kwargs={'value': 99.9})
        expected = DistanceMatrix([[0.0, 99.9], [99.9, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity("euclidean", self.table1,
                             pdist_f=not_a_real_pdist,
                             pdist_kwargs={'value': 99.9})
        expected = DistanceMatrix([[0.0, 99.9], [99.9, 0.0]])
        self.assertEqual(dm1, expected)

    def test_alt_pdist_f_invalid_kwargs(self):
        def not_a_real_pdist(counts, metric):
            return [[0.0, 42.0], [42.0, 0.0]]
        # TypeError on extra pdist_kwarg
        self.assertRaises(TypeError, beta_diversity, unweighted_unifrac,
                          self.table1, otu_ids=self.oids1, tree=self.tree1,
                          pdist_f=not_a_real_pdist,
                          pdist_kwargs={'not_a_kwarg': True})
        def not_a_real_pdist(counts, metric, value):
            return [[0.0, value], [value, 0.0]]
        # TypeError on extra kwarg
        self.assertRaises(TypeError, beta_diversity, unweighted_unifrac,
                          self.table1, otu_ids=self.oids1, tree=self.tree1,
                          pdist_f=not_a_real_pdist,
                          pdist_kwargs={'value': 99.9, 'not_a_kwarg': True})
class MetricGetters(TestCase):
    """Sanity checks for the metric-listing helper functions."""

    def test_get_alpha_diversity_metrics(self):
        metrics = get_alpha_diversity_metrics()
        # basic sanity checks
        self.assertIn('faith_pd', metrics)
        self.assertIn('chao1', metrics)

    def test_get_alpha_diversity_metrics_sorted(self):
        metrics = get_alpha_diversity_metrics()
        # the returned names should already be in sorted order
        self.assertEqual(metrics, sorted(list(metrics)))

    def test_get_beta_diversity_metrics(self):
        metrics = get_beta_diversity_metrics()
        # basic sanity checks
        self.assertIn('unweighted_unifrac', metrics)
        self.assertIn('weighted_unifrac', metrics)

    def test_get_beta_diversity_metrics_sorted(self):
        metrics = get_beta_diversity_metrics()
        # the returned names should already be in sorted order
        self.assertEqual(metrics, sorted(list(metrics)))
# Allow running this test module directly (delegates to unittest's main).
if __name__ == "__main__":
    main()
# TST: addressed @jairideout's comment
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
import pandas as pd
import numpy as np
import numpy.testing as npt
import six
from skbio import DistanceMatrix, TreeNode
from skbio.io._fileobject import StringIO
from skbio.util._testing import assert_series_almost_equal
from skbio.diversity import (alpha_diversity, beta_diversity,
get_alpha_diversity_metrics,
get_beta_diversity_metrics)
from skbio.diversity.alpha import faith_pd, observed_otus
from skbio.diversity.beta import unweighted_unifrac, weighted_unifrac
from skbio.tree import DuplicateNodeError, MissingNodeError
class AlphaDiversityTests(TestCase):
    """Tests for the ``alpha_diversity`` driver function.

    Expected diversity values in these tests were hand-calculated for the
    small fixture tables defined in ``setUp``.
    """

    def setUp(self):
        """Create two count tables with matching sample/OTU ids and trees."""
        # table1: 4 samples (rows) x 5 OTUs (columns).
        self.table1 = np.array([[1, 3, 0, 1, 0],
                                [0, 2, 0, 4, 4],
                                [0, 0, 6, 2, 1],
                                [0, 0, 1, 1, 1]])
        self.sids1 = list('ABCD')
        self.oids1 = ['OTU%d' % i for i in range(1, 6)]
        # Rooted tree covering OTU1..OTU5 with branch lengths.
        self.tree1 = TreeNode.read(StringIO(
            u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
            u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
        # table2: 3 samples x 2 OTUs; third sample is all zeros.
        self.table2 = np.array([[1, 3],
                                [0, 2],
                                [0, 0]])
        self.sids2 = list('xyz')
        self.oids2 = ['OTU1', 'OTU5']
        self.tree2 = TreeNode.read(StringIO(
            u'(((((OTU1:42.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
            u'0.0,(OTU4:0.75,OTU5:0.0001):1.25):0.0)root;'))

    def test_invalid_input(self):
        """Malformed inputs raise ValueError; unexpected kwargs, TypeError."""
        # number of ids doesn't match the number of samples
        self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
                          self.table1, list('ABC'))
        # unknown metric provided
        self.assertRaises(ValueError, alpha_diversity, 'not-a-metric',
                          self.table1)
        # 3-D list provided as input
        self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
                          [[[43]]])
        # negative counts
        self.assertRaises(ValueError, alpha_diversity, 'observed_otus',
                          [0, 3, -12, 42])
        # additional kwargs
        self.assertRaises(TypeError, alpha_diversity, 'observed_otus',
                          [0, 1], not_a_real_kwarg=42.0)
        self.assertRaises(TypeError, alpha_diversity, 'faith_pd',
                          [0, 1], tree=self.tree1, otu_ids=['OTU1', 'OTU2'],
                          not_a_real_kwarg=42.0)
        # same check when the metric is passed as a function, not a string
        self.assertRaises(TypeError, alpha_diversity, faith_pd,
                          [0, 1], tree=self.tree1, otu_ids=['OTU1', 'OTU2'],
                          not_a_real_kwarg=42.0)

    def test_invalid_input_phylogenetic(self):
        """Phylogenetic metrics validate tree/otu_ids combinations."""
        # otu_ids not provided
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd', self.table1,
                          list('ABC'), tree=self.tree1)
        # tree not provided
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd', self.table1,
                          list('ABC'), otu_ids=self.oids1)
        # tree has duplicated tip ids
        t = TreeNode.read(
            StringIO(u'(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(DuplicateNodeError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # unrooted tree as input
        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
                                   u'OTU4:0.7);'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # otu_ids has duplicated ids
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU2']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # count and OTU vectors are not equal length
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # tree with no branch lengths
        t = TreeNode.read(
            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # tree missing some branch lengths
        t = TreeNode.read(
            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)
        # some otu_ids not present in tree
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU42']
        self.assertRaises(MissingNodeError, alpha_diversity, 'faith_pd',
                          counts, otu_ids=otu_ids, tree=t)

    def test_empty(self):
        """Empty count vectors yield zero diversity, not errors."""
        # empty vector
        actual = alpha_diversity('observed_otus', np.array([], dtype=np.int64))
        expected = pd.Series([0])
        assert_series_almost_equal(actual, expected)
        # array of empty vector
        actual = alpha_diversity('observed_otus',
                                 np.array([[]], dtype=np.int64))
        expected = pd.Series([0])
        assert_series_almost_equal(actual, expected)
        # array of empty vectors
        actual = alpha_diversity('observed_otus',
                                 np.array([[], []], dtype=np.int64))
        expected = pd.Series([0, 0])
        assert_series_almost_equal(actual, expected)
        # empty vector
        actual = alpha_diversity('faith_pd', np.array([], dtype=np.int64),
                                 tree=self.tree1, otu_ids=[])
        expected = pd.Series([0.])
        assert_series_almost_equal(actual, expected)
        # array of empty vector
        actual = alpha_diversity('faith_pd',
                                 np.array([[]], dtype=np.int64),
                                 tree=self.tree1, otu_ids=[])
        expected = pd.Series([0.])
        assert_series_almost_equal(actual, expected)
        # array of empty vectors
        actual = alpha_diversity('faith_pd',
                                 np.array([[], []], dtype=np.int64),
                                 tree=self.tree1, otu_ids=[])
        expected = pd.Series([0., 0.])
        assert_series_almost_equal(actual, expected)

    def test_single_count_vector(self):
        """A 1-D counts vector is accepted and produces a 1-element result."""
        actual = alpha_diversity('observed_otus', np.array([1, 0, 2]))
        expected = pd.Series([2])
        assert_series_almost_equal(actual, expected)
        actual = alpha_diversity('faith_pd', np.array([1, 3, 0, 1, 0]),
                                 tree=self.tree1, otu_ids=self.oids1)
        self.assertAlmostEqual(actual[0], 4.5)

    def test_input_types(self):
        """Lists and numpy arrays give identical results."""
        list_result = alpha_diversity('observed_otus', [1, 3, 0, 1, 0])
        array_result = alpha_diversity('observed_otus',
                                       np.array([1, 3, 0, 1, 0]))
        self.assertAlmostEqual(list_result[0], 3)
        assert_series_almost_equal(list_result, array_result)
        list_result = alpha_diversity('faith_pd', [1, 3, 0, 1, 0],
                                      tree=self.tree1, otu_ids=self.oids1)
        array_result = alpha_diversity('faith_pd', np.array([1, 3, 0, 1, 0]),
                                       tree=self.tree1, otu_ids=self.oids1)
        self.assertAlmostEqual(list_result[0], 4.5)
        assert_series_almost_equal(list_result, array_result)

    def test_observed_otus(self):
        """observed_otus via the driver matches hand-calculated values."""
        # expected values hand-calculated
        expected = pd.Series([3, 3, 3, 3], index=self.sids1)
        actual = alpha_diversity('observed_otus', self.table1, self.sids1)
        assert_series_almost_equal(actual, expected)
        # function passed instead of string
        actual = alpha_diversity(observed_otus, self.table1, self.sids1)
        assert_series_almost_equal(actual, expected)
        # alt input table
        expected = pd.Series([2, 1, 0], index=self.sids2)
        actual = alpha_diversity('observed_otus', self.table2, self.sids2)
        assert_series_almost_equal(actual, expected)

    def test_faith_pd(self):
        """Driver results match per-sample calls to faith_pd directly."""
        # calling faith_pd through alpha_diversity gives same results as
        # calling it directly
        expected = []
        for e in self.table1:
            expected.append(faith_pd(e, tree=self.tree1, otu_ids=self.oids1))
        expected = pd.Series(expected)
        actual = alpha_diversity('faith_pd', self.table1, tree=self.tree1,
                                 otu_ids=self.oids1)
        assert_series_almost_equal(actual, expected)
        # alt input table and tree
        expected = []
        for e in self.table2:
            expected.append(faith_pd(e, tree=self.tree2, otu_ids=self.oids2))
        expected = pd.Series(expected)
        actual = alpha_diversity('faith_pd', self.table2, tree=self.tree2,
                                 otu_ids=self.oids2)
        assert_series_almost_equal(actual, expected)

    def test_no_ids(self):
        """When ids are omitted, a default integer index is used."""
        # expected values hand-calculated
        expected = pd.Series([3, 3, 3, 3])
        actual = alpha_diversity('observed_otus', self.table1)
        assert_series_almost_equal(actual, expected)

    def test_optimized(self):
        """String lookup (optimized path) agrees with the plain function."""
        # calling optimized faith_pd gives same results as calling unoptimized
        # version
        optimized = alpha_diversity('faith_pd', self.table1, tree=self.tree1,
                                    otu_ids=self.oids1)
        unoptimized = alpha_diversity(faith_pd, self.table1, tree=self.tree1,
                                      otu_ids=self.oids1)
        assert_series_almost_equal(optimized, unoptimized)
class BetaDiversityTests(TestCase):
    """Tests for the ``beta_diversity`` driver function.

    Expected distance-matrix values in these tests were hand-calculated
    for the small fixture tables defined in ``setUp``.
    """

    def setUp(self):
        """Create two count tables plus ids, a rooted tree, and OTU ids."""
        # table1: 3 samples (rows) x 2 OTUs (columns).
        self.table1 = [[1, 5],
                       [2, 3],
                       [0, 1]]
        self.sids1 = list('ABC')
        self.tree1 = TreeNode.read(StringIO(
            '((O1:0.25, O2:0.50):0.25, O3:0.75)root;'))
        self.oids1 = ['O1', 'O2']
        # table2: 6 samples x 7 OTUs, used for the larger hand-checked
        # euclidean/braycurtis matrices below.
        self.table2 = [[23, 64, 14, 0, 0, 3, 1],
                       [0, 3, 35, 42, 0, 12, 1],
                       [0, 5, 5, 0, 40, 40, 0],
                       [44, 35, 9, 0, 1, 0, 0],
                       [0, 2, 8, 0, 35, 45, 1],
                       [0, 0, 25, 35, 0, 19, 0]]
        self.sids2 = list('ABCDEF')

    def test_invalid_input(self):
        """Malformed inputs raise with the documented error messages."""
        # number of ids doesn't match the number of samples
        error_msg = ("Number of rows in ``counts`` must be equal to number of"
                     " provided ``ids``.")
        with six.assertRaisesRegex(self, ValueError, error_msg):
            # Bug fix: the metric must be the first positional argument,
            # matching beta_diversity's signature and every other call in
            # this class. The previous call passed the table as the metric,
            # so the asserted error message could never be produced.
            beta_diversity('euclidean', self.table1, list('AB'))
        # unknown metric provided
        error_msg = "Unknown Distance Metric: not-a-metric"
        with six.assertRaisesRegex(self, ValueError, error_msg):
            beta_diversity('not-a-metric', self.table1)
        # 3-D list provided as input
        error_msg = ("Only 1-D and 2-D array-like objects can be provided as "
                     "input. Provided object has 3 dimensions.")
        with six.assertRaisesRegex(self, ValueError, error_msg):
            beta_diversity('euclidean', [[[43]]])
        # negative counts
        error_msg = "Counts vector cannot contain negative values."
        with six.assertRaisesRegex(self, ValueError, error_msg):
            beta_diversity('euclidean', [[0, 1, 3, 4], [0, 3, -12, 42]])
        with six.assertRaisesRegex(self, ValueError, error_msg):
            beta_diversity('euclidean', [[0, 1, 3, -4], [0, 3, 12, 42]])
        # additional kwargs
        # (raw strings: the patterns contain regex escapes)
        error_msg = (r"pdist\(\) got an unexpected keyword argument "
                     r"'not_a_real_kwarg'")
        with six.assertRaisesRegex(self, TypeError, error_msg):
            beta_diversity('euclidean', [[0, 1, 3], [0, 3, 12]],
                           not_a_real_kwarg=42.0)
        with six.assertRaisesRegex(self, TypeError, error_msg):
            beta_diversity('unweighted_unifrac', [[0, 1, 3], [0, 3, 12]],
                           not_a_real_kwarg=42.0, tree=self.tree1,
                           otu_ids=['O1', 'O2', 'O3'])
        with six.assertRaisesRegex(self, TypeError, error_msg):
            beta_diversity('weighted_unifrac', [[0, 1, 3], [0, 3, 12]],
                           not_a_real_kwarg=42.0, tree=self.tree1,
                           otu_ids=['O1', 'O2', 'O3'])
        error_msg = (r"weighted_unifrac\(\) got an unexpected keyword argument "
                     r"'not_a_real_kwarg'")
        with six.assertRaisesRegex(self, TypeError, error_msg):
            beta_diversity(weighted_unifrac, [[0, 1, 3], [0, 3, 12]],
                           not_a_real_kwarg=42.0, tree=self.tree1,
                           otu_ids=['O1', 'O2', 'O3'])

    def test_invalid_input_phylogenetic(self):
        """UniFrac metrics validate tree/otu_ids combinations."""
        # otu_ids not provided
        self.assertRaises(ValueError, beta_diversity, 'weighted_unifrac',
                          self.table1, list('ABC'), tree=self.tree1)
        self.assertRaises(ValueError, beta_diversity, 'unweighted_unifrac',
                          self.table1, list('ABC'), tree=self.tree1)
        # tree not provided
        self.assertRaises(ValueError, beta_diversity, 'weighted_unifrac',
                          self.table1, list('ABC'), otu_ids=self.oids1)
        self.assertRaises(ValueError, beta_diversity, 'unweighted_unifrac',
                          self.table1, list('ABC'), otu_ids=self.oids1)
        # tree has duplicated tip ids
        t = TreeNode.read(
            StringIO(u'(((((OTU2:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(DuplicateNodeError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(DuplicateNodeError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # unrooted tree as input
        t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
                                   u'OTU4:0.7);'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # otu_ids has duplicated ids
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU2']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # count and OTU vectors are not equal length
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU2:0.75):1.25):0.0)root;'))
        counts = [1, 2]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # tree with no branch lengths
        t = TreeNode.read(
            StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # tree missing some branch lengths
        t = TreeNode.read(
            StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU3']
        self.assertRaises(ValueError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(ValueError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)
        # some otu_ids not present in tree
        t = TreeNode.read(
            StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
                     u'0.75,OTU5:0.75):1.25):0.0)root;'))
        counts = [1, 2, 3]
        otu_ids = ['OTU1', 'OTU2', 'OTU42']
        self.assertRaises(MissingNodeError, beta_diversity,
                          'weighted_unifrac', counts, otu_ids=otu_ids, tree=t)
        self.assertRaises(MissingNodeError, beta_diversity,
                          'unweighted_unifrac', counts, otu_ids=otu_ids,
                          tree=t)

    def test_empty(self):
        """Empty count vectors yield an all-zero distance matrix."""
        # array of empty vectors
        actual = beta_diversity('euclidean',
                                np.array([[], []], dtype=np.int64),
                                ids=['a', 'b'])
        expected_dm = DistanceMatrix([[0.0, 0.0], [0.0, 0.0]], ['a', 'b'])
        npt.assert_array_equal(actual, expected_dm)
        actual = beta_diversity('unweighted_unifrac',
                                np.array([[], []], dtype=np.int64),
                                ids=['a', 'b'], tree=self.tree1, otu_ids=[])
        expected_dm = DistanceMatrix([[0.0, 0.0], [0.0, 0.0]], ['a', 'b'])
        self.assertEqual(actual, expected_dm)

    def test_input_types(self):
        """Lists and numpy arrays give identical results."""
        actual_array = beta_diversity('euclidean',
                                      np.array([[1, 5], [2, 3]]),
                                      ids=['a', 'b'])
        actual_list = beta_diversity('euclidean',
                                     [[1, 5], [2, 3]], ids=['a', 'b'])
        self.assertEqual(actual_array, actual_list)

    def test_euclidean(self):
        """Euclidean distances match hand-calculated values."""
        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
        # near-equality testing when that support is available
        actual_dm = beta_diversity('euclidean', self.table1, self.sids1)
        self.assertEqual(actual_dm.shape, (3, 3))
        npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
        npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
        npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
        npt.assert_almost_equal(actual_dm['A', 'B'], 2.23606798)
        npt.assert_almost_equal(actual_dm['B', 'A'], 2.23606798)
        npt.assert_almost_equal(actual_dm['A', 'C'], 4.12310563)
        npt.assert_almost_equal(actual_dm['C', 'A'], 4.12310563)
        npt.assert_almost_equal(actual_dm['B', 'C'], 2.82842712)
        npt.assert_almost_equal(actual_dm['C', 'B'], 2.82842712)
        actual_dm = beta_diversity('euclidean', self.table2, self.sids2)
        expected_data = [
            [0., 80.8455317, 84.0297566, 36.3042697, 86.0116271, 78.9176786],
            [80.8455317, 0., 71.0844568, 74.4714710, 69.3397433, 14.422205],
            [84.0297566, 71.0844568, 0., 77.2851861, 8.3066238, 60.7536007],
            [36.3042697, 74.4714710, 77.2851861, 0., 78.7908624, 70.7389567],
            [86.0116271, 69.3397433, 8.3066238, 78.7908624, 0., 58.4807660],
            [78.9176786, 14.422205, 60.7536007, 70.7389567, 58.4807660, 0.]]
        expected_dm = DistanceMatrix(expected_data, self.sids2)
        for id1 in self.sids2:
            for id2 in self.sids2:
                npt.assert_almost_equal(actual_dm[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_braycurtis(self):
        """Bray-Curtis distances match hand-calculated values."""
        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
        # near-equality testing when that support is available
        actual_dm = beta_diversity('braycurtis', self.table1, self.sids1)
        self.assertEqual(actual_dm.shape, (3, 3))
        npt.assert_almost_equal(actual_dm['A', 'A'], 0.0)
        npt.assert_almost_equal(actual_dm['B', 'B'], 0.0)
        npt.assert_almost_equal(actual_dm['C', 'C'], 0.0)
        npt.assert_almost_equal(actual_dm['A', 'B'], 0.27272727)
        npt.assert_almost_equal(actual_dm['B', 'A'], 0.27272727)
        npt.assert_almost_equal(actual_dm['A', 'C'], 0.71428571)
        npt.assert_almost_equal(actual_dm['C', 'A'], 0.71428571)
        npt.assert_almost_equal(actual_dm['B', 'C'], 0.66666667)
        npt.assert_almost_equal(actual_dm['C', 'B'], 0.66666667)
        actual_dm = beta_diversity('braycurtis', self.table2, self.sids2)
        expected_data = [
            [0., 0.78787879, 0.86666667, 0.30927835, 0.85714286, 0.81521739],
            [0.78787879, 0., 0.78142077, 0.86813187, 0.75, 0.1627907],
            [0.86666667, 0.78142077, 0., 0.87709497, 0.09392265, 0.71597633],
            [0.30927835, 0.86813187, 0.87709497, 0., 0.87777778, 0.89285714],
            [0.85714286, 0.75, 0.09392265, 0.87777778, 0., 0.68235294],
            [0.81521739, 0.1627907, 0.71597633, 0.89285714, 0.68235294, 0.]]
        expected_dm = DistanceMatrix(expected_data, self.sids2)
        for id1 in self.sids2:
            for id2 in self.sids2:
                npt.assert_almost_equal(actual_dm[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_unweighted_unifrac(self):
        """Unweighted UniFrac via string and function metrics agree."""
        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
        # near-equality testing when that support is available
        # expected values calculated by hand
        dm1 = beta_diversity('unweighted_unifrac', self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1)
        dm2 = beta_diversity(unweighted_unifrac, self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1)
        self.assertEqual(dm1.shape, (3, 3))
        self.assertEqual(dm1, dm2)
        expected_data = [[0.0, 0.0, 0.25],
                         [0.0, 0.0, 0.25],
                         [0.25, 0.25, 0.0]]
        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
        for id1 in self.sids1:
            for id2 in self.sids1:
                npt.assert_almost_equal(dm1[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_weighted_unifrac(self):
        """Weighted UniFrac via string and function metrics agree."""
        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
        # near-equality testing when that support is available
        # expected values calculated by hand
        dm1 = beta_diversity('weighted_unifrac', self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1)
        dm2 = beta_diversity(weighted_unifrac, self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1)
        self.assertEqual(dm1.shape, (3, 3))
        self.assertEqual(dm1, dm2)
        expected_data = [
            [0.0, 0.1750000, 0.12499999],
            [0.1750000, 0.0, 0.3000000],
            [0.12499999, 0.3000000, 0.0]]
        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
        for id1 in self.sids1:
            for id2 in self.sids1:
                npt.assert_almost_equal(dm1[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_weighted_unifrac_normalized(self):
        """Normalized weighted UniFrac matches hand-calculated values."""
        # TODO: update npt.assert_almost_equal calls to use DistanceMatrix
        # near-equality testing when that support is available
        # expected values calculated by hand
        dm1 = beta_diversity('weighted_unifrac', self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1,
                             normalized=True)
        dm2 = beta_diversity(weighted_unifrac, self.table1, self.sids1,
                             otu_ids=self.oids1, tree=self.tree1,
                             normalized=True)
        self.assertEqual(dm1.shape, (3, 3))
        self.assertEqual(dm1, dm2)
        expected_data = [
            [0.0, 0.128834, 0.085714],
            [0.128834, 0.0, 0.2142857],
            [0.085714, 0.2142857, 0.0]]
        expected_dm = DistanceMatrix(expected_data, ids=self.sids1)
        for id1 in self.sids1:
            for id2 in self.sids1:
                npt.assert_almost_equal(dm1[id1, id2],
                                        expected_dm[id1, id2], 6)

    def test_scipy_kwargs(self):
        """Extra kwargs (e.g. minkowski's ``p``) are forwarded to SciPy."""
        # confirm that p can be passed to SciPy's minkowski, and that it
        # gives a different result than not passing it (the off-diagonal
        # entries are not equal).
        dm1 = beta_diversity('minkowski', self.table1, self.sids1)
        dm2 = beta_diversity('minkowski', self.table1, self.sids1, p=42.0)
        for id1 in self.sids1:
            for id2 in self.sids1:
                if id1 != id2:
                    self.assertNotEqual(dm1[id1, id2], dm2[id1, id2])

    def test_alt_pdist_f(self):
        """A caller-supplied pdist_f replaces the default pairwise driver."""
        # confirm that pdist_f is actually being used
        def not_a_real_pdist(counts, metric):
            return [[0.0, 42.0], [42.0, 0.0]]
        dm1 = beta_diversity('unweighted_unifrac', self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist)
        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity('weighted_unifrac', self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist)
        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity(unweighted_unifrac, self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist)
        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity("euclidean", self.table1,
                             pdist_f=not_a_real_pdist)
        expected = DistanceMatrix([[0.0, 42.0], [42.0, 0.0]])
        self.assertEqual(dm1, expected)

    def test_alt_pdist_f_uses_kwargs(self):
        """pdist_kwargs are forwarded to the caller-supplied pdist_f."""
        # confirm that pdist_kwargs is actually being used
        def not_a_real_pdist(counts, metric, value):
            return [[0.0, value], [value, 0.0]]
        dm1 = beta_diversity('unweighted_unifrac', self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist,
                             pdist_kwargs={'value': 99.9})
        expected = DistanceMatrix([[0.0, 99.9], [99.9, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity('unweighted_unifrac', self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist,
                             pdist_kwargs={'value': 97.9})
        expected = DistanceMatrix([[0.0, 97.9], [97.9, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity('weighted_unifrac', self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist,
                             pdist_kwargs={'value': 99.9})
        expected = DistanceMatrix([[0.0, 99.9], [99.9, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity(unweighted_unifrac, self.table1,
                             otu_ids=self.oids1, tree=self.tree1,
                             pdist_f=not_a_real_pdist,
                             pdist_kwargs={'value': 99.9})
        expected = DistanceMatrix([[0.0, 99.9], [99.9, 0.0]])
        self.assertEqual(dm1, expected)
        dm1 = beta_diversity("euclidean", self.table1,
                             pdist_f=not_a_real_pdist,
                             pdist_kwargs={'value': 99.9})
        expected = DistanceMatrix([[0.0, 99.9], [99.9, 0.0]])
        self.assertEqual(dm1, expected)

    def test_alt_pdist_f_invalid_kwargs(self):
        """Unknown pdist_kwargs surface as TypeError from pdist_f."""
        def not_a_real_pdist(counts, metric):
            return [[0.0, 42.0], [42.0, 0.0]]
        # TypeError on extra pdist_kwarg
        self.assertRaises(TypeError, beta_diversity, unweighted_unifrac,
                          self.table1, otu_ids=self.oids1, tree=self.tree1,
                          pdist_f=not_a_real_pdist,
                          pdist_kwargs={'not_a_kwarg': True})
        def not_a_real_pdist(counts, metric, value):
            return [[0.0, value], [value, 0.0]]
        # TypeError on extra kwarg
        self.assertRaises(TypeError, beta_diversity, unweighted_unifrac,
                          self.table1, otu_ids=self.oids1, tree=self.tree1,
                          pdist_f=not_a_real_pdist,
                          pdist_kwargs={'value': 99.9, 'not_a_kwarg': True})
class MetricGetters(TestCase):
    """Sanity checks for the metric-listing helper functions."""

    def test_get_alpha_diversity_metrics(self):
        """Well-known alpha metrics appear in the listing."""
        metrics = get_alpha_diversity_metrics()
        for known in ('faith_pd', 'chao1'):
            self.assertTrue(known in metrics)

    def test_get_alpha_diversity_metrics_sorted(self):
        """The alpha metric listing is returned in sorted order."""
        metrics = get_alpha_diversity_metrics()
        self.assertEqual(metrics, sorted(list(metrics)))

    def test_get_beta_diversity_metrics(self):
        """Well-known beta metrics appear in the listing."""
        metrics = get_beta_diversity_metrics()
        for known in ('unweighted_unifrac', 'weighted_unifrac'):
            self.assertTrue(known in metrics)

    def test_get_beta_diversity_metrics_sorted(self):
        """The beta metric listing is returned in sorted order."""
        metrics = get_beta_diversity_metrics()
        self.assertEqual(metrics, sorted(list(metrics)))
if __name__ == "__main__":
    # Allow running this test module directly (unittest.main).
    main()
|
import semver
import virtool.github
def format_hmm_release(updated, release, installed):
    """Format an HMM release document for the client.

    :param updated: the release dict freshly fetched from GitHub, or ``None``
        when GitHub returned 304 (not modified)
    :param release: the previously stored release dict (may be ``None``)
    :param installed: the currently installed release dict (may be ``None``)
    :return: the formatted release with a ``newer`` flag, or ``None`` when
        there was nothing new to format

    """
    # The release dict will only be replaced if there is a 200 response from GitHub. A 304 indicates the release
    # has not changed and `None` is returned from `get_release()`.
    if updated is None:
        return None

    # Bug fix: format the freshly fetched release (`updated`), not the stale
    # `release` argument, and compare the formatted name against the
    # installed version.
    formatted = virtool.github.format_release(updated)

    formatted["newer"] = bool(
        release is None or installed is None or (
            installed and
            semver.compare(formatted["name"].lstrip("v"), installed["name"].lstrip("v")) == 1
        )
    )

    return formatted
Fix HMM release formatting bug
import semver
import virtool.github
def format_hmm_release(updated, release, installed):
    """Format an HMM release document for the client.

    :param updated: the release dict freshly fetched from GitHub, or ``None``
        when GitHub returned 304 (not modified)
    :param release: the previously stored release dict (may be ``None``)
    :param installed: the currently installed release dict (may be ``None``)
    :return: the formatted release with a ``newer`` flag, or ``None``

    """
    # A 304 from GitHub means the release is unchanged and `get_release()`
    # returned `None`; there is nothing to format in that case.
    if updated is None:
        return None

    formatted = virtool.github.format_release(updated)

    # The remote release is considered newer when either side is unknown, or
    # when its semver name is strictly greater than the installed one.
    newer = False

    if release is None or installed is None:
        newer = True
    elif installed:
        remote_version = formatted["name"].lstrip("v")
        local_version = installed["name"].lstrip("v")
        newer = semver.compare(remote_version, local_version) == 1

    formatted["newer"] = newer

    return formatted
|
class Car(object):
    """A simple car with a model, color, and fuel efficiency (Python 2)."""
    # Class-level default; shared by all instances until shadowed.
    condition = "new"
    def __init__(self, model, color, mpg):
        self.model = model
        self.color = color
        self.mpg = mpg
    def display_car(self):
        # Print a human-readable description of this car.
        print "This is a %s %s with %s MPG." % (self.color, self.model, str(self.mpg))
# Demo: build a car, show its default condition, and describe it.
my_car = Car("DeLorean", "silver", 88)
print my_car.condition
my_car.display_car()
Add a subclass of Car and a drive_car method
class Car(object):
    """A simple car with a model, color, and fuel efficiency (Python 2)."""
    # Class-level default; shadowed per-instance once drive_car() is called.
    condition = "new"
    def __init__(self, model, color, mpg):
        self.model = model
        self.color = color
        self.mpg = mpg
    def display_car(self):
        # Print a human-readable description of this car.
        print "This is a %s %s with %s MPG." % (self.color, self.model, str(self.mpg))
    def drive_car(self):
        # Driving marks the car as used (sets an instance attribute that
        # shadows the class-level default).
        self.condition = "used"
# Demo: condition flips from "new" to "used" after driving.
my_car = Car("DeLorean", "silver", 88)
print my_car.condition
my_car.drive_car()
print my_car.condition
class ElectricCar(Car):
    """A Car subclass that additionally records its battery type."""
    def __init__(self, battery_type, model, color, mpg):
        # Delegate the common attributes to Car, then add battery_type.
        super(ElectricCar, self).__init__(model, color, mpg)
        self.battery_type = battery_type
my_car = ElectricCar("molten salt", "Benz", "Black", 120)
|
import numpy as np
import scipy as sp
import openpnm as op
class DelaunayGabrielTest:
    """Tests for Delaunay/Gabriel network generators.

    All test bodies are currently commented out, so this class only
    exercises setup/teardown; see the commented code for the intended
    assertions.
    """
    def setup_class(self):
        pass
    def teardown_class(self):
        pass
    # def test_gabriel_and_delaunay_cubic(self):
    #     np.random.seed(0)
    #     dn = op.network.Delaunay(shape=[1, 1, 1], points=50)
    #     np.random.seed(0)
    #     gn = op.network.Gabriel(shape=[1, 1, 1], points=50)
    #     assert gn.Nt < dn.Nt
    #     # assert gn.num_pores(['internal', 'surface'], mode='union') == 50
    #     # assert dn.num_pores(['internal', 'surface'], mode='union') == 50
    #     # assert gn.num_pores('boundary') == 75
    # def test_gabriel_and_delaunay_square(self):
    #     np.random.seed(0)
    #     dn = op.network.Delaunay(shape=[1, 1, 0], points=50)
    #     np.random.seed(0)
    #     gn = op.network.Gabriel(shape=[1, 1, 0], points=50)
    #     assert gn.Nt < dn.Nt
    #     # assert gn.num_pores(['internal', 'surface'], mode='union') == 50
    #     # assert dn.num_pores(['internal', 'surface'], mode='union') == 50
    #     # assert gn.num_pores('boundary') == 24
    # def test_add_boundary_pores(self):
    #     np.random.seed(0)
    #     dn = op.network.Delaunay(shape=[1, 1, 1], points=50)
    #     # dn.add_boundary_pores(offset=0.1)
    #     # assert np.all(np.amin(dn['pore.coords']) == -0.1)
    #     # assert np.all(np.amax(dn['pore.coords']) == 1.1)
if __name__ == '__main__':
    # Ad-hoc runner: instantiate the test class and invoke every attribute
    # whose name starts with 'test' (mirrors pytest-style collection).
    t = DelaunayGabrielTest()
    t.setup_class()
    self = t
    for item in t.__dir__():
        if item.startswith('test'):
            print('running test: '+item)
            t.__getattribute__(item)()
Re-add Delaunay tests
import numpy as np
import scipy as sp
import openpnm as op
class DelaunayGabrielTest:
    """Smoke tests for constructing Delaunay networks in 3-D and 2-D."""

    def setup_class(self):
        """No shared fixtures required."""
        pass

    def teardown_class(self):
        """Nothing to clean up."""
        pass

    def test_gabriel_and_delaunay_cubic(self):
        """A 50-point Delaunay network can be built in a unit cube."""
        np.random.seed(0)
        network = op.network.Delaunay(points=50, shape=[1, 1, 1])
        # assert network.num_pores(['internal', 'surface'], mode='union') == 50

    def test_gabriel_and_delaunay_square(self):
        """A 50-point Delaunay network can be built in a unit square."""
        np.random.seed(0)
        network = op.network.Delaunay(points=50, shape=[1, 1, 0])
        # assert network.num_pores(['internal', 'surface'], mode='union') == 50
if __name__ == '__main__':
    # Minimal stand-in for a test runner: call every test* method in
    # attribute-listing order.
    t = DelaunayGabrielTest()
    t.setup_class()
    self = t
    for name in t.__dir__():
        if name.startswith('test'):
            print('running test: ' + name)
            getattr(t, name)()
|
from flask.ext.security.core import current_user
from flask import request, jsonify
from . import route
from ..models import User
from ..core import db
from .datatables import DataTables
from .index import bp
@route(bp, '/users', methods = ['GET', 'POST'])
def users():
    """Serve DataTables-formatted JSON for the users listing."""
    # NOTE(review): column_whitelist is defined but never passed to
    # DataTables below — presumably it was intended to restrict which
    # columns are exposed to the client. Confirm whether DataTables is
    # supposed to consume it.
    column_whitelist = {
        'id' : True,
        'username' : True,
        'lastName' : True,
        'firstName' : True,
        'email' : True,
        'isActive' : True,
        'last_login' : True,
        'current_login' : True,
        'last_login_ip' : True,
        'current_login_ip' : True,
        'login_count' : True,
        'role' : True,
    }
    # Build the base query; DataTables applies paging/sorting/filtering
    # from the request and serializes the result.
    query = db.session.query(User)
    rowTable = DataTables(request, User, query)
    return jsonify(rowTable.output_result())
Add all user fields to the column whitelist
from flask.ext.security.core import current_user
from flask import request, jsonify
from . import route
from ..models import User
from ..core import db
from .datatables import DataTables
from .index import bp
@route(bp, '/users', methods = ['GET', 'POST'])
def users():
    """Serve DataTables-formatted JSON for the users listing."""
    # NOTE(review): column_whitelist is defined but never passed to
    # DataTables below — presumably it was intended to restrict which
    # columns are exposed to the client. Confirm whether DataTables is
    # supposed to consume it.
    column_whitelist = {
        'id' : True,
        'email' : True,
        'username' : True,
        'lastName' : True,
        'firstName' : True,
        'active' : True,
        'confirmed_at' : True,
        'last_login_at' : True,
        'current_login_at' : True,
        'last_login_ip' : True,
        'current_login_ip' : True,
        'login_count' : True,
        'registered_at' : True,
        'roles' : True,
    }
    # Build the base query; DataTables applies paging/sorting/filtering
    # from the request and serializes the result.
    query = db.session.query(User)
    rowTable = DataTables(request, User, query)
    return jsonify(rowTable.output_result())
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import string
import sys
import tempfile
import unittest
import json
from telemetry import decorators
from telemetry import project_config
from telemetry.core import util
from telemetry.internal.util import binary_manager
from telemetry.testing import browser_test_context
from telemetry.testing import browser_test_runner
from telemetry.testing import options_for_unittests
from telemetry.testing import run_browser_tests
from telemetry.testing import serially_executed_browser_test_case
_expectations_template = (
'%s'
'# results: [ %s ]\n'
'crbug.com/123 [ %s ] %s [ %s ]')
def _MakeTestExpectations(test_name, tag_list, expectations):
tag_header = ''.join('# tags: [ %s ]\n' % t for t in tag_list)
tags = ' '.join(tag_list)
return _expectations_template % (
tag_header, expectations, tags, test_name, expectations)
def _MakeTestFilter(tests):
return '::'.join(tests)
class BrowserTestRunnerTest(unittest.TestCase):
  def setUp(self):
    # Populated by _RunTest with the parsed JSON results of the last run.
    self._test_result = {}
  def _ExtractTestResults(self, test_result):
    """Flatten a nested full-results trie into result-name lists.

    Walks the 'tests' trie (keys joined by the result's path_delimiter)
    and buckets each leaf test by outcome.

    Returns a (successes, failures, skips) tuple of full test names.
    """
    delimiter = test_result['path_delimiter']
    failures = []
    successes = []
    skips = []
    def _IsLeafNode(node):
      # A leaf carries an 'expected' string; interior nodes hold child
      # dicts. NOTE: `basestring` is Python 2 only.
      test_dict = node[1]
      return ('expected' in test_dict and
              isinstance(test_dict['expected'], basestring))
    node_queues = []
    for t in test_result['tests']:
      node_queues.append((t, test_result['tests'][t]))
    # Depth-first traversal via an explicit stack (pop from the end).
    while node_queues:
      node = node_queues.pop()
      full_test_name, test_dict = node
      if _IsLeafNode(node):
        # Failure: no actual result appears among the expected results.
        if all(res not in test_dict['expected'].split() for res in
               test_dict['actual'].split()):
          failures.append(full_test_name)
        elif test_dict['actual'] == 'SKIP':
          skips.append(full_test_name)
        else:
          successes.append(full_test_name)
      else:
        # Interior node: queue children with the delimiter-joined name.
        for k in test_dict:
          node_queues.append(
              ('%s%s%s' % (full_test_name, delimiter, k),
               test_dict[k]))
    return successes, failures, skips
  def _RunTest(
      self, test_filter, expected_failures, expected_successes,
      expected_skips=None, test_name='SimpleTest',
      expectations='', tags=None, extra_args=None):
    """Run a browser test suite and assert its per-test outcomes.

    Writes full results to a temp file, parses them into
    self._test_result, and compares the extracted
    successes/failures/skips against the expected sets. If
    `expectations` is non-empty it is written to a temp expectations
    file and passed via -X along with any -x tags.
    """
    expected_skips = expected_skips or []
    tags = tags or []
    extra_args = extra_args or []
    config = project_config.ProjectConfig(
        top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
        client_configs=[],
        benchmark_dirs=[
            os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
    )
    # Results file; created closed so the runner can write to it.
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.close()
    temp_file_name = temp_file.name
    if expectations:
      # Write the expectations content and pass its path plus tags.
      expectations_file = tempfile.NamedTemporaryFile(delete=False)
      expectations_file.write(expectations)
      expectations_file.close()
      extra_args.extend(['-X', expectations_file.name] +
                        ['-x=%s' % tag for tag in tags])
    args = ([test_name,
             '--write-full-results-to=%s' % temp_file_name,
             '--test-filter=%s' % test_filter] + extra_args)
    try:
      args = browser_test_runner.ProcessConfig(config, args)
      with binary_manager.TemporarilyReplaceBinaryManager(None):
        run_browser_tests.RunTests(args)
      with open(temp_file_name) as f:
        self._test_result = json.load(f)
      (actual_successes,
       actual_failures,
       actual_skips) = self._ExtractTestResults(self._test_result)
      self.assertEquals(set(actual_failures), set(expected_failures))
      self.assertEquals(set(actual_successes), set(expected_successes))
      self.assertEquals(set(actual_skips), set(expected_skips))
    finally:
      # Always remove the results temp file, even on assertion failure.
      os.remove(temp_file_name)
  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testShortenTestNameUsingTestNamePrefixCommandLineArg(self):
    """--test-name-prefix strips the prefix from names in expectations."""
    self._RunTest(
        test_filter='', expected_failures=[],
        expected_successes=['FailingTest'],
        test_name='ImplementsGetPlatformTags',
        expectations=_MakeTestExpectations(
            'FailingTest', ['linux', 'release'], 'Failure'),
        extra_args=['--test-name-prefix=browser_tests.browser_test.'
                    'ImplementsGetPlatformTags.'])
    # The shortened name is the results key, and the expectation applied.
    test_result = (
        self._test_result['tests']['FailingTest'])
    self.assertEqual(test_result['expected'], 'FAIL')
  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testShortenSkipGlobUsingTestNamePrefixCommandLineArg(self):
    """--skip globs are matched against the prefix-shortened names."""
    self._RunTest(
        test_filter='', expected_failures=[],
        expected_successes=['a/b/fail-test.html'], expected_skips=[],
        test_name='ImplementsExpectationsFiles',
        extra_args=[
            '-x=foo', '--test-name-prefix='
            'browser_tests.browser_test.ImplementsExpectationsFiles.',
            '--skip=a/b/fail-test.html', '--all'])
  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testShortenTestFilterGlobsUsingTestNamePrefixCommandLineArg(self):
    """--test-filter globs are matched against prefix-shortened names."""
    self._RunTest(
        test_filter='FailingTest', expected_failures=[],
        expected_successes=['FailingTest'],
        test_name='ImplementsGetPlatformTags',
        expectations=_MakeTestExpectations(
            'FailingTest', ['linux', 'release'], 'Failure'),
        extra_args=[
            '--test-name-prefix='
            'browser_tests.browser_test.ImplementsGetPlatformTags.'])
@decorators.Disabled('chromeos') # crbug.com/696553
def testGetExpectationsFromTypWithoutExpectationsFile(self):
test_name = ('browser_tests.browser_test.'
'GetsExpectationsFromTyp.HasNoExpectationsFile')
self._RunTest(
test_filter=test_name, expected_failures=[],
expected_successes=[test_name], test_name='GetsExpectationsFromTyp')
test_result = (
self._test_result['tests']['browser_tests']['browser_test']
['GetsExpectationsFromTyp']['HasNoExpectationsFile'])
self.assertEqual(test_result['expected'], 'PASS')
self.assertEqual(test_result['actual'], 'PASS')
self.assertNotIn('is_unexpected', test_result)
self.assertNotIn('is_regression', test_result)
@decorators.Disabled('chromeos') # crbug.com/696553
def testGetExpectationsFromTypWithExpectationsFile(self):
test_name = 'HasExpectationsFile'
self._RunTest(
test_filter=test_name, expected_failures=[test_name],
expected_successes=[], test_name='GetsExpectationsFromTyp',
expectations=_MakeTestExpectations(
test_name, ['foo'], 'RetryOnFailure Failure'), tags=['foo'],
extra_args=[('--test-name-prefix=browser_tests.'
'browser_test.GetsExpectationsFromTyp.')])
test_result = self._test_result['tests']['HasExpectationsFile']
self.assertEqual(test_result['expected'], 'FAIL')
self.assertEqual(test_result['actual'], 'PASS')
self.assertIn('is_unexpected', test_result)
self.assertNotIn('is_regression', test_result)
@decorators.Disabled('chromeos') # crbug.com/696553
def testOverrideExpectationsFilesFunction(self):
test_name = ('a/b/fail-test.html')
self._RunTest(
test_filter=test_name, expected_failures=[],
expected_successes=[test_name],
test_name='ImplementsExpectationsFiles',
extra_args=[
'-x=foo',
'--test-name-prefix=browser_tests.browser_test.'
'ImplementsExpectationsFiles.'])
test_result = (
self._test_result['tests']['a']['b']['fail-test.html'])
self.assertEqual(self._test_result['path_delimiter'], '/')
self.assertEqual(test_result['expected'], 'FAIL')
self.assertEqual(test_result['actual'], 'FAIL')
self.assertNotIn('is_unexpected', test_result)
self.assertNotIn('is_regression', test_result)
@decorators.Disabled('chromeos') # crbug.com/696553
def testDoesRetryOnFailureRetriesAndEventuallyPasses(self):
test_name = 'a\\b\\c\\flaky-test.html'
extra_args = [
'--retry-limit=3', '--retry-only-retry-on-failure-tests',
'--test-name-prefix', 'browser_tests.browser_test.FlakyTest.']
self._RunTest(
test_filter=test_name, expected_failures=[],
expected_successes=[test_name], test_name='FlakyTest',
extra_args=extra_args, expectations=_MakeTestExpectations(
test_name, ['foo'], 'RetryOnFailure'), tags=['foo'])
results = (
self._test_result['tests']['a']['b']['c']['flaky-test.html'])
self.assertEqual(self._test_result['path_delimiter'], '\\')
self.assertEqual(results['expected'], 'PASS')
self.assertEqual(results['actual'], 'FAIL FAIL FAIL PASS')
self.assertNotIn('is_unexpected', results)
self.assertNotIn('is_regression', results)
  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testSkipTestWithSkipExpectation(self):
    """A Skip expectation produces expected == actual == SKIP."""
    test_name = ('browser_tests.browser_test'
                 '.TestsWillBeDisabled.SupposedToPass')
    self._RunTest(
        test_filter=test_name, expected_failures=[], expected_successes=[],
        expected_skips=[test_name], test_name='TestsWillBeDisabled',
        expectations=_MakeTestExpectations(
            test_name, ['foo'], 'Skip'), tags=['foo'])
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['TestsWillBeDisabled']['SupposedToPass'])
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testSkipTestViaCommandlineArgWhilePassingExpectationsFile(self):
    """--skip wins over a Failure expectation from the expectations file."""
    test_name = ('browser_tests.browser_test'
                 '.TestsWillBeDisabled.SupposedToPass')
    self._RunTest(
        test_filter=test_name, expected_failures=[], expected_successes=[],
        expected_skips=[test_name], test_name='TestsWillBeDisabled',
        expectations=_MakeTestExpectations(
            test_name, ['foo'], 'Failure'), tags=['foo'],
        extra_args=['--skip=*SupposedToPass'])
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['TestsWillBeDisabled']['SupposedToPass'])
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testSkipTestViaCommandLineArgWithoutExpectationsFile(self):
    """--skip works with no expectations file at all."""
    test_name = (
        'browser_tests.browser_test.'
        'TestsWillBeDisabled.SupposedToPass')
    self._RunTest(
        test_filter=test_name, expected_failures=[], expected_successes=[],
        test_name='TestsWillBeDisabled',
        expected_skips=[test_name],
        extra_args=['--skip=*SupposedToPass'])
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['TestsWillBeDisabled']['SupposedToPass'])
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testSkipTestWithoutExpectationsFile(self):
    """A test that skips itself at runtime is reported as SKIP."""
    test_name = ('browser_tests.browser_test.'
                 'TestsWillBeDisabled.ThisTestSkips')
    self._RunTest(
        test_filter=test_name, expected_failures=[], expected_successes=[],
        test_name='TestsWillBeDisabled',
        expected_skips=[test_name])
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['TestsWillBeDisabled']['ThisTestSkips'])
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testOverrideGetPlatformTagsFunctionForFailureExpectations(self):
    """Tags from the test class select the Failure expectation."""
    test_name = ('browser_tests.browser_test'
                 '.ImplementsGetPlatformTags.FailingTest')
    self._RunTest(
        test_filter=test_name, expected_failures=[],
        expected_successes=[test_name],
        test_name='ImplementsGetPlatformTags',
        expectations=_MakeTestExpectations(
            test_name, ['linux', 'release'], 'Failure'))
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['ImplementsGetPlatformTags']['FailingTest'])
    self.assertEqual(test_result['expected'], 'FAIL')
    self.assertEqual(test_result['actual'], 'FAIL')

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testOverrideGetPlatformTagsFunctionForSkipExpectations(self):
    """Tags from the test class select the Skip expectation."""
    test_name = ('browser_tests.browser_test'
                 '.ImplementsGetPlatformTags.FailingTest')
    self._RunTest(
        test_filter=test_name, expected_failures=[], expected_successes=[],
        expected_skips=[test_name],
        test_name='ImplementsGetPlatformTags',
        expectations=_MakeTestExpectations(
            test_name, ['linux', 'release'], 'Skip'))
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['ImplementsGetPlatformTags']['FailingTest'])
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testJsonOutputFormatNegativeFilter(self):
    """Failures and successes both land in the JSON full results."""
    failures = [
        'browser_tests.simple_numeric_test.SimpleTest.add_1_and_2',
        'browser_tests.simple_numeric_test.SimpleTest.add_7_and_3',
        'browser_tests.simple_numeric_test.SimpleTest.multiplier_simple_2']
    successes = [
        'browser_tests.simple_numeric_test.SimpleTest.add_2_and_3',
        'browser_tests.simple_numeric_test.SimpleTest.multiplier_simple',
        'browser_tests.simple_numeric_test.SimpleTest.multiplier_simple_3']
    self._RunTest(
        _MakeTestFilter(failures + successes), failures, successes)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testJsonOutputWhenSetupClassFailed(self):
    """Every test is reported as a failure when setUpClass fails."""
    failures = [
        'browser_tests.failed_tests.SetUpClassFailedTest.dummy_test_0',
        'browser_tests.failed_tests.SetUpClassFailedTest.dummy_test_1',
        'browser_tests.failed_tests.SetUpClassFailedTest.dummy_test_2']
    self._RunTest(
        _MakeTestFilter(failures), failures, [],
        test_name='SetUpClassFailedTest')

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testJsonOutputWhenTearDownClassFailed(self):
    """Tests that passed are still reported as failures when tearDownClass fails."""
    successes = [
        'browser_tests.failed_tests.TearDownClassFailedTest.dummy_test_0',
        'browser_tests.failed_tests.TearDownClassFailedTest.dummy_test_1',
        'browser_tests.failed_tests.TearDownClassFailedTest.dummy_test_2']
    # Note: the names are deliberately passed as the expected *failures*.
    self._RunTest(
        _MakeTestFilter(successes), successes, [],
        test_name='TearDownClassFailedTest')

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testSetUpProcessCalledOnce(self):
    """SetUpProcess must run only once for the whole test class."""
    successes = [
        'browser_tests.process_tests.FailIfSetUpProcessCalledTwice.Dummy_0',
        'browser_tests.process_tests.FailIfSetUpProcessCalledTwice.Dummy_1',
        'browser_tests.process_tests.FailIfSetUpProcessCalledTwice.Dummy_2']
    self._RunTest(
        _MakeTestFilter(successes), [], successes,
        test_name='FailIfSetUpProcessCalledTwice')

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testTearDownProcessCalledOnce(self):
    """TearDownProcess must run only once for the whole test class."""
    successes = [
        'browser_tests.process_tests.FailIfTearDownProcessCalledTwice.Dummy_0',
        'browser_tests.process_tests.FailIfTearDownProcessCalledTwice.Dummy_1',
        'browser_tests.process_tests.FailIfTearDownProcessCalledTwice.Dummy_2']
    self._RunTest(
        _MakeTestFilter(successes), [], successes,
        test_name='FailIfTearDownProcessCalledTwice')

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testJsonOutputFormatPositiveFilter(self):
    """Only the two filtered tests run; both are expected to fail."""
    failures = [
        'browser_tests.simple_numeric_test.SimpleTest.TestException',
        'browser_tests.simple_numeric_test.SimpleTest.TestSimple']
    self._RunTest(
        _MakeTestFilter(failures), failures, [])
@decorators.Disabled('chromeos') # crbug.com/696553
def testExecutingTestsInSortedOrder(self):
alphabetical_tests = []
prefix = 'browser_tests.simple_numeric_test.SimpleTest.Alphabetical_'
for i in xrange(20):
alphabetical_tests.append(prefix + str(i))
for c in string.uppercase[:26]:
alphabetical_tests.append(prefix + c)
for c in string.lowercase[:26]:
alphabetical_tests.append(prefix + c)
alphabetical_tests.sort()
self._RunTest(
prefix + '*', [], alphabetical_tests)
def shardingRangeTestHelper(self, total_shards, num_tests):
shard_indices = []
for shard_index in xrange(0, total_shards):
shard_indices.append(run_browser_tests._TestIndicesForShard(
total_shards, shard_index, num_tests))
# Make assertions about ranges
num_tests_run = 0
for i in xrange(0, len(shard_indices)):
cur_indices = shard_indices[i]
num_tests_in_shard = len(cur_indices)
if i < num_tests:
self.assertGreater(num_tests_in_shard, 0)
num_tests_run += num_tests_in_shard
else:
# Not enough tests to go around all of the shards.
self.assertEquals(num_tests_in_shard, 0)
# Assert that we run all of the tests exactly once.
all_indices = set()
for i in xrange(0, len(shard_indices)):
cur_indices = shard_indices[i]
all_indices.update(cur_indices)
self.assertEquals(num_tests_run, num_tests)
self.assertEquals(num_tests_run, len(all_indices))
  def testShardsWithPrimeNumTests(self):
    """Shards a prime number of tests (101) across 1-19 shards."""
    for total_shards in xrange(1, 20):
      # 101 is prime, so it never divides evenly across the shards.
      self.shardingRangeTestHelper(total_shards, 101)

  def testShardsWithDivisibleNumTests(self):
    """Shards 8 tests across 1-5 shards (divides evenly for 1, 2, and 4)."""
    for total_shards in xrange(1, 6):
      self.shardingRangeTestHelper(total_shards, 8)

  def testShardBoundaryConditions(self):
    """Degenerate cases: zero tests, one test, more shards than tests."""
    self.shardingRangeTestHelper(1, 0)
    self.shardingRangeTestHelper(1, 1)
    self.shardingRangeTestHelper(2, 1)
def BaseShardingTest(self, total_shards, shard_index, failures, successes,
opt_abbr_input_json_file=None,
opt_test_filter='',
opt_filter_tests_after_sharding=False,
opt_test_name_prefix=''):
config = project_config.ProjectConfig(
top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
client_configs=[],
benchmark_dirs=[
os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
)
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.close()
temp_file_name = temp_file.name
opt_args = []
if opt_abbr_input_json_file:
opt_args += [
'--read-abbreviated-json-results-from=%s' % opt_abbr_input_json_file]
if opt_test_filter:
opt_args += [
'--test-filter=%s' % opt_test_filter]
if opt_filter_tests_after_sharding:
opt_args += ['--filter-tests-after-sharding']
if opt_test_name_prefix:
opt_args += ['--test-name-prefix=%s' % opt_test_name_prefix]
args = (['SimpleShardingTest',
'--write-full-results-to=%s' % temp_file_name,
'--total-shards=%d' % total_shards,
'--shard-index=%d' % shard_index] + opt_args)
try:
args = browser_test_runner.ProcessConfig(config, args)
with binary_manager.TemporarilyReplaceBinaryManager(None):
run_browser_tests.RunTests(args)
with open(temp_file_name) as f:
test_result = json.load(f)
(actual_successes,
actual_failures, _) = self._ExtractTestResults(test_result)
self.assertEquals(set(actual_failures), set(failures))
self.assertEquals(set(actual_successes), set(successes))
finally:
os.remove(temp_file_name)
  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testShardedTestRun(self):
    """Without timing data, tests are striped across the 3 shards.

    The expected lists show the stride-3 assignment: shard 0 gets tests
    0, 3, 6, 9; shard 1 gets 1, 4, 7; shard 2 gets 2, 5, 8.
    """
    self.BaseShardingTest(3, 0, [], [
        'browser_tests.simple_sharding_test.SimpleShardingTest.Test1',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_0',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_3',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_6',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_9',
    ])
    self.BaseShardingTest(3, 1, [], [
        'browser_tests.simple_sharding_test.SimpleShardingTest.Test2',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_1',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_4',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_7',
    ])
    self.BaseShardingTest(3, 2, [], [
        'browser_tests.simple_sharding_test.SimpleShardingTest.Test3',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_2',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_5',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_8',
    ])
def writeMockTestResultsFile(self):
mock_test_results = {
'passes': [
'Test1',
'Test2',
'Test3',
'passing_test_0',
'passing_test_1',
'passing_test_2',
'passing_test_3',
'passing_test_4',
'passing_test_5',
'passing_test_6',
'passing_test_7',
'passing_test_8',
'passing_test_9',
],
'failures': [],
'valid': True,
'times': {
'Test1': 3.0,
'Test2': 3.0,
'Test3': 3.0,
'passing_test_0': 3.0,
'passing_test_1': 2.0,
'passing_test_2': 2.0,
'passing_test_3': 2.0,
'passing_test_4': 2.0,
'passing_test_5': 1.0,
'passing_test_6': 1.0,
'passing_test_7': 1.0,
'passing_test_8': 1.0,
'passing_test_9': 0.5,
}
}
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.close()
temp_file_name = temp_file.name
with open(temp_file_name, 'w') as f:
json.dump(mock_test_results, f)
return temp_file_name
  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testSplittingShardsByTimes(self):
    """With recorded timings, tests are distributed to balance shard duration."""
    temp_file_name = self.writeMockTestResultsFile()
    # It seems that the sorting order of the first four tests above is:
    # passing_test_0, Test1, Test2, Test3
    # This is probably because the relative order of the "fixed" tests
    # (starting with "Test") and the generated ones ("passing_") is
    # not well defined, and the sorting is stable afterward. The
    # expectations have been adjusted for this fact.
    try:
      self.BaseShardingTest(4, 0, [], [
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.passing_test_0',
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.passing_test_1',
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.passing_test_5',
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.passing_test_9'
      ], temp_file_name)
      self.BaseShardingTest(4, 1, [], [
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.Test1',
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.passing_test_2',
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.passing_test_6'
      ], temp_file_name)
      self.BaseShardingTest(4, 2, [], [
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.Test2',
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.passing_test_3',
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.passing_test_7'
      ], temp_file_name)
      self.BaseShardingTest(4, 3, [], [
          'browser_tests.simple_sharding_test.SimpleShardingTest.Test3',
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.passing_test_4',
          'browser_tests.simple_sharding_test' +
          '.SimpleShardingTest.passing_test_8'
      ], temp_file_name)
    finally:
      os.remove(temp_file_name)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testFilterTestShortenedNameAfterShardingWithoutTestTimes(self):
    """Post-sharding filtering matches prefix-shortened names (no timings)."""
    self.BaseShardingTest(
        4, 0, [], ['passing_test_8'],
        opt_test_name_prefix=('browser_tests.'
                              'simple_sharding_test.SimpleShardingTest.'),
        opt_test_filter='passing_test_8',
        opt_filter_tests_after_sharding=True)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testFilterTestShortenedNameAfterShardingWithTestTimes(self):
    """Post-sharding filtering matches prefix-shortened names (with timings)."""
    temp_file_name = self.writeMockTestResultsFile()
    try:
      self.BaseShardingTest(
          4, 3, [], ['passing_test_8'], temp_file_name,
          opt_test_name_prefix=('browser_tests.'
                                'simple_sharding_test.SimpleShardingTest.'),
          opt_test_filter='passing_test_8',
          opt_filter_tests_after_sharding=True)
    finally:
      os.remove(temp_file_name)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testFilteringAfterSharding(self):
    """--filter-tests-after-sharding applies the filter within one shard."""
    temp_file_name = self.writeMockTestResultsFile()
    successes = [
        'browser_tests.simple_sharding_test.SimpleShardingTest.Test1',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_2',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_6']
    try:
      self.BaseShardingTest(
          4, 1, [], successes, temp_file_name,
          opt_test_filter=_MakeTestFilter(successes),
          opt_filter_tests_after_sharding=True)
    finally:
      os.remove(temp_file_name)
def testMedianComputation(self):
self.assertEquals(2.0, run_browser_tests._MedianTestTime(
{'test1': 2.0, 'test2': 7.0, 'test3': 1.0}))
self.assertEquals(2.0, run_browser_tests._MedianTestTime(
{'test1': 2.0}))
self.assertEquals(0.0, run_browser_tests._MedianTestTime({}))
self.assertEqual(4.0, run_browser_tests._MedianTestTime(
{'test1': 2.0, 'test2': 6.0, 'test3': 1.0, 'test4': 8.0}))
class Algebra(
    serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
  """Well-behaved test class used by TestLoadAllTestModules below."""

  @classmethod
  def GenerateTestCases_Simple(cls, options):
    # Yields (test_name, args) pairs; presumably bound to Simple() by the
    # harness's GenerateTestCases_<method> naming convention.
    del options  # Unused.
    yield 'testOne', (1, 2)
    yield 'testTwo', (3, 3)

  def Simple(self, x, y):
    # Body for the generated cases; testOne (1, 2) intentionally fails.
    self.assertEquals(x, y)

  def TestNumber(self):
    # Intentionally failing assertion.
    self.assertEquals(0, 1)
class ErrorneousGeometric(
    serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
  """Test class whose case generation deliberately raises.

  Used by TestLoadAllTestModules to verify that loading tests from a module
  does not trigger generators of classes whose tests were not requested.
  """

  @classmethod
  def GenerateTestCases_Compare(cls, options):
    del options  # Unused.
    # Deliberate failure: this generator must never run during
    # testLoadAllTestsInModule, which only requests Algebra tests.
    assert False, 'I am a problematic generator'
    yield 'testBasic', ('square', 'circle')

  def Compare(self, x, y):
    self.assertEquals(x, y)

  def TestAngle(self):
    # Intentionally failing assertion.
    self.assertEquals(90, 450)
class TestLoadAllTestModules(unittest.TestCase):
  """Tests for serially_executed_browser_test_case.LoadAllTestsInModule."""

  def testLoadAllTestsInModule(self):
    """Only the test cases named in test_case_ids_to_run are loaded."""
    context = browser_test_context.TypTestContext()
    context.finder_options = options_for_unittests.GetCopy()
    context.test_class = Algebra
    context.test_case_ids_to_run.add(
        'telemetry.testing.browser_test_runner_unittest.Algebra.TestNumber')
    context.test_case_ids_to_run.add(
        'telemetry.testing.browser_test_runner_unittest.Algebra.testOne')
    context.Freeze()
    browser_test_context._global_test_context = context
    try:
      # This should not invoke GenerateTestCases of ErrorneousGeometric class,
      # otherwise that would throw Exception.
      tests = serially_executed_browser_test_case.LoadAllTestsInModule(
          sys.modules[__name__])
      self.assertEquals(
          sorted([t.id() for t in tests]),
          ['telemetry.testing.browser_test_runner_unittest.Algebra.TestNumber',
           'telemetry.testing.browser_test_runner_unittest.Algebra.testOne'])
    finally:
      # Always reset the global context so later tests are unaffected.
      browser_test_context._global_test_context = None
[Telemetry] Disable ResultSink on nested tests
Disables ResultSink on "inner" tests, i.e. a test run within another
test, in browser_test_runner_unittest. These inner results are
meaningless to users and currently pollute the actual results.
Bug: chromium:1159139
Change-Id: Ib81a2f5ef458dfedc9eb2173278c02b2749245a0
Reviewed-on: https://chromium-review.googlesource.com/c/catapult/+/2611521
Auto-Submit: Brian Sheedy <8cdc5cc1fa60ba15acb0d296e6f5592b7bb7d71c@chromium.org>
Commit-Queue: John Chen <334fbfbb4df7c78f091ea1b77c69ca6ac731a3f3@chromium.org>
Reviewed-by: John Chen <334fbfbb4df7c78f091ea1b77c69ca6ac731a3f3@chromium.org>
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import string
import sys
import tempfile
import unittest
import json
from telemetry import decorators
from telemetry import project_config
from telemetry.core import util
from telemetry.internal.util import binary_manager
from telemetry.testing import browser_test_context
from telemetry.testing import browser_test_runner
from telemetry.testing import options_for_unittests
from telemetry.testing import run_browser_tests
from telemetry.testing import serially_executed_browser_test_case
_expectations_template = (
'%s'
'# results: [ %s ]\n'
'crbug.com/123 [ %s ] %s [ %s ]')
def _MakeTestExpectations(test_name, tag_list, expectations):
tag_header = ''.join('# tags: [ %s ]\n' % t for t in tag_list)
tags = ' '.join(tag_list)
return _expectations_template % (
tag_header, expectations, tags, test_name, expectations)
def _MakeTestFilter(tests):
return '::'.join(tests)
class BrowserTestRunnerTest(unittest.TestCase):
  def setUp(self):
    # Holds the parsed full-results JSON from the most recent _RunTest call.
    self._test_result = {}
def _ExtractTestResults(self, test_result):
delimiter = test_result['path_delimiter']
failures = []
successes = []
skips = []
def _IsLeafNode(node):
test_dict = node[1]
return ('expected' in test_dict and
isinstance(test_dict['expected'], basestring))
node_queues = []
for t in test_result['tests']:
node_queues.append((t, test_result['tests'][t]))
while node_queues:
node = node_queues.pop()
full_test_name, test_dict = node
if _IsLeafNode(node):
if all(res not in test_dict['expected'].split() for res in
test_dict['actual'].split()):
failures.append(full_test_name)
elif test_dict['actual'] == 'SKIP':
skips.append(full_test_name)
else:
successes.append(full_test_name)
else:
for k in test_dict:
node_queues.append(
('%s%s%s' % (full_test_name, delimiter, k),
test_dict[k]))
return successes, failures, skips
def _RunTest(
self, test_filter, expected_failures, expected_successes,
expected_skips=None, test_name='SimpleTest',
expectations='', tags=None, extra_args=None):
expected_skips = expected_skips or []
tags = tags or []
extra_args = extra_args or []
config = project_config.ProjectConfig(
top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
client_configs=[],
benchmark_dirs=[
os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
)
temp_file = tempfile.NamedTemporaryFile(delete=False)
temp_file.close()
temp_file_name = temp_file.name
if expectations:
expectations_file = tempfile.NamedTemporaryFile(delete=False)
expectations_file.write(expectations)
expectations_file.close()
extra_args.extend(['-X', expectations_file.name] +
['-x=%s' % tag for tag in tags])
args = ([test_name,
'--write-full-results-to=%s' % temp_file_name,
'--test-filter=%s' % test_filter,
# We don't want the underlying tests to report their results to
# ResultDB.
'--disable-resultsink',
] + extra_args)
try:
args = browser_test_runner.ProcessConfig(config, args)
with binary_manager.TemporarilyReplaceBinaryManager(None):
run_browser_tests.RunTests(args)
with open(temp_file_name) as f:
self._test_result = json.load(f)
(actual_successes,
actual_failures,
actual_skips) = self._ExtractTestResults(self._test_result)
self.assertEquals(set(actual_failures), set(expected_failures))
self.assertEquals(set(actual_successes), set(expected_successes))
self.assertEquals(set(actual_skips), set(expected_skips))
finally:
os.remove(temp_file_name)
  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testShortenTestNameUsingTestNamePrefixCommandLineArg(self):
    """Results are keyed by the short name once --test-name-prefix is set."""
    self._RunTest(
        test_filter='', expected_failures=[],
        expected_successes=['FailingTest'],
        test_name='ImplementsGetPlatformTags',
        expectations=_MakeTestExpectations(
            'FailingTest', ['linux', 'release'], 'Failure'),
        extra_args=['--test-name-prefix=browser_tests.browser_test.'
                    'ImplementsGetPlatformTags.'])
    # The results trie is keyed by the shortened name, not the full path.
    test_result = (
        self._test_result['tests']['FailingTest'])
    self.assertEqual(test_result['expected'], 'FAIL')

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testShortenSkipGlobUsingTestNamePrefixCommandLineArg(self):
    """Exercises a --skip glob shortened by --test-name-prefix (with --all)."""
    self._RunTest(
        test_filter='', expected_failures=[],
        expected_successes=['a/b/fail-test.html'], expected_skips=[],
        test_name='ImplementsExpectationsFiles',
        extra_args=[
            '-x=foo', '--test-name-prefix='
            'browser_tests.browser_test.ImplementsExpectationsFiles.',
            '--skip=a/b/fail-test.html', '--all'])

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testShortenTestFilterGlobsUsingTestNamePrefixCommandLineArg(self):
    """--test-filter matches names shortened by --test-name-prefix."""
    self._RunTest(
        test_filter='FailingTest', expected_failures=[],
        expected_successes=['FailingTest'],
        test_name='ImplementsGetPlatformTags',
        expectations=_MakeTestExpectations(
            'FailingTest', ['linux', 'release'], 'Failure'),
        extra_args=[
            '--test-name-prefix='
            'browser_tests.browser_test.ImplementsGetPlatformTags.'])

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testGetExpectationsFromTypWithoutExpectationsFile(self):
    """A test with no expectations file reports a plain expected PASS."""
    test_name = ('browser_tests.browser_test.'
                 'GetsExpectationsFromTyp.HasNoExpectationsFile')
    self._RunTest(
        test_filter=test_name, expected_failures=[],
        expected_successes=[test_name], test_name='GetsExpectationsFromTyp')
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['GetsExpectationsFromTyp']['HasNoExpectationsFile'])
    self.assertEqual(test_result['expected'], 'PASS')
    self.assertEqual(test_result['actual'], 'PASS')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testGetExpectationsFromTypWithExpectationsFile(self):
    """An unexpected pass (expected FAIL, actual PASS) is flagged."""
    test_name = 'HasExpectationsFile'
    self._RunTest(
        test_filter=test_name, expected_failures=[test_name],
        expected_successes=[], test_name='GetsExpectationsFromTyp',
        expectations=_MakeTestExpectations(
            test_name, ['foo'], 'RetryOnFailure Failure'), tags=['foo'],
        extra_args=[('--test-name-prefix=browser_tests.'
                     'browser_test.GetsExpectationsFromTyp.')])
    test_result = self._test_result['tests']['HasExpectationsFile']
    self.assertEqual(test_result['expected'], 'FAIL')
    self.assertEqual(test_result['actual'], 'PASS')
    # Passing when a failure was expected is still "unexpected", but not a
    # regression.
    self.assertIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testOverrideExpectationsFilesFunction(self):
    """Expectations supplied via the class's expectations files mark the FAIL as expected."""
    test_name = ('a/b/fail-test.html')
    self._RunTest(
        test_filter=test_name, expected_failures=[],
        expected_successes=[test_name],
        test_name='ImplementsExpectationsFiles',
        extra_args=[
            '-x=foo',
            '--test-name-prefix=browser_tests.browser_test.'
            'ImplementsExpectationsFiles.'])
    test_result = (
        self._test_result['tests']['a']['b']['fail-test.html'])
    self.assertEqual(self._test_result['path_delimiter'], '/')
    self.assertEqual(test_result['expected'], 'FAIL')
    self.assertEqual(test_result['actual'], 'FAIL')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testDoesRetryOnFailureRetriesAndEventuallyPasses(self):
    """A RetryOnFailure test fails three times, then passes on the retry."""
    test_name = 'a\\b\\c\\flaky-test.html'
    extra_args = [
        '--retry-limit=3', '--retry-only-retry-on-failure-tests',
        '--test-name-prefix', 'browser_tests.browser_test.FlakyTest.']
    self._RunTest(
        test_filter=test_name, expected_failures=[],
        expected_successes=[test_name], test_name='FlakyTest',
        extra_args=extra_args, expectations=_MakeTestExpectations(
            test_name, ['foo'], 'RetryOnFailure'), tags=['foo'])
    results = (
        self._test_result['tests']['a']['b']['c']['flaky-test.html'])
    # Backslash test names exercise a non-default path delimiter.
    self.assertEqual(self._test_result['path_delimiter'], '\\')
    self.assertEqual(results['expected'], 'PASS')
    self.assertEqual(results['actual'], 'FAIL FAIL FAIL PASS')
    self.assertNotIn('is_unexpected', results)
    self.assertNotIn('is_regression', results)
  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testSkipTestWithSkipExpectation(self):
    """A Skip expectation produces expected == actual == SKIP."""
    test_name = ('browser_tests.browser_test'
                 '.TestsWillBeDisabled.SupposedToPass')
    self._RunTest(
        test_filter=test_name, expected_failures=[], expected_successes=[],
        expected_skips=[test_name], test_name='TestsWillBeDisabled',
        expectations=_MakeTestExpectations(
            test_name, ['foo'], 'Skip'), tags=['foo'])
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['TestsWillBeDisabled']['SupposedToPass'])
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testSkipTestViaCommandlineArgWhilePassingExpectationsFile(self):
    """--skip wins over a Failure expectation from the expectations file."""
    test_name = ('browser_tests.browser_test'
                 '.TestsWillBeDisabled.SupposedToPass')
    self._RunTest(
        test_filter=test_name, expected_failures=[], expected_successes=[],
        expected_skips=[test_name], test_name='TestsWillBeDisabled',
        expectations=_MakeTestExpectations(
            test_name, ['foo'], 'Failure'), tags=['foo'],
        extra_args=['--skip=*SupposedToPass'])
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['TestsWillBeDisabled']['SupposedToPass'])
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testSkipTestViaCommandLineArgWithoutExpectationsFile(self):
    """--skip works with no expectations file at all."""
    test_name = (
        'browser_tests.browser_test.'
        'TestsWillBeDisabled.SupposedToPass')
    self._RunTest(
        test_filter=test_name, expected_failures=[], expected_successes=[],
        test_name='TestsWillBeDisabled',
        expected_skips=[test_name],
        extra_args=['--skip=*SupposedToPass'])
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['TestsWillBeDisabled']['SupposedToPass'])
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testSkipTestWithoutExpectationsFile(self):
    """A test that skips itself at runtime is reported as SKIP."""
    test_name = ('browser_tests.browser_test.'
                 'TestsWillBeDisabled.ThisTestSkips')
    self._RunTest(
        test_filter=test_name, expected_failures=[], expected_successes=[],
        test_name='TestsWillBeDisabled',
        expected_skips=[test_name])
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['TestsWillBeDisabled']['ThisTestSkips'])
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
    self.assertNotIn('is_unexpected', test_result)
    self.assertNotIn('is_regression', test_result)

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testOverrideGetPlatformTagsFunctionForFailureExpectations(self):
    """Tags from the test class select the Failure expectation."""
    test_name = ('browser_tests.browser_test'
                 '.ImplementsGetPlatformTags.FailingTest')
    self._RunTest(
        test_filter=test_name, expected_failures=[],
        expected_successes=[test_name],
        test_name='ImplementsGetPlatformTags',
        expectations=_MakeTestExpectations(
            test_name, ['linux', 'release'], 'Failure'))
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['ImplementsGetPlatformTags']['FailingTest'])
    self.assertEqual(test_result['expected'], 'FAIL')
    self.assertEqual(test_result['actual'], 'FAIL')

  @decorators.Disabled('chromeos')  # crbug.com/696553
  def testOverrideGetPlatformTagsFunctionForSkipExpectations(self):
    """Tags from the test class select the Skip expectation."""
    test_name = ('browser_tests.browser_test'
                 '.ImplementsGetPlatformTags.FailingTest')
    self._RunTest(
        test_filter=test_name, expected_failures=[], expected_successes=[],
        expected_skips=[test_name],
        test_name='ImplementsGetPlatformTags',
        expectations=_MakeTestExpectations(
            test_name, ['linux', 'release'], 'Skip'))
    test_result = (
        self._test_result['tests']['browser_tests']['browser_test']
        ['ImplementsGetPlatformTags']['FailingTest'])
    self.assertEqual(test_result['expected'], 'SKIP')
    self.assertEqual(test_result['actual'], 'SKIP')
@decorators.Disabled('chromeos') # crbug.com/696553
def testJsonOutputFormatNegativeFilter(self):
    """Runs a mixed set of passing and failing SimpleTest cases and
    checks the JSON output classifies each one correctly."""
    base = 'browser_tests.simple_numeric_test.SimpleTest.'
    failures = [base + suffix for suffix in (
        'add_1_and_2', 'add_7_and_3', 'multiplier_simple_2')]
    successes = [base + suffix for suffix in (
        'add_2_and_3', 'multiplier_simple', 'multiplier_simple_3')]
    self._RunTest(
        _MakeTestFilter(failures + successes), failures, successes)
@decorators.Disabled('chromeos') # crbug.com/696553
def testJsonOutputWhenSetupClassFailed(self):
    """When setUpClass fails, every test in the class must be reported
    as a failure in the JSON output."""
    failures = [
        'browser_tests.failed_tests.SetUpClassFailedTest.dummy_test_0',
        'browser_tests.failed_tests.SetUpClassFailedTest.dummy_test_1',
        'browser_tests.failed_tests.SetUpClassFailedTest.dummy_test_2']
    self._RunTest(
        _MakeTestFilter(failures), failures, [],
        test_name='SetUpClassFailedTest')
@decorators.Disabled('chromeos') # crbug.com/696553
def testJsonOutputWhenTearDownClassFailed(self):
    """When tearDownClass fails, the class's tests are reported as
    failures even though each test body passed."""
    successes = [
        'browser_tests.failed_tests.TearDownClassFailedTest.dummy_test_0',
        'browser_tests.failed_tests.TearDownClassFailedTest.dummy_test_1',
        'browser_tests.failed_tests.TearDownClassFailedTest.dummy_test_2']
    self._RunTest(
        _MakeTestFilter(successes), successes, [],
        test_name='TearDownClassFailedTest')
@decorators.Disabled('chromeos') # crbug.com/696553
def testSetUpProcessCalledOnce(self):
    """SetUpProcess must run exactly once for the whole class; the
    fixture class fails itself if it is invoked twice."""
    successes = [
        'browser_tests.process_tests.FailIfSetUpProcessCalledTwice.Dummy_0',
        'browser_tests.process_tests.FailIfSetUpProcessCalledTwice.Dummy_1',
        'browser_tests.process_tests.FailIfSetUpProcessCalledTwice.Dummy_2']
    self._RunTest(
        _MakeTestFilter(successes), [], successes,
        test_name='FailIfSetUpProcessCalledTwice')
@decorators.Disabled('chromeos') # crbug.com/696553
def testTearDownProcessCalledOnce(self):
    """TearDownProcess must run exactly once for the whole class; the
    fixture class fails itself if it is invoked twice."""
    successes = [
        'browser_tests.process_tests.FailIfTearDownProcessCalledTwice.Dummy_0',
        'browser_tests.process_tests.FailIfTearDownProcessCalledTwice.Dummy_1',
        'browser_tests.process_tests.FailIfTearDownProcessCalledTwice.Dummy_2']
    self._RunTest(
        _MakeTestFilter(successes), [], successes,
        test_name='FailIfTearDownProcessCalledTwice')
@decorators.Disabled('chromeos') # crbug.com/696553
def testJsonOutputFormatPositiveFilter(self):
    """Runs only two (failing) tests via a positive filter and checks
    the JSON output lists exactly those failures."""
    failures = [
        'browser_tests.simple_numeric_test.SimpleTest.TestException',
        'browser_tests.simple_numeric_test.SimpleTest.TestSimple']
    self._RunTest(
        _MakeTestFilter(failures), failures, [])
@decorators.Disabled('chromeos') # crbug.com/696553
def testExecutingTestsInSortedOrder(self):
    """All Alphabetical_* fixture tests pass and are reported in sorted order."""
    prefix = 'browser_tests.simple_numeric_test.SimpleTest.Alphabetical_'
    suffixes = [str(i) for i in xrange(20)]
    # string.uppercase/lowercase are locale-aware in Python 2; the [:26]
    # slice keeps only the ASCII letters.
    suffixes.extend(string.uppercase[:26])
    suffixes.extend(string.lowercase[:26])
    alphabetical_tests = sorted(prefix + s for s in suffixes)
    self._RunTest(
        prefix + '*', [], alphabetical_tests)
def shardingRangeTestHelper(self, total_shards, num_tests):
    """Asserts _TestIndicesForShard partitions num_tests test indices
    across total_shards with every index assigned exactly once."""
    shard_indices = []
    for shard_index in xrange(0, total_shards):
        shard_indices.append(run_browser_tests._TestIndicesForShard(
            total_shards, shard_index, num_tests))
    # Make assertions about ranges
    num_tests_run = 0
    for i in xrange(0, len(shard_indices)):
        cur_indices = shard_indices[i]
        num_tests_in_shard = len(cur_indices)
        if i < num_tests:
            # Shards with an index below num_tests must get at least one test.
            self.assertGreater(num_tests_in_shard, 0)
            num_tests_run += num_tests_in_shard
        else:
            # Not enough tests to go around all of the shards.
            self.assertEquals(num_tests_in_shard, 0)
    # Assert that we run all of the tests exactly once.
    all_indices = set()
    for i in xrange(0, len(shard_indices)):
        cur_indices = shard_indices[i]
        all_indices.update(cur_indices)
    self.assertEquals(num_tests_run, num_tests)
    self.assertEquals(num_tests_run, len(all_indices))
def testShardsWithPrimeNumTests(self):
    for total_shards in xrange(1, 20):
        # 101 is prime, so the tests can never divide evenly across shards.
        self.shardingRangeTestHelper(total_shards, 101)
def testShardsWithDivisibleNumTests(self):
    # 8 tests divide evenly for several of these shard counts.
    for total_shards in xrange(1, 6):
        self.shardingRangeTestHelper(total_shards, 8)
def testShardBoundaryConditions(self):
    # Degenerate cases: zero tests, one test, and more shards than tests.
    self.shardingRangeTestHelper(1, 0)
    self.shardingRangeTestHelper(1, 1)
    self.shardingRangeTestHelper(2, 1)
def BaseShardingTest(self, total_shards, shard_index, failures, successes,
                     opt_abbr_input_json_file=None,
                     opt_test_filter='',
                     opt_filter_tests_after_sharding=False,
                     opt_test_name_prefix=''):
    """Runs SimpleShardingTest on a single shard and checks the results.

    Args:
      total_shards / shard_index: sharding configuration passed through
        to run_browser_tests.
      failures / successes: exact sets of test names expected in the
        JSON results for this shard.
      opt_abbr_input_json_file: optional abbreviated-results file used
        to split shards by previous test times.
      opt_test_filter / opt_filter_tests_after_sharding /
        opt_test_name_prefix: forwarded as the corresponding CLI flags.
    """
    config = project_config.ProjectConfig(
        top_level_dir=os.path.join(util.GetTelemetryDir(), 'examples'),
        client_configs=[],
        benchmark_dirs=[
            os.path.join(util.GetTelemetryDir(), 'examples', 'browser_tests')]
    )
    # Results are written to a named temp file; delete=False so the test
    # runner process can reopen it by name (removed in the finally below).
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.close()
    temp_file_name = temp_file.name
    opt_args = []
    if opt_abbr_input_json_file:
        opt_args += [
            '--read-abbreviated-json-results-from=%s' % opt_abbr_input_json_file]
    if opt_test_filter:
        opt_args += [
            '--test-filter=%s' % opt_test_filter]
    if opt_filter_tests_after_sharding:
        opt_args += ['--filter-tests-after-sharding']
    if opt_test_name_prefix:
        opt_args += ['--test-name-prefix=%s' % opt_test_name_prefix]
    args = (['SimpleShardingTest',
             '--write-full-results-to=%s' % temp_file_name,
             '--total-shards=%d' % total_shards,
             '--shard-index=%d' % shard_index] + opt_args)
    try:
        args = browser_test_runner.ProcessConfig(config, args)
        with binary_manager.TemporarilyReplaceBinaryManager(None):
            run_browser_tests.RunTests(args)
        with open(temp_file_name) as f:
            test_result = json.load(f)
        (actual_successes,
         actual_failures, _) = self._ExtractTestResults(test_result)
        # Order is shard-dependent, so compare as sets.
        self.assertEquals(set(actual_failures), set(failures))
        self.assertEquals(set(actual_successes), set(successes))
    finally:
        os.remove(temp_file_name)
@decorators.Disabled('chromeos') # crbug.com/696553
def testShardedTestRun(self):
    """With no timing data, tests are split round-robin across 3 shards."""
    self.BaseShardingTest(3, 0, [], [
        'browser_tests.simple_sharding_test.SimpleShardingTest.Test1',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_0',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_3',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_6',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_9',
    ])
    self.BaseShardingTest(3, 1, [], [
        'browser_tests.simple_sharding_test.SimpleShardingTest.Test2',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_1',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_4',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_7',
    ])
    self.BaseShardingTest(3, 2, [], [
        'browser_tests.simple_sharding_test.SimpleShardingTest.Test3',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_2',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_5',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_8',
    ])
def writeMockTestResultsFile(self):
    """Writes a mock abbreviated-results JSON file with per-test times.

    Returns the path of the temp file; the caller is responsible for
    deleting it."""
    generated = ['passing_test_%d' % i for i in range(10)]
    times = {
        'Test1': 3.0,
        'Test2': 3.0,
        'Test3': 3.0,
        'passing_test_0': 3.0,
        'passing_test_9': 0.5,
    }
    for i in range(1, 5):
        times['passing_test_%d' % i] = 2.0
    for i in range(5, 9):
        times['passing_test_%d' % i] = 1.0
    mock_test_results = {
        'passes': ['Test1', 'Test2', 'Test3'] + generated,
        'failures': [],
        'valid': True,
        'times': times,
    }
    # delete=False so the file survives close() and can be reopened by name.
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    temp_file.close()
    with open(temp_file.name, 'w') as f:
        json.dump(mock_test_results, f)
    return temp_file.name
@decorators.Disabled('chromeos') # crbug.com/696553
def testSplittingShardsByTimes(self):
    """With timing data supplied, shards are balanced by past test times."""
    temp_file_name = self.writeMockTestResultsFile()
    # It seems that the sorting order of the first four tests above is:
    # passing_test_0, Test1, Test2, Test3
    # This is probably because the relative order of the "fixed" tests
    # (starting with "Test") and the generated ones ("passing_") is
    # not well defined, and the sorting is stable afterward. The
    # expectations have been adjusted for this fact.
    try:
        self.BaseShardingTest(4, 0, [], [
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.passing_test_0',
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.passing_test_1',
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.passing_test_5',
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.passing_test_9'
        ], temp_file_name)
        self.BaseShardingTest(4, 1, [], [
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.Test1',
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.passing_test_2',
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.passing_test_6'
        ], temp_file_name)
        self.BaseShardingTest(4, 2, [], [
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.Test2',
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.passing_test_3',
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.passing_test_7'
        ], temp_file_name)
        self.BaseShardingTest(4, 3, [], [
            'browser_tests.simple_sharding_test.SimpleShardingTest.Test3',
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.passing_test_4',
            'browser_tests.simple_sharding_test' +
            '.SimpleShardingTest.passing_test_8'
        ], temp_file_name)
    finally:
        os.remove(temp_file_name)
@decorators.Disabled('chromeos') # crbug.com/696553
def testFilterTestShortenedNameAfterShardingWithoutTestTimes(self):
    """--test-name-prefix + --filter-tests-after-sharding select a single
    test by its shortened name (no timing data)."""
    self.BaseShardingTest(
        4, 0, [], ['passing_test_8'],
        opt_test_name_prefix=('browser_tests.'
                              'simple_sharding_test.SimpleShardingTest.'),
        opt_test_filter='passing_test_8',
        opt_filter_tests_after_sharding=True)
@decorators.Disabled('chromeos') # crbug.com/696553
def testFilterTestShortenedNameAfterShardingWithTestTimes(self):
    """Same as the no-times variant, but sharding is driven by the mock
    timing file (the filtered test lands on shard 3)."""
    temp_file_name = self.writeMockTestResultsFile()
    try:
        self.BaseShardingTest(
            4, 3, [], ['passing_test_8'], temp_file_name,
            opt_test_name_prefix=('browser_tests.'
                                  'simple_sharding_test.SimpleShardingTest.'),
            opt_test_filter='passing_test_8',
            opt_filter_tests_after_sharding=True)
    finally:
        os.remove(temp_file_name)
@decorators.Disabled('chromeos') # crbug.com/696553
def testFilteringAfterSharding(self):
    """A full-name filter applied after sharding keeps exactly the tests
    that the timing data placed on shard 1."""
    temp_file_name = self.writeMockTestResultsFile()
    successes = [
        'browser_tests.simple_sharding_test.SimpleShardingTest.Test1',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_2',
        'browser_tests.simple_sharding_test.SimpleShardingTest.passing_test_6']
    try:
        self.BaseShardingTest(
            4, 1, [], successes, temp_file_name,
            opt_test_filter=_MakeTestFilter(successes),
            opt_filter_tests_after_sharding=True)
    finally:
        os.remove(temp_file_name)
def testMedianComputation(self):
    """_MedianTestTime: odd count -> middle value, single -> itself,
    empty -> 0.0, even count -> mean of the two middle values."""
    self.assertEquals(2.0, run_browser_tests._MedianTestTime(
        {'test1': 2.0, 'test2': 7.0, 'test3': 1.0}))
    self.assertEquals(2.0, run_browser_tests._MedianTestTime(
        {'test1': 2.0}))
    self.assertEquals(0.0, run_browser_tests._MedianTestTime({}))
    self.assertEqual(4.0, run_browser_tests._MedianTestTime(
        {'test1': 2.0, 'test2': 6.0, 'test3': 1.0, 'test4': 8.0}))
class Algebra(
    serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
    """Fixture class for TestLoadAllTestModules below: two generated
    'Simple' cases (one passing, one failing) and a deliberately failing
    TestNumber."""

    @classmethod
    def GenerateTestCases_Simple(cls, options):
        del options  # Unused.
        yield 'testOne', (1, 2)
        yield 'testTwo', (3, 3)

    def Simple(self, x, y):
        self.assertEquals(x, y)

    def TestNumber(self):
        # Intentionally failing assertion (0 != 1).
        self.assertEquals(0, 1)
class ErrorneousGeometric(
    serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
    """Fixture class whose test-case generator raises; used to verify
    that LoadAllTestsInModule never invokes generators of classes that
    are not requested. (Name misspelling is pre-existing.)"""

    @classmethod
    def GenerateTestCases_Compare(cls, options):
        del options  # Unused.
        # Raises on purpose: loading this class's cases must never happen.
        assert False, 'I am a problematic generator'
        yield 'testBasic', ('square', 'circle')

    def Compare(self, x, y):
        self.assertEquals(x, y)

    def TestAngle(self):
        # Intentionally failing assertion (90 != 450).
        self.assertEquals(90, 450)
class TestLoadAllTestModules(unittest.TestCase):
    """Checks LoadAllTestsInModule only materializes the requested test
    case ids and leaves other classes' generators untouched."""

    def testLoadAllTestsInModule(self):
        context = browser_test_context.TypTestContext()
        context.finder_options = options_for_unittests.GetCopy()
        context.test_class = Algebra
        context.test_case_ids_to_run.add(
            'telemetry.testing.browser_test_runner_unittest.Algebra.TestNumber')
        context.test_case_ids_to_run.add(
            'telemetry.testing.browser_test_runner_unittest.Algebra.testOne')
        context.Freeze()
        browser_test_context._global_test_context = context
        try:
            # This should not invoke GenerateTestCases of ErrorneousGeometric class,
            # otherwise that would throw Exception.
            tests = serially_executed_browser_test_case.LoadAllTestsInModule(
                sys.modules[__name__])
            self.assertEquals(
                sorted([t.id() for t in tests]),
                ['telemetry.testing.browser_test_runner_unittest.Algebra.TestNumber',
                 'telemetry.testing.browser_test_runner_unittest.Algebra.testOne'])
        finally:
            # Always clear the module-global context so other tests are unaffected.
            browser_test_context._global_test_context = None
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals, print_function
from six import iteritems, binary_type, text_type, string_types
from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json, frappe
from past.builtins import cmp
from faker import Faker
# public
from .exceptions import *
from .utils.jinja import (get_jenv, get_template, render_template, get_email_from_template, get_jloader)
# Harmless for Python 3
# For Python 2 set default encoding to utf-8
# sys.version is the full interpreter version string; its first character
# is the major version digit.
if sys.version[0] == '2':
    reload(sys)
    sys.setdefaultencoding("utf-8")

__version__ = '12.1.0'
__title__ = "Frappe Framework"

# Thread/greenlet-local storage for all per-request/per-site state.
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
def _(msg, lang=None):
    """Returns translated string in current lang, if exists.

    :param msg: String to translate (HTML is stripped before lookup).
    :param lang: Language to use when `local.lang` is not yet set."""
    from frappe.translate import get_full_dict
    from frappe.utils import strip_html_tags, is_html

    if not hasattr(local, 'lang'):
        local.lang = lang or 'en'

    if not lang:
        lang = local.lang

    # Keep the original so it can be returned when no translation exists.
    non_translated_msg = msg

    if is_html(msg):
        msg = strip_html_tags(msg)

    # msg should always be unicode
    msg = as_unicode(msg).strip()

    # return lang_full_dict according to lang passed parameter
    return get_full_dict(lang).get(msg) or non_translated_msg
def as_unicode(text, encoding='utf-8'):
    '''Convert to unicode if required.

    :param text: Value to convert; ``None`` becomes ``''``.
    :param encoding: Encoding used to decode byte strings.'''
    if isinstance(text, text_type):
        return text
    # `is None`, not `== None`: identity check is the correct idiom and
    # avoids invoking a custom __eq__ on the value.
    elif text is None:
        return ''
    elif isinstance(text, binary_type):
        return text_type(text, encoding)
    else:
        # Fall back to the type's own string conversion.
        return text_type(text)
def get_lang_dict(fortype, name=None):
    """Returns the translated language dict for the given type and name.

    :param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
    :param name: name of the document for which assets are to be returned."""
    from frappe.translate import get_dict
    return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
    """Guess and set user language for the session. `frappe.local.lang`

    NOTE(review): `user_language` is accepted but not passed through to
    get_user_lang here -- confirm whether that is intentional."""
    from frappe.translate import get_user_lang
    local.lang = get_user_lang(user)
# local-globals
# Werkzeug Local proxies: each name resolves, at access time, to the
# attribute of the same name on `local`, which init()/connect() set up
# per request/site.
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")

error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")

lang = local("lang")
def init(site, sites_path=None, new_site=False):
    """Initialize frappe for the current site. Reset thread locals `frappe.local`

    :param site: Site name (directory under `sites_path`).
    :param sites_path: Base path containing the sites; defaults to '.'.
    :param new_site: True while the site is being created (relaxes
        site-existence checks in get_site_config)."""
    # Idempotent per thread: a second call is a no-op until destroy().
    if getattr(local, "initialised", None):
        return

    if not sites_path:
        sites_path = '.'

    # Per-request log buffers.
    local.error_log = []
    local.message_log = []
    local.debug_log = []
    local.realtime_log = []
    local.flags = _dict({
        "ran_schedulers": [],
        "currently_saving": [],
        "redirect_location": "",
        "in_install_db": False,
        "in_install_app": False,
        "in_import": False,
        "in_test": False,
        "mute_messages": False,
        "ignore_links": False,
        "mute_emails": False,
        "has_dataurl": False,
        "new_site": new_site
    })
    local.rollback_observers = []
    local.test_objects = {}

    local.site = site
    local.sites_path = sites_path
    local.site_path = os.path.join(sites_path, site)

    local.request_ip = None
    local.response = _dict({"docs":[]})
    local.task_id = None

    # Site config must be loaded before lang (lang may come from config).
    local.conf = _dict(get_site_config())
    local.lang = local.conf.lang or "en"
    local.lang_full_dict = None

    local.module_app = None
    local.app_modules = None
    local.system_settings = _dict()

    local.user = None
    local.user_perms = None
    local.session = None
    local.role_permissions = {}
    local.valid_columns = {}
    local.new_doc_templates = {}
    local.link_count = {}

    local.jenv = None
    local.jloader =None
    local.cache = {}
    local.document_cache = {}
    local.meta_cache = {}
    local.form_dict = _dict()
    local.session = _dict()

    setup_module_map()

    local.initialised = True
def connect(site=None, db_name=None):
    """Connect to site database instance.

    :param site: If site is given, calls `frappe.init`.
    :param db_name: Optional. Will use from `site_config.json`."""
    from frappe.database import get_db
    if site:
        init(site)

    local.db = get_db(user=db_name or local.conf.db_name)
    # Connections start with full rights; callers downgrade via set_user.
    set_user("Administrator")
def connect_replica():
    """Open a connection to the read replica and make it the active
    `local.db`, keeping the primary connection in `local.primary_db`."""
    from frappe.database import get_db
    user = local.conf.db_name
    password = local.conf.db_password

    if local.conf.different_credentials_for_replica:
        user = local.conf.replica_db_name
        password = local.conf.replica_db_password

    local.replica_db = get_db(host=local.conf.replica_host, user=user, password=password)

    # swap db connections
    local.primary_db = local.db
    local.db = local.replica_db
def get_site_config(sites_path=None, site_path=None):
    """Returns `site_config.json` combined with `sites/common_site_config.json`.
    `site_config` is a set of site wide settings like database name, password, email etc.

    Site-specific values override the common config."""
    config = {}

    sites_path = sites_path or getattr(local, "sites_path", None)
    site_path = site_path or getattr(local, "site_path", None)

    if sites_path:
        common_site_config = os.path.join(sites_path, "common_site_config.json")
        if os.path.exists(common_site_config):
            config.update(get_file_json(common_site_config))

    if site_path:
        site_config = os.path.join(site_path, "site_config.json")
        if os.path.exists(site_config):
            config.update(get_file_json(site_config))
        elif local.site and not local.flags.new_site:
            # Existing site was requested but its config is missing: abort.
            print("{0} does not exist".format(local.site))
            sys.exit(1)
            #raise IncorrectSitePath, "{0} does not exist".format(site_config)

    return _dict(config)
def get_conf(site=None):
    """Return the active site config, initializing a temporary site
    context when none is active."""
    if hasattr(local, 'conf'):
        return local.conf

    else:
        # if no site, get from common_site_config.json
        with init_site(site):
            return local.conf
class init_site:
    """Context manager: init() a site on enter, destroy() on exit."""

    def __init__(self, site=None):
        '''If site==None, initialize it for empty site ('') to load common_site_config.json'''
        self.site = site or ''

    def __enter__(self):
        init(self.site)
        return local

    def __exit__(self, type, value, traceback):
        destroy()
def destroy():
    """Closes connection and releases werkzeug local."""
    if db:
        db.close()

    # Drop all thread-local state set up by init().
    release_local(local)
# memcache
# Process-wide redis connection, created lazily by cache().
redis_server = None
def cache():
    """Returns memcache connection."""
    global redis_server
    if not redis_server:
        from frappe.utils.redis_wrapper import RedisWrapper
        redis_server = RedisWrapper.from_url(conf.get('redis_cache')
            or "redis://localhost:11311")
    return redis_server
def get_traceback():
    """Returns error traceback."""
    # Local import deliberately shadows this function's name within the body.
    from frappe.utils import get_traceback
    return get_traceback()
def errprint(msg):
    """Log error. This is sent back as `exc` in response.

    :param msg: Message."""
    msg = as_unicode(msg)
    # Echo to stdout when not serving an HTTP command, or in developer mode.
    # (`not in` instead of the non-idiomatic `not "cmd" in ...`.)
    if not request or ("cmd" not in local.form_dict) or conf.developer_mode:
        print(msg)

    error_log.append({"exc": msg})
def log(msg):
    """Add to `debug_log`.

    :param msg: Message."""
    if not request:
        # Outside an HTTP request, echo to stdout when site logging is on.
        # (Dropped the redundant `or False` -- truthiness is identical.)
        if conf.get("logging"):
            print(repr(msg))

    debug_log.append(as_unicode(msg))
def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False, primary_action=None):
    """Print a message to the user (via HTTP response).

    Messages are sent in the `__server_messages` property in the
    response JSON and shown in a pop-up / modal.

    :param msg: Message.
    :param title: [optional] Message title.
    :param raise_exception: [optional] Raise given exception and show message.
    :param as_table: [optional] If `msg` is a list of lists, render as HTML table.
    :param indicator: [optional] Indicator color shown with the message.
    :param alert: [optional] Show as a transient alert instead of a modal.
    :param primary_action: [optional] Bind a primary server/client side action.
    """
    from frappe.utils import encode

    msg = safe_decode(msg)
    out = _dict(message=msg)

    def _raise_exception():
        if raise_exception:
            if flags.rollback_on_exception:
                db.rollback()
            import inspect

            if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
                raise raise_exception(msg)
            else:
                raise ValidationError(msg)

    if flags.mute_messages:
        # Still honor raise_exception even when message output is muted.
        _raise_exception()
        return

    if as_table and type(msg) in (list, tuple):
        out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'

    if flags.print_messages and out.msg:
        # BUGFIX: repr() already returns str; the old `.encode("utf-8")`
        # produced bytes and made `str + bytes` raise TypeError on Python 3.
        print("Message: " + repr(out.msg))

    if title:
        out.title = title

    if not indicator and raise_exception:
        indicator = 'red'

    if indicator:
        out.indicator = indicator

    if alert:
        out.alert = 1

    if primary_action:
        out.primary_action = primary_action

    message_log.append(json.dumps(out))

    if raise_exception and hasattr(raise_exception, '__name__'):
        local.response['exc_type'] = raise_exception.__name__

    _raise_exception()
def clear_messages():
    """Discard all queued user messages for this request."""
    local.message_log = []
def clear_last_message():
    """Drop the most recently queued user message, if any."""
    if local.message_log:
        # Rebind (rather than pop in place) to mirror clear_messages().
        local.message_log = local.message_log[:-1]
def throw(msg, exc=ValidationError, title=None):
    """Throw exception and show message (`msgprint`).

    :param msg: Message.
    :param exc: Exception class. Default `frappe.ValidationError`
    :param title: [optional] Message title."""
    msgprint(msg, raise_exception=exc, title=title, indicator='red')
def emit_js(js, user=False, **kwargs):
    """Publish a realtime `eval_js` event so the client runs *js*.

    :param js: Javascript source to execute in the user's browser.
    :param user: Target user; `False` (the default) means the current
        session user."""
    from frappe.realtime import publish_realtime
    # `is False`, not `== False`: only the sentinel default should fall
    # back to the session user, never another falsy value.
    if user is False:
        user = session.user
    publish_realtime('eval_js', js, user=user, **kwargs)
def create_folder(path, with_init=False):
    """Create a folder in the given path and add an `__init__.py` file (optional).

    :param path: Folder path.
    :param with_init: Create `__init__.py` in the new folder."""
    from frappe.utils import touch_file
    # NOTE(review): exists-then-makedirs is racy under concurrency, and
    # __init__.py is only touched when this call created the folder.
    if not os.path.exists(path):
        os.makedirs(path)

        if with_init:
            touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
    """Set current user.

    :param username: **User** name to set as current user."""
    local.session.user = username
    local.session.sid = username
    # Reset all per-user caches so nothing leaks from the previous user.
    local.cache = {}
    local.form_dict = _dict()
    local.jenv = None
    local.session.data = _dict()
    local.role_permissions = {}
    local.new_doc_templates = {}
    local.user_perms = None
def get_user():
    """Return (and lazily build) the UserPermissions object for the
    current session user."""
    from frappe.utils.user import UserPermissions
    if not local.user_perms:
        local.user_perms = UserPermissions(local.session.user)
    return local.user_perms
def get_roles(username=None):
    """Returns roles of current user (or of `username` when given)."""
    if not local.session:
        # No session yet (e.g. before login): everyone is a Guest.
        return ["Guest"]

    if username:
        import frappe.permissions
        return frappe.permissions.get_roles(username)
    else:
        return get_user().get_roles()
def get_request_header(key, default=None):
    """Return HTTP request header.

    :param key: HTTP header key.
    :param default: Default value."""
    return request.headers.get(key, default)
def sendmail(recipients=None, sender="", subject="No Subject", message="No Message",
        as_markdown=False, delayed=True, reference_doctype=None, reference_name=None,
        unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
        attachments=None, content=None, doctype=None, name=None, reply_to=None,
        cc=None, bcc=None, message_id=None, in_reply_to=None, send_after=None, expose_recipients=None,
        send_priority=1, communication=None, retry=1, now=None, read_receipt=None, is_notification=False,
        inline_images=None, template=None, args=None, header=None, print_letterhead=False):
    """Send email using user's default **Email Account** or global default **Email Account**.

    :param recipients: List of recipients.
    :param sender: Email sender. Default is current user.
    :param subject: Email Subject.
    :param message: (or `content`) Email Content.
    :param as_markdown: Convert content markdown to HTML.
    :param delayed: Send via scheduled email sender **Email Queue**. Don't send immediately. Default is true
    :param send_priority: Priority for Email Queue, default 1.
    :param reference_doctype: (or `doctype`) Append as communication to this DocType.
    :param reference_name: (or `name`) Append as communication to this document name.
    :param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
    :param unsubscribe_params: Unsubscribe paramaters to be loaded on the unsubscribe_method [optional] (dict).
    :param attachments: List of attachments.
    :param reply_to: Reply-To Email Address.
    :param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
    :param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
    :param send_after: Send after the given datetime.
    :param expose_recipients: Display all recipients in the footer message - "This email was sent to"
    :param communication: Communication link to be set in Email Queue record
    :param inline_images: List of inline images as {"filename", "filecontent"}. All src properties will be replaced with random Content-Id
    :param template: Name of html template from templates/emails folder
    :param args: Arguments for rendering the template
    :param header: Append header in email
    """
    # BUGFIX: the old `recipients=[], cc=[], bcc=[]` mutable defaults were
    # shared across calls; normalize the None defaults to fresh lists here.
    recipients = recipients or []
    cc = cc or []
    bcc = bcc or []

    text_content = None
    if template:
        message, text_content = get_email_from_template(template, args)

    message = content or message

    if as_markdown:
        message = frappe.utils.md_to_html(message)

    if not delayed:
        now = True

    if not sender:
        sender = frappe.db.get_value('User', frappe.session.user, 'email')

    from frappe.email import queue
    queue.send(recipients=recipients, sender=sender,
        subject=subject, message=message, text_content=text_content,
        reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
        unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
        attachments=attachments, reply_to=reply_to, cc=cc, bcc=bcc, message_id=message_id, in_reply_to=in_reply_to,
        send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority,
        communication=communication, now=now, read_receipt=read_receipt, is_notification=is_notification,
        inline_images=inline_images, header=header, print_letterhead=print_letterhead)
# Registries filled by @frappe.whitelist(); consulted by the request
# handler to decide which dotted method paths may be called over HTTP.
whitelisted = []
guest_methods = []
xss_safe_methods = []

def whitelist(allow_guest=False, xss_safe=False):
    """
    Decorator for whitelisting a function and making it accessible via HTTP.
    Standard request will be `/api/method/[path.to.method]`

    :param allow_guest: Allow non logged-in user to access this method.
    :param xss_safe: Mark the method's output as safe from XSS filtering.

    Use as:

        @frappe.whitelist()
        def myfunc(param1, param2):
            pass
    """
    def innerfn(fn):
        global whitelisted, guest_methods, xss_safe_methods
        whitelisted.append(fn)

        if allow_guest:
            guest_methods.append(fn)

            if xss_safe:
                xss_safe_methods.append(fn)

        return fn

    return innerfn
def read_only():
    """Decorator: run the wrapped function against the read replica when
    `read_from_replica` is configured, restoring the primary connection
    afterwards (even on error)."""
    def innfn(fn):
        def wrapper_fn(*args, **kwargs):
            if conf.read_from_replica:
                connect_replica()

            # The old `except: raise` was a no-op; try/finally alone has
            # identical behavior.
            try:
                retval = fn(*args, **get_newargs(fn, kwargs))
            finally:
                if local and hasattr(local, 'primary_db'):
                    local.db.close()
                    local.db = local.primary_db

            return retval
        return wrapper_fn
    return innfn
def only_for(roles, message=False):
    """Raise `frappe.PermissionError` if the user does not have any of the given **Roles**.

    :param roles: List of roles to check.
    :param message: If truthy, also queue a user-facing message before raising."""
    # Permission checks are bypassed entirely while running tests.
    if local.flags.in_test:
        return

    if not isinstance(roles, (tuple, list)):
        roles = (roles,)

    roles = set(roles)
    myroles = set(get_roles())

    if not roles.intersection(myroles):
        if message:
            msgprint(_('Only for {}'.format(', '.join(roles))))

        raise PermissionError
def get_domain_data(module):
    """Return the `data` dict declared by the domain hook for *module*,
    or an empty _dict when the module declares no domain."""
    try:
        domain_data = get_hooks('domains')
        if module in domain_data:
            return _dict(get_attr(get_hooks('domains')[module][0] + '.data'))
        else:
            return _dict()
    except ImportError:
        # Missing domain modules are tolerated in tests only.
        if local.flags.in_test:
            return _dict()
        else:
            raise
def clear_cache(user=None, doctype=None):
    """Clear **User**, **DocType** or global cache.

    :param user: If user is given, only user cache is cleared.
    :param doctype: If doctype is given, only DocType cache is cleared."""
    import frappe.cache_manager
    if doctype:
        frappe.cache_manager.clear_doctype_cache(doctype)
        reset_metadata_version()
    elif user:
        frappe.cache_manager.clear_user_cache(user)
    else: # everything
        from frappe import translate
        frappe.cache_manager.clear_user_cache()
        translate.clear_cache()
        reset_metadata_version()
        local.cache = {}
        local.new_doc_templates = {}

        # Let apps clear their own caches via the clear_cache hook.
        for fn in get_hooks("clear_cache"):
            get_attr(fn)()

    local.role_permissions = {}
def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False):
    """Raises `frappe.PermissionError` if not permitted.

    :param doctype: DocType for which permission is to be check.
    :param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
    :param doc: [optional] Checks User permissions for given doc.
    :param user: [optional] Check for given user. Default: current user.
    :param throw: If True, raise instead of returning a falsy result."""
    if not doctype and doc:
        doctype = doc.doctype

    import frappe.permissions
    out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)
    if throw and not out:
        if doc:
            frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name))
        else:
            frappe.throw(_("No permission for {0}").format(doctype))

    return out
def has_website_permission(doc=None, ptype='read', user=None, verbose=False, doctype=None):
    """Raises `frappe.PermissionError` if not permitted.

    :param doctype: DocType for which permission is to be check.
    :param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
    :param doc: Checks User permissions for given doc.
    :param user: [optional] Check for given user. Default: current user."""
    if not user:
        user = session.user

    if doc:
        if isinstance(doc, string_types):
            doc = get_doc(doctype, doc)

        doctype = doc.doctype

        if doc.flags.ignore_permissions:
            return True

        # check permission in controller
        if hasattr(doc, 'has_website_permission'):
            return doc.has_website_permission(ptype, user, verbose=verbose)

    hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
    if hooks:
        for method in hooks:
            result = call(method, doc=doc, ptype=ptype, user=user, verbose=verbose)
            # if even a single permission check is Falsy
            if not result:
                return False

        # else it is Truthy
        return True

    else:
        # No controller method and no hooks: deny by default.
        return False
def is_table(doctype):
    """Returns True if `istable` property (indicating child Table) is set for given DocType."""
    def get_tables():
        return db.sql_list("select name from tabDocType where istable=1")

    # The full list of child-table doctypes is cached in redis.
    tables = cache().get_value("is_table", get_tables)
    return doctype in tables
def get_precision(doctype, fieldname, currency=None, doc=None):
    """Get precision for a given field.

    NOTE(review): `currency` is accepted but not forwarded to
    get_field_precision here -- confirm whether that is intentional."""
    from frappe.model.meta import get_field_precision
    return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency)
def generate_hash(txt=None, length=None):
    """Generates random hash for given text + current timestamp + random string.

    :param txt: Optional text to mix into the hash.
    :param length: Optional length to truncate the hex digest to."""
    import hashlib, time
    from .utils import random_string
    # Timestamp + random salt make the digest effectively unguessable.
    digest = hashlib.sha224(((txt or "") + repr(time.time()) + repr(random_string(8))).encode()).hexdigest()
    if length:
        digest = digest[:length]
    return digest
def reset_metadata_version():
    """Reset `metadata_version` (Client (Javascript) build ID) hash.

    Returns the newly generated hash; a fresh value forces connected
    clients to refresh their cached metadata."""
    v = generate_hash()
    cache().set_value("metadata_version", v)
    return v
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
    """Returns a new document of the given DocType with defaults set.

    :param doctype: DocType of the new document.
    :param parent_doc: [optional] add to parent document.
    :param parentfield: [optional] add against this `parentfield`.
    :param as_dict: [optional] return a plain dict instead of a Document."""
    from frappe.model.create_new import get_new_doc
    return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value=None):
    """Set document value. Calls `frappe.client.set_value`"""
    import frappe.client
    return frappe.client.set_value(doctype, docname, fieldname, value)
def get_cached_doc(*args, **kwargs):
    """Fetch a document, preferring the request-local cache, then redis,
    and finally the database (same arguments as `get_doc`)."""
    if args and len(args) > 1 and isinstance(args[1], text_type):
        key = get_document_cache_key(args[0], args[1])

        # 1. request-local cache
        cached = local.document_cache.get(key)
        if cached:
            return cached

        # 2. redis cache (stored as a dict; rebuild the Document)
        cached = cache().hget('document_cache', key)
        if cached:
            cached = get_doc(cached)
            local.document_cache[key] = cached
            return cached

    # 3. database (get_doc also repopulates both caches)
    return get_doc(*args, **kwargs)
def get_document_cache_key(doctype, name):
    """Build the `doctype::name` key under which a document is cached."""
    key = '{0}::{1}'.format(doctype, name)
    return key
def clear_document_cache(doctype, name):
    """Drop a document from the local and redis caches and invalidate
    the doctype's `last_modified` entry."""
    cache().hdel("last_modified", doctype)
    key = get_document_cache_key(doctype, name)
    local.document_cache.pop(key, None)
    cache().hdel('document_cache', key)
def get_cached_value(doctype, name, fieldname, as_dict=False):
    """Read one or more field values off the cached copy of a document.

    :param fieldname: a single fieldname (str) or an iterable of fieldnames.
    :param as_dict: return a `_dict` of fieldname -> value (iterable only)."""
    doc = get_cached_doc(doctype, name)

    if isinstance(fieldname, string_types):
        if as_dict:
            throw('Cannot make dict for single fieldname')
        return doc.get(fieldname)

    values = [doc.get(f) for f in fieldname]
    return _dict(zip(fieldname, values)) if as_dict else values
def get_doc(*args, **kwargs):
    """Return a `frappe.model.document.Document` object of the given type and name.

    :param arg1: DocType name as string **or** document JSON.
    :param arg2: [optional] Document name as string.

    Examples:

        # insert a new document
        todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
        todo.insert()

        # open an existing document
        todo = frappe.get_doc("ToDo", "TD0001")
    """
    import frappe.model.document
    doc = frappe.model.document.get_doc(*args, **kwargs)

    # set in cache (both request-local and redis) so that subsequent
    # get_cached_doc calls for the same (doctype, name) are cheap
    if args and len(args) > 1:
        key = get_document_cache_key(args[0], args[1])
        local.document_cache[key] = doc
        cache().hset('document_cache', key, doc.as_dict())

    return doc
def get_last_doc(doctype):
    """Get last created document of this type.

    Raises `DoesNotExistError` when no document of the type exists."""
    rows = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
    if not rows:
        raise DoesNotExistError
    return get_doc(doctype, rows[0].name)
def get_single(doctype):
    """Return a `frappe.model.document.Document` object of the given Single doctype.

    (Single doctypes use the doctype name itself as the document name.)"""
    return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
    """Get `frappe.model.meta.Meta` instance of given doctype name.

    :param cached: serve from the meta cache when possible."""
    import frappe.model.meta
    return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
    """Return the Python module that defines the given doctype's controller."""
    import frappe.modules
    return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
    ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True):
    """Delete a document. Calls `frappe.model.delete_doc.delete_doc`.

    :param doctype: DocType of document to be delete.
    :param name: Name of document to be delete.
    :param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
    :param ignore_doctypes: Ignore if child table is one of these.
    :param for_reload: Call `before_reload` trigger before deleting.
    :param ignore_permissions: Ignore user permissions.
    :param ignore_on_trash: Skip the `on_trash` controller hook.
    :param ignore_missing: Do not raise if the document does not exist."""
    import frappe.model.delete_doc
    frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload,
        ignore_permissions, flags, ignore_on_trash, ignore_missing)
def delete_doc_if_exists(doctype, name, force=0):
    """Delete document if exists."""
    if not db.exists(doctype, name):
        return
    delete_doc(doctype, name, force=force)
def reload_doctype(doctype, force=False, reset_permissions=False):
    """Reload DocType from model (`[module]/[doctype]/[name]/[name].json`) files.

    The module is looked up from the database before delegating to `reload_doc`."""
    reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype),
        force=force, reset_permissions=reset_permissions)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
    """Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.

    :param module: Module name.
    :param dt: DocType name.
    :param dn: Document name.
    :param force: Reload even if `modified` timestamp matches.
    :param reset_permissions: Reset the doctype's permissions from the model file.
    """
    import frappe.modules
    return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions)
def rename_doc(*args, **kwargs):
    """Rename a document. Calls `frappe.model.rename_doc.rename_doc`"""
    from frappe.model.rename_doc import rename_doc
    return rename_doc(*args, **kwargs)
def get_module(modulename):
    """Returns a module object for given Python module name using `importlib.import_module`."""
    return importlib.import_module(modulename)
def scrub(txt):
    """Returns sluggified string. e.g. `Sales Order` becomes `sales_order`."""
    lowered = txt.lower()
    return lowered.replace(' ', '_').replace('-', '_')
def unscrub(txt):
    """Returns titlified string. e.g. `sales_order` becomes `Sales Order`."""
    spaced = txt.replace('_', ' ').replace('-', ' ')
    return spaced.title()
def get_module_path(module, *joins):
    """Get the path of the given module name.

    :param module: Module name.
    :param *joins: Join additional path elements using `os.path.join`."""
    module = scrub(module)
    # local.module_app maps a scrubbed module name to the app that owns it
    return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
    """Return path of given app.

    :param app_name: App name.
    :param *joins: Join additional path elements using `os.path.join`."""
    return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
    """Return path of current site.

    :param *joins: Join additional path elements using `os.path.join`."""
    return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
    """Return path of given Python module name.

    :param modulename: Python module name.
    :param *joins: Join additional path elements using `os.path.join`."""
    # path components are sluggified, except when reaching into `public`
    # (asset paths keep their original casing)
    if "public" not in joins:
        joins = [scrub(part) for part in joins]
    base = os.path.dirname(get_module(scrub(modulename)).__file__)
    return os.path.join(base, *joins)
def get_module_list(app_name):
    """Get list of modules for given all via `app/modules.txt`."""
    return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_internal_apps=True, sites_path=None):
    """Get list of all apps via `sites/apps.txt`."""
    sites_path = sites_path or local.sites_path

    apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)

    if with_internal_apps:
        # the site-level apps.txt may list apps missing from the bench-level file
        for app in get_file_items(os.path.join(local.site_path, "apps.txt")):
            if app not in apps:
                apps.append(app)

    # frappe always comes first when present
    if "frappe" in apps:
        apps.remove("frappe")
        apps.insert(0, 'frappe')

    return apps
def get_installed_apps(sort=False, frappe_last=False):
    """Get list of installed apps in current site."""
    # nothing is installed yet while the database itself is being set up
    if getattr(flags, "in_install_db", True):
        return []

    if not db:
        connect()

    installed = json.loads(db.get_global("installed_apps") or "[]")

    if sort:
        # preserve the bench-level ordering from apps.txt
        installed = [app for app in get_all_apps(True) if app in installed]

    if frappe_last:
        # move (or append) frappe to the end of the list
        if 'frappe' in installed:
            installed.remove('frappe')
        installed.append('frappe')

    return installed
def get_doc_hooks():
    '''Returns hooked methods for given doc. It will expand the dict tuple if required.'''
    if not hasattr(local, 'doc_events_hooks'):
        expanded = {}
        for key, value in iteritems(get_hooks('doc_events', {})):
            # a tuple key registers the same hooks for several doctypes
            doctypes = key if isinstance(key, tuple) else (key,)
            for doctype in doctypes:
                append_hook(expanded, doctype, value)
        local.doc_events_hooks = expanded
    return local.doc_events_hooks
def get_hooks(hook=None, default=None, app_name=None):
    """Get hooks via `app/hooks.py`

    :param hook: Name of the hook. Will gather all hooks for this name and return as a list.
    :param default: Default if no hook found.
    :param app_name: Filter by app."""
    def load_app_hooks(app_name=None):
        hooks = {}
        for app in [app_name] if app_name else get_installed_apps(sort=True):
            app = "frappe" if app=="webnotes" else app
            try:
                app_hooks = get_module(app + ".hooks")
            except ImportError:
                if local.flags.in_install_app:
                    # app is not installed while restoring -- skip it.
                    # (was `pass`, which fell through to `raise` below and
                    # defeated the stated intent of ignoring the app)
                    continue
                # report the app that actually failed to import; `app_name`
                # is None when iterating all installed apps
                print('Could not find app "{0}"'.format(app))
                if not request:
                    sys.exit(1)
                raise
            for key in dir(app_hooks):
                if not key.startswith("_"):
                    append_hook(hooks, key, getattr(app_hooks, key))
        return hooks

    # developer mode always reloads hooks from disk
    no_cache = conf.developer_mode or False

    if app_name:
        hooks = _dict(load_app_hooks(app_name))
    else:
        if no_cache:
            hooks = _dict(load_app_hooks())
        else:
            hooks = _dict(cache().get_value("app_hooks", load_app_hooks))

    if hook:
        return hooks.get(hook) or (default if default is not None else [])
    else:
        return hooks
def append_hook(target, key, value):
    '''Append *value* under *key* in *target*, listifying scalars.

    Dict values (like `doc_events`) are merged recursively so each inner
    key accumulates its own list; any other value is appended to the list
    stored at *key*.
    '''
    if isinstance(value, dict):
        # recurse: merge each inner key into the nested dict
        nested = target.setdefault(key, {})
        for inner_key in value:
            append_hook(nested, inner_key, value[inner_key])
    else:
        items = value if isinstance(value, list) else [value]
        target.setdefault(key, []).extend(items)
def setup_module_map():
    """Rebuild map of all modules (internal).

    Populates `local.app_modules` (app -> [modules]) and `local.module_app`
    (module -> app), reading from / writing to the site cache when a
    database is configured."""
    _cache = cache()

    if conf.db_name:
        local.app_modules = _cache.get_value("app_modules")
        local.module_app = _cache.get_value("module_app")

    # cache miss (or no db yet): rebuild both maps from each app's modules.txt
    if not (local.app_modules and local.module_app):
        local.module_app, local.app_modules = {}, {}
        for app in get_all_apps(True):
            # legacy alias: "webnotes" was renamed to "frappe"
            if app=="webnotes": app="frappe"
            local.app_modules.setdefault(app, [])
            for module in get_module_list(app):
                module = scrub(module)
                local.module_app[module] = app
                local.app_modules[app].append(module)

        if conf.db_name:
            _cache.set_value("app_modules", local.app_modules)
            _cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
    """Returns items from text file as a list. Ignores empty lines."""
    import frappe.utils

    content = read_file(path, raise_not_found=raise_not_found)
    if not content:
        return []

    content = frappe.utils.strip(content)

    # when ignore_empty_lines is set, blank lines and '#' comments are dropped
    return [
        line.strip() for line in content.splitlines()
        if (not ignore_empty_lines) or (line.strip() and not line.startswith("#"))
    ]
def get_file_json(path):
    """Read the file at *path* and return its parsed JSON content."""
    with open(path, 'r') as json_file:
        return json.load(json_file)
def read_file(path, raise_not_found=False):
    """Open a file and return its content as Unicode.

    :param path: File path.
    :param raise_not_found: If True, raise IOError when the file is missing;
        otherwise return None."""
    if isinstance(path, text_type):
        # NOTE(review): py2 remnant -- the path is encoded to utf-8 bytes
        # before the os calls; works on POSIX but a plain str path would be
        # safer on py3/Windows. Confirm before changing.
        path = path.encode("utf-8")

    if os.path.exists(path):
        with open(path, "r") as f:
            return as_unicode(f.read())
    elif raise_not_found:
        raise IOError("{} Not Found".format(path))
    else:
        return None
def get_attr(method_string):
    """Get python method object from its name."""
    parts = method_string.split(".")
    app_name = parts[0]
    # the dotted path must belong to an installed app (except during install)
    if not local.flags.in_install and app_name not in get_installed_apps():
        throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)
    modulename = '.'.join(parts[:-1])
    return getattr(get_module(modulename), parts[-1])
def call(fn, *args, **kwargs):
    """Call a function and match arguments."""
    if isinstance(fn, string_types):
        fn = get_attr(fn)
    # drop keyword arguments the target does not accept
    return fn(*args, **get_newargs(fn, kwargs))
def get_newargs(fn, kwargs):
    """Return the subset of *kwargs* that *fn* actually accepts.

    :param fn: callable (or object carrying a precomputed `fnargs` list).
    :param kwargs: candidate keyword arguments.

    The reserved key "flags" is always stripped. If *fn* takes **kwargs,
    everything (except "flags") is passed through."""
    if hasattr(fn, 'fnargs'):
        # precomputed argument list (set by decorators); no **kwargs info.
        # (previously `varkw` was left unbound on this branch -> NameError)
        fnargs = fn.fnargs
        varkw = None
    else:
        try:
            # getfullargspec handles keyword-only args and annotations and is
            # the only option on Python 3.11+ (getargspec was removed);
            # inspect it once instead of four times
            spec = inspect.getfullargspec(fn)
            varkw = spec.varkw
        except AttributeError:
            # Python 2 fallback
            spec = inspect.getargspec(fn)
            varkw = spec.keywords
        fnargs = spec.args

    newargs = {}
    for a in kwargs:
        if (a in fnargs) or varkw:
            newargs[a] = kwargs.get(a)

    # "flags" is frappe-internal and never forwarded
    newargs.pop("flags", None)

    return newargs
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
    """Create a new **Property Setter** (for overriding DocType and DocField properties).

    If doctype is not specified, it will create a property setter for all fields with the
    given fieldname"""
    args = _dict(args)
    if not args.doctype_or_field:
        args.doctype_or_field = 'DocField'
        if not args.property_type:
            # infer the property's type from DocField's own field definition
            args.property_type = db.get_value('DocField',
                {'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data'

    if not args.doctype:
        # apply to every doctype that has a field with this fieldname
        doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname)
    else:
        doctype_list = [args.doctype]

    for doctype in doctype_list:
        if not args.property_type:
            args.property_type = db.get_value('DocField',
                {'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data'

        ps = get_doc({
            'doctype': "Property Setter",
            'doctype_or_field': args.doctype_or_field,
            'doc_type': doctype,
            'field_name': args.fieldname,
            'property': args.property,
            'value': args.value,
            'property_type': args.property_type or "Data",
            '__islocal': 1
        })
        ps.flags.ignore_validate = ignore_validate
        ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
        ps.validate_fieldtype_change()
        ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
    """Import a file using Data Import.

    :param path: Path to the file to import.
    :param ignore_links: Do not validate Link fields.
    :param ignore_insert: Update existing records only.
    :param insert: Force insert of new records."""
    from frappe.core.doctype.data_import import data_import
    data_import.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
    """ No_copy fields also get copied.

    Returns a new, unsaved copy of *doc* (dict or Document) with identity
    fields (name, owner, timestamps, amendment links) cleared on the parent
    and all children.

    :param ignore_no_copy: when False, fields marked `no_copy` are blanked."""
    import copy

    def remove_no_copy_fields(d):
        # blank out every field the meta marks as no_copy
        for df in d.meta.get("fields", {"no_copy": 1}):
            if hasattr(d, df.fieldname):
                d.set(df.fieldname, None)

    fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by']

    if not local.flags.in_test:
        # outside tests, the copy always starts as a draft
        fields_to_clear.append("docstatus")

    if not isinstance(doc, dict):
        d = doc.as_dict()
    else:
        d = doc

    newdoc = get_doc(copy.deepcopy(d))
    newdoc.set("__islocal", 1)
    for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']):
        newdoc.set(fieldname, None)

    if not ignore_no_copy:
        remove_no_copy_fields(newdoc)

    # children are new rows too: clear their identity fields as well
    for i, d in enumerate(newdoc.get_all_children()):
        d.set("__islocal", 1)

        for fieldname in fields_to_clear:
            d.set(fieldname, None)

        if not ignore_no_copy:
            remove_no_copy_fields(d)

    return newdoc
def compare(val1, condition, val2):
    """Compare two values using `frappe.utils.compare`

    `condition` could be:
    - "^"
    - "in"
    - "not in"
    - "="
    - "!="
    - ">"
    - "<"
    - ">="
    - "<="
    - "not None"
    - "None"
    """
    import frappe.utils
    return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None,
    context=None, indicator_color=None, primary_action='/', primary_label=None, fullpage=False,
    width=None, template='message'):
    """Send response as a web page with a message rather than JSON. Used to show permission errors etc.

    :param title: Page title and heading.
    :param html: Message body (HTML).
    :param success: Alert message.
    :param http_status_code: HTTP status code
    :param context: web template context
    :param indicator_color: color of indicator in title
    :param primary_action: route on primary button (default is `/`)
    :param primary_label: label on primary button (default is "Home")
    :param fullpage: hide header / footer
    :param width: Width of message in pixels
    :param template: Optionally pass view template
    """
    local.message_title = title
    local.message = html
    local.response['type'] = 'page'
    local.response['route'] = template
    local.no_cache = 1

    if http_status_code:
        local.response['http_status_code'] = http_status_code

    if not context:
        context = {}

    # pick an indicator color from the outcome when none is given
    if not indicator_color:
        if success:
            indicator_color = 'green'
        elif http_status_code and http_status_code > 300:
            indicator_color = 'red'
        else:
            indicator_color = 'blue'

    context.update({
        'indicator_color': indicator_color,
        'primary_label': primary_label,
        'primary_action': primary_action,
        'error_code': http_status_code,
        'fullpage': fullpage,
    })
    if width:
        context['card_width'] = width

    local.response['context'] = context
def redirect_to_message(title, html, http_status_code=None, context=None, indicator_color=None):
    """Redirects to /message?id=random
    Similar to respond_as_web_page, but used to 'redirect' and show message pages like success, failure, etc. with a detailed message

    :param title: Page title and heading.
    :param html: Message to be shown.
    :param http_status_code: HTTP status code.

    Example Usage:
        frappe.redirect_to_message(_('Thank you'), "<div><p>You will receive an email at test@example.com</p></div>")
    """
    message_id = generate_hash(length=8)

    message = {
        'context': context or {},
        'http_status_code': http_status_code or 200
    }
    message['context'].update({
        'header': title,
        'title': title,
        'message': html
    })
    if indicator_color:
        message['context']['indicator_color'] = indicator_color

    # the message page reads this back from the cache within 60 seconds
    cache().set_value("message_id:{0}".format(message_id), message, expires_in_sec=60)
    location = '/message?id={0}'.format(message_id)

    if getattr(local, 'is_ajax', False):
        # ajax callers perform the redirect themselves
        return location

    local.response["type"] = "redirect"
    local.response["location"] = location
def build_match_conditions(doctype, as_condition=True):
    """Return match (User permissions) for given doctype as list or SQL."""
    import frappe.desk.reportview
    return frappe.desk.reportview.build_match_conditions(doctype, as_condition=as_condition)
def get_list(doctype, *args, **kwargs):
    """List database query via `frappe.model.db_query`. Will also check for permissions.

    :param doctype: DocType on which query is to be made.
    :param fields: List of fields or `*`.
    :param filters: List of filters (see example).
    :param order_by: Order By e.g. `modified desc`.
    :param limit_page_start: Start results at record #. Default 0.
    :param limit_page_length: No of records in the page. Default 20.

    Example usage:

        # simple dict filter
        frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

        # filter as a list of lists
        frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])

        # filter as a list of dicts
        frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
    """
    import frappe.model.db_query
    return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
    """List database query via `frappe.model.db_query`. Will **not** check for permissions.
    Parameters are same as `frappe.get_list`

    :param doctype: DocType on which query is to be made.
    :param fields: List of fields or `*`. Default is: `["name"]`.
    :param filters: List of filters (see example).
    :param order_by: Order By e.g. `modified desc`.
    :param limit_start: Start results at record #. Default 0.
    :param limit_page_length: No of records in the page. Default 20.

    Example usage:

        # simple dict filter
        frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

        # filter as a list of lists
        frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])

        # filter as a list of dicts
        frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
    """
    kwargs["ignore_permissions"] = True
    # unlike get_list, default to an unpaginated result set
    kwargs.setdefault("limit_page_length", 0)
    return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
    """Returns a document property or list of properties.

    Alias for `frappe.db.get_value`

    :param doctype: DocType name.
    :param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
    :param fieldname: Column name.
    :param ignore: Don't raise exception if table, column is missing.
    :param as_dict: Return values as dict.
    :param debug: Print query in error log.
    """
    return db.get_value(*args, **kwargs)
def as_json(obj, indent=1):
    """Serialize *obj* to a JSON string using frappe's default handler
    (handles dates, Documents and other frappe types); keys are sorted."""
    from frappe.utils.response import json_handler
    return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler, separators=(',', ': '))
def are_emails_muted():
    """True when outgoing email is suppressed, either via the runtime
    `mute_emails` flag or the site config."""
    from frappe.utils import cint
    return flags.mute_emails or cint(conf.get("mute_emails") or 0) or False
def get_test_records(doctype):
    """Returns list of objects from `test_records.json` in the given doctype's folder."""
    from frappe.modules import get_doctype_module, get_module_path
    path = os.path.join(get_module_path(get_doctype_module(doctype)),
        "doctype", scrub(doctype), "test_records.json")
    if not os.path.exists(path):
        return []
    with open(path, "r") as f:
        return json.loads(f.read())
def format_value(*args, **kwargs):
    """Format value with given field properties.

    :param value: Value to be formatted.
    :param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
    import frappe.utils.formatters
    return frappe.utils.formatters.format_value(*args, **kwargs)
def format(*args, **kwargs):
    """Format value with given field properties.

    Alias of `format_value`. NOTE: shadows the builtin `format` within
    this module's namespace.

    :param value: Value to be formatted.
    :param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
    import frappe.utils.formatters
    return frappe.utils.formatters.format_value(*args, **kwargs)
def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output = None, no_letterhead = 0, password=None):
    """Get Print Format for given document.

    :param doctype: DocType of document.
    :param name: Name of document.
    :param print_format: Print Format name. Default 'Standard',
    :param style: Print Format style.
    :param as_pdf: Return as PDF. Default False.
    :param password: Password to encrypt the pdf with. Default None"""
    from frappe.website.render import build_page
    from frappe.utils.pdf import get_pdf

    # build_page("printview") reads its inputs off local.form_dict
    local.form_dict.doctype = doctype
    local.form_dict.name = name
    local.form_dict.format = print_format
    local.form_dict.style = style
    local.form_dict.doc = doc
    local.form_dict.no_letterhead = no_letterhead

    options = None
    if password:
        options = {'password': password}

    if not html:
        html = build_page("printview")

    if as_pdf:
        return get_pdf(html, output = output, options = options)
    else:
        return html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None, lang=None, print_letterhead=True, password=None):
    """Render the document's print view as an email attachment dict
    (`fname` + `fcontent`), as PDF or HTML per Print Settings."""
    from frappe.utils import scrub_urls

    if not file_name: file_name = name
    # strip characters that are unsafe in attachment file names
    file_name = file_name.replace(' ','').replace('/','-')

    print_settings = db.get_singles_dict("Print Settings")

    _lang = local.lang

    #set lang as specified in print format attachment
    if lang: local.lang = lang
    local.flags.ignore_print_permissions = True

    no_letterhead = not print_letterhead

    if int(print_settings.send_print_as_pdf or 0):
        out = {
            "fname": file_name + ".pdf",
            "fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc, no_letterhead=no_letterhead, password=password)
        }
    else:
        out = {
            "fname": file_name + ".html",
            "fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc, no_letterhead=no_letterhead, password=password)).encode("utf-8")
        }

    local.flags.ignore_print_permissions = False
    #reset lang to original local lang
    local.lang = _lang

    return out
def publish_progress(*args, **kwargs):
    """Show the user progress for a long request

    :param percent: Percent progress
    :param title: Title
    :param doctype: Optional, for document type
    :param docname: Optional, for document name
    :param description: Optional description
    """
    import frappe.realtime
    return frappe.realtime.publish_progress(*args, **kwargs)
def publish_realtime(*args, **kwargs):
    """Publish real-time updates

    :param event: Event name, like `task_progress` etc.
    :param message: JSON message object. For async must contain `task_id`
    :param room: Room in which to publish update (default entire site)
    :param user: Transmit to user
    :param doctype: Transmit to doctype, docname
    :param docname: Transmit to doctype, docname
    :param after_commit: (default False) will emit after current transaction is committed
    """
    import frappe.realtime
    return frappe.realtime.publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
    """A key value store for caching within a request

    :param namespace: frappe.local.cache[namespace]
    :param key: frappe.local.cache[namespace][key] used to retrieve value
    :param generator: method to generate a value if not found in store
    :param regenerate_if_none: re-run *generator* when the stored value is None
    """
    namespace_cache = local.cache.setdefault(namespace, {})

    if key not in namespace_cache:
        namespace_cache[key] = generator()
    elif namespace_cache[key] is None and regenerate_if_none:
        # identity check (was `== None`, PEP 8 E711); a previous miss
        # stored None, so try generating again
        namespace_cache[key] = generator()

    return namespace_cache[key]
def enqueue(*args, **kwargs):
    '''
    Enqueue method to be executed using a background worker

    :param method: method string or method object
    :param queue: (optional) should be either long, default or short
    :param timeout: (optional) should be set according to the functions
    :param event: this is passed to enable clearing of jobs from queues
    :param is_async: (optional) if is_async=False, the method is executed immediately, else via a worker
    :param job_name: (optional) can be used to name an enqueue call, which can be used to prevent duplicate calls
    :param kwargs: keyword arguments to be passed to the method
    '''
    import frappe.utils.background_jobs
    return frappe.utils.background_jobs.enqueue(*args, **kwargs)
def enqueue_doc(*args, **kwargs):
    '''
    Enqueue a document method to be executed using a background worker

    :param doctype: DocType of the document on which you want to run the event
    :param name: Name of the document on which you want to run the event
    :param method: method string or method object
    :param queue: (optional) should be either long, default or short
    :param timeout: (optional) should be set according to the functions
    :param kwargs: keyword arguments to be passed to the method
    '''
    import frappe.utils.background_jobs
    return frappe.utils.background_jobs.enqueue_doc(*args, **kwargs)
def get_doctype_app(doctype):
    """Return the app that provides *doctype* (memoized per request)."""
    def _lookup():
        doctype_module = local.db.get_value("DocType", doctype, "module")
        return local.module_app[scrub(doctype_module)]

    return local_cache("doctype_app", doctype, generator=_lookup)
# module-level registry of loggers handed out by `logger()`, and the
# currently configured log level (None = not set)
loggers = {}
log_level = None
def logger(module=None, with_more_info=True):
    '''Returns a python logger that uses StreamHandler

    :param module: logger name; falls back to 'default'.
    :param with_more_info: include extra request context in log records.'''
    from frappe.utils.logger import get_logger
    return get_logger(module or 'default', with_more_info=with_more_info)
def log_error(message=None, title=None):
    '''Log error to Error Log'''
    # default to the current traceback when no message is supplied
    error_text = as_unicode(message or get_traceback())
    error_doc = get_doc(dict(doctype='Error Log', error=error_text, method=title))
    return error_doc.insert(ignore_permissions=True)
def get_desk_link(doctype, name):
    """Return an HTML anchor to the desk form view of the given document."""
    link_template = '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'
    return link_template.format(doctype, name, _(doctype))
def bold(text):
    """Wrap *text* in an HTML bold tag."""
    return '<b>%s</b>' % (text,)
def safe_eval(code, eval_globals=None, eval_locals=None):
    '''A safer `eval`'''
    # SECURITY NOTE(review): blocking "__" and emptying __builtins__ does NOT
    # make eval() safe against hostile input; `code` must still come from a
    # trusted source.
    if '__' in code:
        throw('Illegal rule {0}. Cannot use "__"'.format(bold(code)))

    if not eval_globals:
        eval_globals = {}
    eval_globals['__builtins__'] = {}

    # expose only a small whitelist of harmless builtins
    eval_globals.update({
        "int": int,
        "float": float,
        "long": int,
        "round": round,
    })

    return eval(code, eval_globals, eval_locals)
def get_system_settings(key):
    """Read a single System Settings value, memoized per request."""
    if key not in local.system_settings:
        local.system_settings[key] = db.get_single_value('System Settings', key)
    return local.system_settings.get(key)
def get_active_domains():
    """Return the list of active domains from Domain Settings."""
    from frappe.core.doctype.domain_settings.domain_settings import get_active_domains
    return get_active_domains()
def get_version(doctype, name, limit = None, head = False, raise_err = True):
    '''
    Returns a list of version information of a given DocType (Applicable only if DocType has changes tracked).

    Example
    >>> frappe.get_version('User', 'foobar@gmail.com')
    >>>
    [
        {
            "version": [version.data],  # Refer Version DocType get_diff method and data attribute
            "user": "admin@gmail.com",  # User that created this version
            "creation": <datetime.datetime>  # Creation timestamp of that object.
        }
    ]

    :param limit: cap on the number of versions returned.
    :param head: order by creation ascending.
    :param raise_err: raise ValueError when the doctype does not track changes.
    '''
    meta = get_meta(doctype)
    if meta.track_changes:
        # doctype/name are passed as bind parameters (previously interpolated
        # into the SQL string -- an injection risk); LIMIT is coerced to int
        # for the same reason
        query = """
            SELECT name from tabVersion
            WHERE ref_doctype = %s AND docname = %s
            {order_by}
            {limit}
        """.format(
            order_by = 'ORDER BY creation' if head else '',
            limit = 'LIMIT {0}'.format(int(limit)) if limit else ''
        )
        names = db.sql(query, (doctype, name))

        from frappe.chat.util import squashify, dictify, safe_json_loads

        versions = []
        for row in names:
            version_name = squashify(row)
            doc = get_doc('Version', version_name)

            data = safe_json_loads(doc.data)
            versions.append(dictify(dict(
                version = data,
                user = doc.owner,
                creation = doc.creation
            )))

        return versions
    else:
        if raise_err:
            raise ValueError('{doctype} has no versions tracked.'.format(
                doctype = doctype
            ))
@whitelist(allow_guest = True)
def ping():
    """Health-check endpoint; callable without login (guest allowed)."""
    return "pong"
def safe_encode(param, encoding = 'utf-8'):
    """Best-effort encode: return *param* encoded to bytes, or unchanged
    when it cannot be encoded (e.g. non-string input)."""
    try:
        return param.encode(encoding)
    except Exception:
        return param
def safe_decode(param, encoding = 'utf-8'):
    """Best-effort decode: return *param* decoded to text, or unchanged
    when it cannot be decoded (e.g. non-bytes input)."""
    try:
        return param.decode(encoding)
    except Exception:
        return param
def parse_json(val):
    """Parse *val* as JSON if possible. Delegates to `frappe.utils.parse_json`."""
    from frappe.utils import parse_json
    return parse_json(val)
def mock(type, size = 1, locale = 'en'):
    """Generate *size* fake values of the given Faker provider *type*.

    Raises ValueError for an unknown provider name. A single result is
    unwrapped (squashified) out of the list."""
    faker = Faker(locale)
    if type not in dir(faker):
        raise ValueError('Not a valid mock type.')

    results = [getattr(faker, type)() for _ in range(size)]

    from frappe.chat.util import squashify
    return squashify(results)
# style: remove a trailing whitespace
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals, print_function
from six import iteritems, binary_type, text_type, string_types
from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json, frappe
from past.builtins import cmp
from faker import Faker
# public
from .exceptions import *
from .utils.jinja import (get_jenv, get_template, render_template, get_email_from_template, get_jloader)
# Harmless for Python 3
# For Python 2 set default encoding to utf-8
# (reload() and sys.setdefaultencoding exist on Python 2 only; the version
# guard keeps Python 3 from ever reaching them)
if sys.version[0] == '2':
    reload(sys)
    sys.setdefaultencoding("utf-8")
# framework version and display title
__version__ = '12.1.0'
__title__ = "Frappe Framework"

# thread/greenlet-local state holder; all request-scoped globals live on this
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
def _(msg, lang=None):
    """Returns translated string in current lang, if exists."""
    from frappe.translate import get_full_dict
    from frappe.utils import strip_html_tags, is_html

    # first use in a bare context: establish a session language
    if not hasattr(local, 'lang'):
        local.lang = lang or 'en'

    if not lang:
        lang = local.lang

    original_msg = msg

    # translation keys are stored without markup
    if is_html(msg):
        msg = strip_html_tags(msg)

    # msg should always be unicode
    msg = as_unicode(msg).strip()

    # fall back to the untranslated text when no entry exists
    return get_full_dict(lang).get(msg) or original_msg
def as_unicode(text, encoding='utf-8'):
    '''Convert to unicode if required.

    :param text: value to coerce; None becomes the empty string, bytes are
        decoded with *encoding*, anything else is passed through text_type().
    '''
    if isinstance(text, text_type):
        return text
    elif text is None:
        # identity check (was `text==None`, PEP 8 E711)
        return ''
    elif isinstance(text, binary_type):
        return text_type(text, encoding)
    else:
        return text_type(text)
def get_lang_dict(fortype, name=None):
    """Returns the translated language dict for the given type and name.

    :param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
    :param name: name of the document for which assets are to be returned."""
    from frappe.translate import get_dict
    return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
	"""Guess and set user language for the session. `frappe.local.lang`"""
	from frappe.translate import get_user_lang
	# NOTE(review): `user_language` is accepted but never used here --
	# presumably kept for backward compatibility; confirm before removing.
	local.lang = get_user_lang(user)
# local-globals
# Proxies bound into `local`: e.g. `frappe.db` always resolves to the
# database connection of the request currently being served.
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None, new_site=False):
	"""Initialize frappe for the current site. Reset thread locals `frappe.local`"""
	# idempotent: a repeated init() for an already-initialised local is a no-op
	if getattr(local, "initialised", None):
		return
	if not sites_path:
		sites_path = '.'
	# per-request log buffers
	local.error_log = []
	local.message_log = []
	local.debug_log = []
	local.realtime_log = []
	# operation-scoped flags consulted throughout the framework
	local.flags = _dict({
		"ran_schedulers": [],
		"currently_saving": [],
		"redirect_location": "",
		"in_install_db": False,
		"in_install_app": False,
		"in_import": False,
		"in_test": False,
		"mute_messages": False,
		"ignore_links": False,
		"mute_emails": False,
		"has_dataurl": False,
		"new_site": new_site
	})
	local.rollback_observers = []
	local.test_objects = {}
	# site identity and filesystem layout
	local.site = site
	local.sites_path = sites_path
	local.site_path = os.path.join(sites_path, site)
	local.request_ip = None
	local.response = _dict({"docs":[]})
	local.task_id = None
	# site configuration must be loaded before anything that reads conf
	local.conf = _dict(get_site_config())
	local.lang = local.conf.lang or "en"
	local.lang_full_dict = None
	local.module_app = None
	local.app_modules = None
	local.system_settings = _dict()
	# session/user state -- populated later by connect()/set_user()
	local.user = None
	local.user_perms = None
	local.session = None
	local.role_permissions = {}
	local.valid_columns = {}
	local.new_doc_templates = {}
	local.link_count = {}
	# jinja environment and request-local caches
	local.jenv = None
	local.jloader =None
	local.cache = {}
	local.document_cache = {}
	local.meta_cache = {}
	local.form_dict = _dict()
	local.session = _dict()
	# requires local.conf and local.site_path to be set already
	setup_module_map()
	local.initialised = True
def connect(site=None, db_name=None):
	"""Connect to the site database instance and become Administrator.

	:param site: If given, `frappe.init` is called for it first.
	:param db_name: Optional. Defaults to `db_name` from `site_config.json`."""
	from frappe.database import get_db
	if site:
		init(site)
	local.db = get_db(user=db_name or local.conf.db_name)
	# fresh connections always start as the Administrator user
	set_user("Administrator")
def connect_replica():
	"""Open a connection to the read replica and make it the active `db`.

	The primary connection is stashed in `local.primary_db` so that
	`read_only` can restore it after the wrapped call."""
	from frappe.database import get_db
	user = local.conf.db_name
	password = local.conf.db_password
	# the replica may use its own credentials
	if local.conf.different_credentials_for_replica:
		user = local.conf.replica_db_name
		password = local.conf.replica_db_password
	local.replica_db = get_db(host=local.conf.replica_host, user=user, password=password)
	# swap db connections
	local.primary_db = local.db
	local.db = local.replica_db
def get_site_config(sites_path=None, site_path=None):
	"""Returns `site_config.json` combined with `sites/common_site_config.json`.
	`site_config` is a set of site wide settings like database name, password, email etc."""
	config = {}
	sites_path = sites_path or getattr(local, "sites_path", None)
	site_path = site_path or getattr(local, "site_path", None)
	# common config is applied first so the site's own config overrides it
	if sites_path:
		common_site_config = os.path.join(sites_path, "common_site_config.json")
		if os.path.exists(common_site_config):
			config.update(get_file_json(common_site_config))
	if site_path:
		site_config = os.path.join(site_path, "site_config.json")
		if os.path.exists(site_config):
			config.update(get_file_json(site_config))
		elif local.site and not local.flags.new_site:
			# a named site without site_config.json is fatal, except
			# while the site is still being created
			print("{0} does not exist".format(local.site))
			sys.exit(1)
			#raise IncorrectSitePath, "{0} does not exist".format(site_config)
	return _dict(config)
def get_conf(site=None):
	"""Return the active site's config; with no active site, bootstrap a
	temporary (empty) site to read `common_site_config.json`."""
	if hasattr(local, 'conf'):
		return local.conf
	# no site initialised yet -- load config through a throwaway context
	with init_site(site):
		return local.conf
class init_site:
	"""Context manager that initializes `frappe.local` for a site and
	tears it down on exit.

	With ``site=None`` it initializes the empty site (``''``) so that
	only `common_site_config.json` is loaded."""
	def __init__(self, site=None):
		self.site = site or ''
	def __enter__(self):
		init(self.site)
		return local
	def __exit__(self, type, value, traceback):
		destroy()
def destroy():
	"""Close the database connection and release the werkzeug local."""
	if db:
		db.close()
	# drop every thread-local attribute set during this request
	release_local(local)
# memcache
# process-wide redis connection, created lazily by cache()
redis_server = None
def cache():
	"""Return the shared redis cache connection (created on first use)."""
	global redis_server
	if not redis_server:
		from frappe.utils.redis_wrapper import RedisWrapper
		url = conf.get('redis_cache') or "redis://localhost:11311"
		redis_server = RedisWrapper.from_url(url)
	return redis_server
def get_traceback():
	"""Return the current exception's traceback as a string."""
	from frappe.utils import get_traceback as _get_traceback
	return _get_traceback()
def errprint(msg):
	"""Append `msg` to the error log (sent back as `exc` in the response).

	Also echoes to stdout outside of web requests or in developer mode.
	:param msg: Message."""
	msg = as_unicode(msg)
	if (not request) or ("cmd" not in local.form_dict) or conf.developer_mode:
		print(msg)
	error_log.append({"exc": msg})
def log(msg):
	"""Append `msg` to the request's debug log.

	:param msg: Message."""
	# echo to stdout only outside web requests and when logging is enabled
	if not request and (conf.get("logging") or False):
		print(repr(msg))
	debug_log.append(as_unicode(msg))
def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False, primary_action=None):
	"""Print a message to the user (via HTTP response).
	Messages are sent in the `__server_messages` property in the
	response JSON and shown in a pop-up / modal.
	:param msg: Message.
	:param title: [optional] Message title.
	:param raise_exception: [optional] Raise given exception and show message.
	:param as_table: [optional] If `msg` is a list of lists, render as HTML table.
	:param indicator: [optional] Indicator color shown beside the title.
	:param alert: [optional] Show as a passing alert instead of a modal.
	:param primary_action: [optional] Bind a primary server/client side action.
	"""
	from frappe.utils import encode
	msg = safe_decode(msg)
	out = _dict(message=msg)
	def _raise_exception():
		# raise the requested exception (class, instance-truthy flag, or 1)
		# after the message has been queued; optionally roll back first
		if raise_exception:
			if flags.rollback_on_exception:
				db.rollback()
			import inspect
			if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
				raise raise_exception(msg)
			else:
				# raise_exception was truthy but not an Exception class
				raise ValidationError(msg)
	# muted output (e.g. during patches/imports): only the exception matters
	if flags.mute_messages:
		_raise_exception()
		return
	if as_table and type(msg) in (list, tuple):
		out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'
	if flags.print_messages and out.msg:
		print("Message: " + repr(out.msg).encode("utf-8"))
	if title:
		out.title = title
	# raising implies an error indicator unless the caller chose one
	if not indicator and raise_exception:
		indicator = 'red'
	if indicator:
		out.indicator = indicator
	if alert:
		out.alert = 1
	if primary_action:
		out.primary_action = primary_action
	message_log.append(json.dumps(out))
	# expose the exception type so the client can special-case it
	if raise_exception and hasattr(raise_exception, '__name__'):
		local.response['exc_type'] = raise_exception.__name__
	_raise_exception()
def clear_messages():
	"""Discard all messages queued for the current response."""
	local.message_log = []
def clear_last_message():
	"""Drop the most recently queued message, if any."""
	if local.message_log:
		local.message_log = local.message_log[:-1]
def throw(msg, exc=ValidationError, title=None):
	"""Raise an exception after queueing `msg` for display (`msgprint`).

	:param msg: Message.
	:param exc: Exception class. Default `frappe.ValidationError`"""
	msgprint(msg, title=title, indicator='red', raise_exception=exc)
def emit_js(js, user=False, **kwargs):
	"""Push `js` to the browser of `user` for evaluation (via realtime)."""
	from frappe.realtime import publish_realtime
	# NOTE: equality (not identity) check kept as in the original
	if user == False:
		user = session.user
	publish_realtime('eval_js', js, user=user, **kwargs)
def create_folder(path, with_init=False):
	"""Ensure the folder `path` exists, optionally with an `__init__.py`.

	:param path: Folder path.
	:param with_init: Create `__init__.py` in the new folder."""
	from frappe.utils import touch_file
	if os.path.exists(path):
		return
	os.makedirs(path)
	if with_init:
		touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
	"""Switch the active session user and reset all per-user cached state.

	:param username: **User** name to set as current user."""
	local.session.user = username
	local.session.sid = username
	local.session.data = _dict()
	# caches below may hold user-specific data -- start fresh
	local.cache = {}
	local.form_dict = _dict()
	local.jenv = None
	local.role_permissions = {}
	local.new_doc_templates = {}
	local.user_perms = None
def get_user():
	"""Return (lazily constructing) `UserPermissions` for the session user."""
	from frappe.utils.user import UserPermissions
	if not local.user_perms:
		local.user_perms = UserPermissions(local.session.user)
	return local.user_perms
def get_roles(username=None):
	"""Return the roles of `username` (default: the current session user)."""
	# without a session only Guest access is possible
	if not local.session:
		return ["Guest"]
	if not username:
		return get_user().get_roles()
	import frappe.permissions
	return frappe.permissions.get_roles(username)
def get_request_header(key, default=None):
	"""Return the value of an HTTP request header.

	:param key: HTTP header key.
	:param default: Default value."""
	return request.headers.get(key, default)
def sendmail(recipients=None, sender="", subject="No Subject", message="No Message",
		as_markdown=False, delayed=True, reference_doctype=None, reference_name=None,
		unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
		attachments=None, content=None, doctype=None, name=None, reply_to=None,
		cc=None, bcc=None, message_id=None, in_reply_to=None, send_after=None, expose_recipients=None,
		send_priority=1, communication=None, retry=1, now=None, read_receipt=None, is_notification=False,
		inline_images=None, template=None, args=None, header=None, print_letterhead=False):
	"""Send email using user's default **Email Account** or global default **Email Account**.

	:param recipients: List of recipients.
	:param sender: Email sender. Default is current user.
	:param subject: Email Subject.
	:param message: (or `content`) Email Content.
	:param as_markdown: Convert content markdown to HTML.
	:param delayed: Send via scheduled email sender **Email Queue**. Don't send immediately. Default is true
	:param send_priority: Priority for Email Queue, default 1.
	:param reference_doctype: (or `doctype`) Append as communication to this DocType.
	:param reference_name: (or `name`) Append as communication to this document name.
	:param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
	:param unsubscribe_params: Unsubscribe paramaters to be loaded on the unsubscribe_method [optional] (dict).
	:param attachments: List of attachments.
	:param reply_to: Reply-To Email Address.
	:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
	:param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
	:param send_after: Send after the given datetime.
	:param expose_recipients: Display all recipients in the footer message - "This email was sent to"
	:param communication: Communication link to be set in Email Queue record
	:param inline_images: List of inline images as {"filename", "filecontent"}. All src properties will be replaced with random Content-Id
	:param template: Name of html template from templates/emails folder
	:param args: Arguments for rendering the template
	:param header: Append header in email
	"""
	# bug fix: the old `recipients=[]`, `cc=[]`, `bcc=[]` defaults were
	# shared mutable objects -- a callee mutating them would leak state
	# into every later sendmail() call. Normalize None to fresh lists.
	if recipients is None:
		recipients = []
	if cc is None:
		cc = []
	if bcc is None:
		bcc = []
	if not sender:
		sender = frappe.db.get_value('User', frappe.session.user, 'email')
	text_content = None
	if template:
		message, text_content = get_email_from_template(template, args)
	# explicit `content` wins over `message`/template output
	message = content or message
	if as_markdown:
		message = frappe.utils.md_to_html(message)
	# immediate sends are queued with now=True
	if not delayed:
		now = True
	from frappe.email import queue
	queue.send(recipients=recipients, sender=sender,
		subject=subject, message=message, text_content=text_content,
		reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
		unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
		attachments=attachments, reply_to=reply_to, cc=cc, bcc=bcc, message_id=message_id, in_reply_to=in_reply_to,
		send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority,
		communication=communication, now=now, read_receipt=read_receipt, is_notification=is_notification,
		inline_images=inline_images, header=header, print_letterhead=print_letterhead)
# Registries populated by @frappe.whitelist: every HTTP-exposed method,
# the subset callable by guests, and the subset whose output is xss-safe.
whitelisted = []
guest_methods = []
xss_safe_methods = []
def whitelist(allow_guest=False, xss_safe=False):
	"""
	Decorator for whitelisting a function and making it accessible via HTTP.
	Standard request will be `/api/method/[path.to.method]`

	:param allow_guest: Allow non logged-in user to access this method.
	:param xss_safe: Mark the method's output as safe to render unescaped.

	Use as:

		@frappe.whitelist()
		def myfunc(param1, param2):
			pass
	"""
	def register(fn):
		global whitelisted, guest_methods, xss_safe_methods
		whitelisted.append(fn)
		if allow_guest:
			guest_methods.append(fn)
		if xss_safe:
			xss_safe_methods.append(fn)
		return fn
	return register
def read_only():
	"""Decorator: route the wrapped call's reads through the replica
	connection when `read_from_replica` is configured, restoring the
	primary connection afterwards."""
	def innfn(fn):
		def wrapper_fn(*args, **kwargs):
			if conf.read_from_replica:
				connect_replica()
			try:
				retval = fn(*args, **get_newargs(fn, kwargs))
			except:
				# re-raised unchanged; kept so the `finally` below still
				# restores the primary connection on failure
				raise
			finally:
				# swap the primary connection back (set by connect_replica)
				if local and hasattr(local, 'primary_db'):
					local.db.close()
					local.db = local.primary_db
			return retval
		return wrapper_fn
	return innfn
def only_for(roles, message=False):
	"""Raise `frappe.PermissionError` unless the user has one of `roles`.

	:param roles: Role name, or list/tuple of role names to check.
	:param message: Also queue an explanatory message for the user."""
	# permission checks are skipped entirely under tests
	if local.flags.in_test:
		return
	if not isinstance(roles, (tuple, list)):
		roles = (roles,)
	roles = set(roles)
	if roles.intersection(set(get_roles())):
		return
	if message:
		msgprint(_('Only for {}'.format(', '.join(roles))))
	raise PermissionError
def get_domain_data(module):
	"""Return the `data` dict declared by the domain hook for `module`
	(empty `_dict` when no such domain is hooked)."""
	try:
		domain_data = get_hooks('domains')
		if module not in domain_data:
			return _dict()
		return _dict(get_attr(get_hooks('domains')[module][0] + '.data'))
	except ImportError:
		# missing domain modules are tolerated in tests only
		if local.flags.in_test:
			return _dict()
		raise
def clear_cache(user=None, doctype=None):
	"""Clear **User**, **DocType** or global cache.
	:param user: If user is given, only user cache is cleared.
	:param doctype: If doctype is given, only DocType cache is cleared."""
	import frappe.cache_manager
	if doctype:
		frappe.cache_manager.clear_doctype_cache(doctype)
		# schema changed -> client-side assets must be rebuilt
		reset_metadata_version()
	elif user:
		frappe.cache_manager.clear_user_cache(user)
	else: # everything
		from frappe import translate
		frappe.cache_manager.clear_user_cache()
		translate.clear_cache()
		reset_metadata_version()
		local.cache = {}
		local.new_doc_templates = {}
		# apps may register their own cache-clearing hooks
		for fn in get_hooks("clear_cache"):
			get_attr(fn)()
	# role permissions are derived data -- always reset
	local.role_permissions = {}
def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False):
	"""Check whether `user` has `ptype` permission on `doctype`/`doc`.

	:param doctype: DocType for which permission is to be checked.
	:param ptype: one of `read`, `write`, `create`, `submit`, `cancel`, `amend`. Default: `read`.
	:param doc: [optional] check User permissions against this document.
	:param user: [optional] user to check for. Default: current user.
	:param throw: raise `frappe.PermissionError` when not permitted."""
	if doc and not doctype:
		doctype = doc.doctype
	import frappe.permissions
	out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)
	if throw and not out:
		# include the document name in the error when one was given
		if doc:
			frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name))
		else:
			frappe.throw(_("No permission for {0}").format(doctype))
	return out
def has_website_permission(doc=None, ptype='read', user=None, verbose=False, doctype=None):
	"""Raises `frappe.PermissionError` if not permitted.
	:param doctype: DocType for which permission is to be check.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param doc: Checks User permissions for given doc.
	:param user: [optional] Check for given user. Default: current user."""
	if not user:
		user = session.user
	if doc:
		if isinstance(doc, string_types):
			doc = get_doc(doctype, doc)
		doctype = doc.doctype
		if doc.flags.ignore_permissions:
			return True
		# check permission in controller
		if hasattr(doc, 'has_website_permission'):
			return doc.has_website_permission(ptype, user, verbose=verbose)
	hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
	if hooks:
		# every hooked checker must agree
		for method in hooks:
			result = call(method, doc=doc, ptype=ptype, user=user, verbose=verbose)
			# if even a single permission check is Falsy
			if not result:
				return False
		# else it is Truthy
		return True
	else:
		# no hooks registered -> deny by default
		return False
def is_table(doctype):
	"""Return True if `doctype` is a child table (`istable` is set)."""
	def _fetch_tables():
		return db.sql_list("select name from tabDocType where istable=1")
	# the table list is cached in redis under "is_table"
	return doctype in cache().get_value("is_table", _fetch_tables)
def get_precision(doctype, fieldname, currency=None, doc=None):
	"""Return the rounding precision for `doctype`.`fieldname`."""
	from frappe.model.meta import get_field_precision
	df = get_meta(doctype).get_field(fieldname)
	return get_field_precision(df, doc, currency)
def generate_hash(txt=None, length=None):
	"""Return a random sha224 hex digest of `txt` + timestamp + random chars.

	:param txt: optional text mixed into the hash seed.
	:param length: optionally truncate the digest to this many characters."""
	import hashlib, time
	from .utils import random_string
	seed = (txt or "") + repr(time.time()) + repr(random_string(8))
	digest = hashlib.sha224(seed.encode()).hexdigest()
	return digest[:length] if length else digest
def reset_metadata_version():
	"""Rotate and return `metadata_version` (the client JS build hash)."""
	version_hash = generate_hash()
	cache().set_value("metadata_version", version_hash)
	return version_hash
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
	"""Return a fresh document of `doctype` with defaults applied.

	:param doctype: DocType of the new document.
	:param parent_doc: [optional] add to this parent document.
	:param parentfield: [optional] add against this `parentfield`."""
	from frappe.model.create_new import get_new_doc
	return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value=None):
	"""Set one field value on a document (via `frappe.client.set_value`)."""
	import frappe.client
	return frappe.client.set_value(doctype, docname, fieldname, value)
def get_cached_doc(*args, **kwargs):
	"""Like `get_doc`, but consults the request-local and redis document
	caches first. Only (doctype, name-string) calls are cacheable;
	anything else falls straight through to `get_doc`."""
	if args and len(args) > 1 and isinstance(args[1], text_type):
		key = get_document_cache_key(args[0], args[1])
		# local cache
		doc = local.document_cache.get(key)
		if doc:
			return doc
		# redis cache
		doc = cache().hget('document_cache', key)
		if doc:
			doc = get_doc(doc)
			# promote the redis hit into the request-local cache
			local.document_cache[key] = doc
			return doc
	# database
	doc = get_doc(*args, **kwargs)
	return doc
def get_document_cache_key(doctype, name):
	"""Return the cache key ("doctype::name") for a document."""
	return '%s::%s' % (doctype, name)
def clear_document_cache(doctype, name):
	"""Drop a document from the local and redis document caches."""
	cache().hdel("last_modified", doctype)
	key = get_document_cache_key(doctype, name)
	local.document_cache.pop(key, None)
	cache().hdel('document_cache', key)
def get_cached_value(doctype, name, fieldname, as_dict=False):
	"""Read one field (string) or several fields (iterable) from the
	cached copy of a document."""
	doc = get_cached_doc(doctype, name)
	if isinstance(fieldname, string_types):
		if as_dict:
			throw('Cannot make dict for single fieldname')
		return doc.get(fieldname)
	values = [doc.get(each) for each in fieldname]
	return _dict(zip(fieldname, values)) if as_dict else values
def get_doc(*args, **kwargs):
	"""Return a `frappe.model.document.Document` object of the given type and name.

	:param arg1: DocType name as string **or** document JSON.
	:param arg2: [optional] Document name as string.

	Examples:

		# insert a new document
		todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
		todo.insert()

		# open an existing document
		todo = frappe.get_doc("ToDo", "TD0001")
	"""
	import frappe.model.document
	doc = frappe.model.document.get_doc(*args, **kwargs)
	# keep both the request-local and the redis document caches warm
	if args and len(args) > 1:
		key = get_document_cache_key(args[0], args[1])
		local.document_cache[key] = doc
		cache().hset('document_cache', key, doc.as_dict())
	return doc
def get_last_doc(doctype):
	"""Return the most recently created document of `doctype`.

	:raises DoesNotExistError: when no document of `doctype` exists."""
	rows = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
	if not rows:
		raise DoesNotExistError
	return get_doc(doctype, rows[0].name)
def get_single(doctype):
	"""Return the Document for a Single doctype (name == doctype)."""
	return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
	"""Return the `frappe.model.meta.Meta` instance for `doctype`."""
	import frappe.model.meta
	return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
	"""Import and return the python module backing `doctype`."""
	import frappe.modules
	return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
		ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True):
	"""Delete a document (via `frappe.model.delete_doc.delete_doc`).

	:param doctype: DocType of document to be deleted.
	:param name: Name of document to be deleted.
	:param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
	:param ignore_doctypes: Ignore if child table is one of these.
	:param for_reload: Call `before_reload` trigger before deleting.
	:param ignore_permissions: Ignore user permissions."""
	from frappe.model.delete_doc import delete_doc as _delete_doc
	_delete_doc(doctype, name, force, ignore_doctypes, for_reload,
		ignore_permissions, flags, ignore_on_trash, ignore_missing)
def delete_doc_if_exists(doctype, name, force=0):
	"""Delete the given document if it exists; no-op otherwise."""
	if not db.exists(doctype, name):
		return
	delete_doc(doctype, name, force=force)
def reload_doctype(doctype, force=False, reset_permissions=False):
	"""Reload a DocType from its model (`[module]/[doctype]/[name]/[name].json`) files."""
	module = db.get_value("DocType", doctype, "module")
	reload_doc(scrub(module), "doctype", scrub(doctype),
		force=force, reset_permissions=reset_permissions)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
	"""Reload a Document from its model (`[module]/[doctype]/[name]/[name].json`) files.

	:param module: Module name.
	:param dt: DocType name.
	:param dn: Document name.
	:param force: Reload even if `modified` timestamp matches.
	"""
	import frappe.modules
	return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions)
def rename_doc(*args, **kwargs):
	"""Rename a document (via `frappe.model.rename_doc.rename_doc`)."""
	from frappe.model.rename_doc import rename_doc as _rename_doc
	return _rename_doc(*args, **kwargs)
def get_module(modulename):
	"""Import and return the python module named `modulename`
	(thin wrapper over `importlib.import_module`)."""
	return importlib.import_module(modulename)
def scrub(txt):
	"""Return a slug of `txt`: spaces and hyphens become underscores,
	everything lowercased. e.g. `Sales Order` becomes `sales_order`."""
	return txt.lower().replace(' ', '_').replace('-', '_')
def unscrub(txt):
	"""Return a title of `txt`: underscores and hyphens become spaces,
	words capitalised. e.g. `sales_order` becomes `Sales Order`."""
	spaced = txt.replace('_', ' ').replace('-', ' ')
	return spaced.title()
def get_module_path(module, *joins):
	"""Return the filesystem path of the given module name.

	:param module: Module name.
	:param joins: Join additional path elements using `os.path.join`."""
	module = scrub(module)
	app = local.module_app[module]
	return get_pymodule_path(app + "." + module, *joins)
def get_app_path(app_name, *joins):
	"""Return the filesystem path of the given app.

	:param app_name: App name.
	:param joins: Join additional path elements using `os.path.join`."""
	return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
	"""Return the path of the current site, joining any extra segments."""
	return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
	"""Return the directory of a python module, joining extra segments.

	:param modulename: Python module name.
	:param joins: Join additional path elements using `os.path.join`.
	Segments are slug-ified unless "public" is among them (public asset
	paths keep their original form)."""
	if "public" not in joins:
		joins = [scrub(part) for part in joins]
	base = os.path.dirname(get_module(scrub(modulename)).__file__)
	return os.path.join(base, *joins)
def get_module_list(app_name):
	"""Return the modules listed in the given app's `modules.txt`."""
	app_dir = os.path.dirname(get_module(app_name).__file__)
	return get_file_items(os.path.join(app_dir, "modules.txt"))
def get_all_apps(with_internal_apps=True, sites_path=None):
	"""Return the list of all apps from `sites/apps.txt`, with frappe
	always placed first."""
	if not sites_path:
		sites_path = local.sites_path
	apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)
	if with_internal_apps:
		# the site-level apps.txt may add site-specific (internal) apps
		for internal_app in get_file_items(os.path.join(local.site_path, "apps.txt")):
			if internal_app not in apps:
				apps.append(internal_app)
	# frappe must load before every other app
	if "frappe" in apps:
		apps.remove("frappe")
	apps.insert(0, 'frappe')
	return apps
def get_installed_apps(sort=False, frappe_last=False):
	"""Get list of installed apps in current site."""
	# NOTE(review): the default True means an *unset* flag also
	# short-circuits to [] -- looks intended for early bootstrap (before
	# init sets flags), but confirm it is not an inverted default.
	if getattr(flags, "in_install_db", True):
		return []
	if not db:
		connect()
	installed = json.loads(db.get_global("installed_apps") or "[]")
	if sort:
		# order by position in apps.txt
		installed = [app for app in get_all_apps(True) if app in installed]
	if frappe_last:
		# move frappe to the end (e.g. for uninstall ordering)
		if 'frappe' in installed:
			installed.remove('frappe')
		installed.append('frappe')
	return installed
def get_doc_hooks():
	'''Return hooked `doc_events` methods, expanding tuple keys into one
	entry per doctype. Cached on `local` for the request.'''
	if not hasattr(local, 'doc_events_hooks'):
		expanded = {}
		for key, value in iteritems(get_hooks('doc_events', {})):
			# a tuple key registers the same hooks for several doctypes
			doctypes = key if isinstance(key, tuple) else (key,)
			for doctype in doctypes:
				append_hook(expanded, doctype, value)
		local.doc_events_hooks = expanded
	return local.doc_events_hooks
def get_hooks(hook=None, default=None, app_name=None):
	"""Get hooks via `app/hooks.py`

	:param hook: Name of the hook. Will gather all hooks for this name and return as a list.
	:param default: Default if no hook found.
	:param app_name: Filter by app."""
	def load_app_hooks(app_name=None):
		hooks = {}
		for app in [app_name] if app_name else get_installed_apps(sort=True):
			app = "frappe" if app=="webnotes" else app
			try:
				app_hooks = get_module(app + ".hooks")
			except ImportError:
				if local.flags.in_install_app:
					# bug fix: was `pass`, which fell through to the
					# print/exit/raise below instead of ignoring the
					# missing app (e.g. while restoring a backup)
					continue
				# bug fix: report the app that failed to import, not the
				# `app_name` filter (which is usually None here)
				print('Could not find app "{0}"'.format(app))
				if not request:
					sys.exit(1)
				raise
			for key in dir(app_hooks):
				if not key.startswith("_"):
					append_hook(hooks, key, getattr(app_hooks, key))
		return hooks
	# developer mode always reads hooks fresh from disk
	no_cache = conf.developer_mode or False
	if app_name:
		hooks = _dict(load_app_hooks(app_name))
	else:
		if no_cache:
			hooks = _dict(load_app_hooks())
		else:
			hooks = _dict(cache().get_value("app_hooks", load_app_hooks))
	if hook:
		return hooks.get(hook) or (default if default is not None else [])
	else:
		return hooks
def append_hook(target, key, value):
	'''Append a hook `value` under `key` in the `target` dict.

	Dict values (like doc_events) are merged recursively, each leaf
	becoming a list; scalar or list values are accumulated into a list
	kept against the key.
	'''
	if isinstance(value, dict):
		# dict? recurse, collecting lists against each inner key
		inner = target.setdefault(key, {})
		for subkey in value:
			append_hook(inner, subkey, value[subkey])
	else:
		bucket = target.setdefault(key, [])
		bucket.extend(value if isinstance(value, list) else [value])
def setup_module_map():
	"""Rebuild map of all modules (internal)."""
	_cache = cache()
	# a db_name implies a provisioned site, so the maps may be cached
	if conf.db_name:
		local.app_modules = _cache.get_value("app_modules")
		local.module_app = _cache.get_value("module_app")
	if not (local.app_modules and local.module_app):
		local.module_app, local.app_modules = {}, {}
		for app in get_all_apps(True):
			# historical rename: webnotes -> frappe
			if app=="webnotes": app="frappe"
			local.app_modules.setdefault(app, [])
			for module in get_module_list(app):
				module = scrub(module)
				local.module_app[module] = app
				local.app_modules[app].append(module)
		if conf.db_name:
			_cache.set_value("app_modules", local.app_modules)
			_cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
	"""Return the lines of a text file as a list.

	By default, blank lines and `#` comment lines are dropped; pass
	``ignore_empty_lines=False`` to keep every line."""
	import frappe.utils
	content = read_file(path, raise_not_found=raise_not_found)
	if not content:
		return []
	content = frappe.utils.strip(content)
	return [line.strip() for line in content.splitlines()
		if (not ignore_empty_lines) or (line.strip() and not line.startswith("#"))]
def get_file_json(path):
	"""Read the file at `path` and return its parsed JSON content."""
	with open(path, 'r') as fileobj:
		return json.load(fileobj)
def read_file(path, raise_not_found=False):
	"""Open a file and return its content as Unicode."""
	# paths are normalised to utf-8 byte strings before use
	# NOTE(review): on python 3 this opens via a *bytes* path; fine on
	# POSIX, but confirm behaviour on Windows before relying on it there.
	if isinstance(path, text_type):
		path = path.encode("utf-8")
	if os.path.exists(path):
		with open(path, "r") as f:
			return as_unicode(f.read())
	elif raise_not_found:
		raise IOError("{} Not Found".format(path))
	else:
		# missing file is not an error unless the caller asked for one
		return None
def get_attr(method_string):
	"""Resolve a dotted path ("app.module.fn") to the python object it names.

	:raises AppNotInstalledError: when the app prefix is not installed."""
	app_name = method_string.split(".")[0]
	if not local.flags.in_install and app_name not in get_installed_apps():
		throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)
	parts = method_string.split('.')
	modulename = '.'.join(parts[:-1])
	methodname = parts[-1]
	return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
	"""Call `fn` (dotted name or callable), passing only the keyword
	arguments its signature accepts."""
	if isinstance(fn, string_types):
		fn = get_attr(fn)
	return fn(*args, **get_newargs(fn, kwargs))
def get_newargs(fn, kwargs):
	"""Return the subset of `kwargs` that `fn` can accept.

	A key is kept when it is a named parameter of `fn`, or when `fn`
	takes arbitrary keyword arguments (``**kwargs``). The special key
	``"flags"`` is always dropped. `fn` may pre-declare its argument
	names via an `fnargs` attribute (used by wrapper callables)."""
	if hasattr(fn, 'fnargs'):
		fnargs = fn.fnargs
		# bug fix: varkw was previously left unbound on this path,
		# raising NameError in the loop below
		varkw = None
	elif hasattr(inspect, 'getfullargspec'):
		# python 3: one introspection call; also avoids inspect.getargspec,
		# which is deprecated and removed in python 3.11
		spec = inspect.getfullargspec(fn)
		fnargs = spec.args
		varkw = spec.varkw
	else:
		# python 2 fallback
		fnargs, varargs, varkw, defaults = inspect.getargspec(fn)
	newargs = {}
	for a in kwargs:
		if (a in fnargs) or varkw:
			newargs[a] = kwargs.get(a)
	newargs.pop("flags", None)
	return newargs
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
	"""Create a new **Property Setter** (for overriding DocType and DocField properties).
	If doctype is not specified, it will create a property setter for all fields with the
	given fieldname"""
	args = _dict(args)
	if not args.doctype_or_field:
		args.doctype_or_field = 'DocField'
		# default the property's type from the DocField meta itself
		if not args.property_type:
			args.property_type = db.get_value('DocField',
				{'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data'
	if not args.doctype:
		# no doctype: apply to every doctype that has this fieldname
		doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname)
	else:
		doctype_list = [args.doctype]
	for doctype in doctype_list:
		# per-doctype fallback for the property type
		if not args.property_type:
			args.property_type = db.get_value('DocField',
				{'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data'
		ps = get_doc({
			'doctype': "Property Setter",
			'doctype_or_field': args.doctype_or_field,
			'doc_type': doctype,
			'field_name': args.fieldname,
			'property': args.property,
			'value': args.value,
			'property_type': args.property_type or "Data",
			'__islocal': 1
		})
		ps.flags.ignore_validate = ignore_validate
		ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
		ps.validate_fieldtype_change()
		ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
	"""Import a document from a file via the Data Import module."""
	from frappe.core.doctype.data_import import data_import
	data_import.import_doc(path, ignore_links=ignore_links,
		ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
	""" No_copy fields also get copied."""
	import copy
	def remove_no_copy_fields(d):
		# blank out fields flagged no_copy on the doctype meta
		for df in d.meta.get("fields", {"no_copy": 1}):
			if hasattr(d, df.fieldname):
				d.set(df.fieldname, None)
	fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by']
	# tests keep docstatus so submitted fixtures stay submitted
	if not local.flags.in_test:
		fields_to_clear.append("docstatus")
	if not isinstance(doc, dict):
		d = doc.as_dict()
	else:
		d = doc
	newdoc = get_doc(copy.deepcopy(d))
	newdoc.set("__islocal", 1)
	# identity and amendment fields never carry over to a copy
	for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']):
		newdoc.set(fieldname, None)
	if not ignore_no_copy:
		remove_no_copy_fields(newdoc)
	# child rows become new local rows as well
	for i, d in enumerate(newdoc.get_all_children()):
		d.set("__islocal", 1)
		for fieldname in fields_to_clear:
			d.set(fieldname, None)
		if not ignore_no_copy:
			remove_no_copy_fields(d)
	return newdoc
def compare(val1, condition, val2):
	"""Compare two values using `frappe.utils.compare`.

	`condition` may be one of: "^", "in", "not in", "=", "!=",
	">", "<", ">=", "<=", "not None", "None".
	"""
	import frappe.utils
	return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None,
    context=None, indicator_color=None, primary_action='/', primary_label = None, fullpage=False,
    width=None, template='message'):
    """Render the response as an HTML message page instead of JSON
    (used for permission errors, confirmations, etc.).

    :param title: Page title and heading.
    :param html: Message body to be shown.
    :param success: Treat as a success alert (green indicator).
    :param http_status_code: HTTP status code to send.
    :param context: Web template context dict (mutated in place).
    :param indicator_color: Indicator colour; derived from success/status when omitted.
    :param primary_action: Route for the primary button (default "/").
    :param primary_label: Label for the primary button (default "Home").
    :param fullpage: Hide header / footer.
    :param width: Width of the message card in pixels.
    :param template: View template to render (default "message").
    """
    local.message_title = title
    local.message = html
    local.response['type'] = 'page'
    local.response['route'] = template
    local.no_cache = 1
    if http_status_code:
        local.response['http_status_code'] = http_status_code

    context = context or {}
    if not indicator_color:
        # green for success, red for error statuses, blue otherwise
        if success:
            indicator_color = 'green'
        elif http_status_code and http_status_code > 300:
            indicator_color = 'red'
        else:
            indicator_color = 'blue'

    context.update({
        'indicator_color': indicator_color,
        'primary_label': primary_label,
        'primary_action': primary_action,
        'error_code': http_status_code,
        'fullpage': fullpage,
    })
    if width:
        context['card_width'] = width

    local.response['context'] = context
def redirect_to_message(title, html, http_status_code=None, context=None, indicator_color=None):
    """Redirect to ``/message?id=<random>`` showing a detailed message page.

    Similar to :func:`respond_as_web_page`, but stores the message in the cache
    (60s TTL) and redirects, so it survives the round-trip.

    :param title: Page title and heading.
    :param html: Message body to be shown.
    :param http_status_code: HTTP status code (default 200).
    :returns: The message URL when called from an AJAX request, else None.
    """
    message_id = generate_hash(length=8)
    payload = {
        'context': context or {},
        'http_status_code': http_status_code or 200,
    }
    payload['context'].update(header=title, title=title, message=html)
    if indicator_color:
        payload['context']['indicator_color'] = indicator_color

    cache().set_value("message_id:{0}".format(message_id), payload, expires_in_sec=60)
    location = '/message?id={0}'.format(message_id)

    if getattr(local, 'is_ajax', False):
        # AJAX callers perform the redirect themselves.
        return location
    local.response["type"] = "redirect"
    local.response["location"] = location
def build_match_conditions(doctype, as_condition=True):
    """Return the User-permission (match) restrictions for *doctype*,
    as an SQL condition string when *as_condition* is True, else as a list."""
    from frappe.desk import reportview
    return reportview.build_match_conditions(doctype, as_condition=as_condition)
def get_list(doctype, *args, **kwargs):
    """Query the database via `frappe.model.db_query`, honouring permissions.

    :param doctype: DocType to query.
    :param fields: List of fields or `*`.
    :param filters: Dict or list-of-lists filters.
    :param order_by: e.g. ``modified desc``.
    :param limit_page_start: Offset of the first record (default 0).
    :param limit_page_length: Page size (default 20).

    Examples::

        frappe.get_list("ToDo", fields=["name", "description"], filters={"owner": "test@example.com"})
        frappe.get_list("ToDo", fields="*", filters=[["modified", ">", "2014-01-01"]])
        frappe.get_list("ToDo", fields="*", filters={"description": ("like", "test%")})
    """
    from frappe.model.db_query import DatabaseQuery
    return DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
    """Query the database via `frappe.model.db_query` **without** permission checks.

    Parameters are the same as :func:`frappe.get_list`, except that
    ``limit_page_length`` defaults to 0 (no paging) and permissions are ignored.

    :param doctype: DocType to query.
    :param fields: List of fields or `*`. Default ``["name"]``.
    :param filters: Dict or list-of-lists filters.
    :param order_by: e.g. ``modified desc``.
    :param limit_start: Offset of the first record (default 0).
    :param limit_page_length: Page size (0 = unlimited).

    Examples::

        frappe.get_all("ToDo", fields=["name", "description"], filters={"owner": "test@example.com"})
        frappe.get_all("ToDo", fields=["*"], filters=[["modified", ">", "2014-01-01"]])
        frappe.get_all("ToDo", fields=["*"], filters={"description": ("like", "test%")})
    """
    kwargs["ignore_permissions"] = True
    # Idiom fix: was `if not "limit_page_length" in kwargs` — setdefault says the same thing.
    kwargs.setdefault("limit_page_length", 0)
    return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
    """Return a document property or list of properties (alias for `frappe.db.get_value`).

    :param doctype: DocType name.
    :param filters: Filters like ``{"x": "y"}`` or a document name; None for a Single DocType.
    :param fieldname: Column name.
    :param ignore: Don't raise if the table or column is missing.
    :param as_dict: Return values as a dict.
    :param debug: Print the query to the error log.
    """
    return db.get_value(*args, **kwargs)
def as_json(obj, indent=1):
    """Serialize *obj* to a deterministic (sorted-key), pretty-printed JSON string,
    using frappe's `json_handler` for non-JSON-native types."""
    from frappe.utils.response import json_handler
    return json.dumps(obj, default=json_handler, separators=(',', ': '),
        indent=indent, sort_keys=True)
def are_emails_muted():
    """Return a truthy value when outgoing email is suppressed, either by the
    in-process ``flags.mute_emails`` or by the ``mute_emails`` site config key."""
    from frappe.utils import cint
    muted = flags.mute_emails
    if not muted:
        muted = cint(conf.get("mute_emails") or 0)
    return muted or False
def get_test_records(doctype):
    """Load the list of fixture objects from ``test_records.json`` in the
    doctype's module folder; return [] when the file does not exist."""
    from frappe.modules import get_doctype_module, get_module_path
    module_path = get_module_path(get_doctype_module(doctype))
    path = os.path.join(module_path, "doctype", scrub(doctype), "test_records.json")
    if not os.path.exists(path):
        return []
    with open(path, "r") as f:
        return json.load(f)
def format_value(*args, **kwargs):
    """Format a raw value for display according to its field properties.

    :param value: Value to be formatted.
    :param df: (Optional) DocField object with properties `fieldtype`, `options` etc.
    """
    from frappe.utils import formatters
    return formatters.format_value(*args, **kwargs)
def format(*args, **kwargs):
    """Alias for :func:`format_value` (kept for API compatibility;
    note it shadows the builtin ``format`` at module level).

    :param value: Value to be formatted.
    :param df: (Optional) DocField object with properties `fieldtype`, `options` etc.
    """
    return format_value(*args, **kwargs)
def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output = None, no_letterhead = 0, password=None):
    """Render the print view for a document and return it as HTML or PDF.

    :param doctype: DocType of the document.
    :param name: Name of the document.
    :param print_format: Print Format name (default "Standard").
    :param style: Print Format style.
    :param as_pdf: Return a PDF instead of HTML. Default False.
    :param password: Optional password to encrypt the PDF with.
    """
    from frappe.website.render import build_page
    from frappe.utils.pdf import get_pdf

    # The print view reads its inputs from the request form dict.
    local.form_dict.doctype = doctype
    local.form_dict.name = name
    local.form_dict.format = print_format
    local.form_dict.style = style
    local.form_dict.doc = doc
    local.form_dict.no_letterhead = no_letterhead

    options = {'password': password} if password else None

    if not html:
        html = build_page("printview")
    if as_pdf:
        return get_pdf(html, output=output, options=options)
    return html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None, lang=None, print_letterhead=True, password=None):
    """Render a document's print view as an attachment dict with keys
    ``fname`` and ``fcontent`` — PDF or HTML depending on Print Settings."""
    from frappe.utils import scrub_urls

    file_name = (file_name or name).replace(' ', '').replace('/', '-')
    print_settings = db.get_singles_dict("Print Settings")

    original_lang = local.lang
    if lang:
        # Render this attachment in the requested language.
        local.lang = lang
    local.flags.ignore_print_permissions = True

    render_kwargs = dict(print_format=print_format, style=style, html=html,
        doc=doc, no_letterhead=not print_letterhead, password=password)

    if int(print_settings.send_print_as_pdf or 0):
        out = {
            "fname": file_name + ".pdf",
            "fcontent": get_print(doctype, name, as_pdf=True, **render_kwargs)
        }
    else:
        out = {
            "fname": file_name + ".html",
            "fcontent": scrub_urls(get_print(doctype, name, **render_kwargs)).encode("utf-8")
        }

    local.flags.ignore_print_permissions = False
    # Restore the original request language.
    local.lang = original_lang
    return out
def publish_progress(*args, **kwargs):
    """Push a progress update to the user for a long-running request.

    :param percent: Percent progress.
    :param title: Title.
    :param doctype: Optional document type.
    :param docname: Optional document name.
    :param description: Optional description.
    """
    from frappe import realtime
    return realtime.publish_progress(*args, **kwargs)
def publish_realtime(*args, **kwargs):
    """Publish a real-time (websocket) update.

    :param event: Event name, like `task_progress` etc.
    :param message: JSON message object. For async must contain `task_id`.
    :param room: Room in which to publish the update (default: entire site).
    :param user: Transmit to a single user.
    :param doctype: Transmit to doctype, docname.
    :param docname: Transmit to doctype, docname.
    :param after_commit: (default False) emit only after the current transaction commits.
    """
    from frappe import realtime
    return realtime.publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
    """Request-scoped memoization store.

    :param namespace: ``frappe.local.cache[namespace]``.
    :param key: ``frappe.local.cache[namespace][key]`` used to retrieve the value.
    :param generator: Zero-argument callable producing the value on a miss.
    :param regenerate_if_none: Re-run *generator* when a cached None is found.
    """
    if namespace not in local.cache:
        local.cache[namespace] = {}
    store = local.cache[namespace]
    # Fix: the cached-None check used `== None`; compare identity with `is None` (PEP 8).
    if key not in store or (regenerate_if_none and store[key] is None):
        store[key] = generator()
    return store[key]
def enqueue(*args, **kwargs):
    """Schedule a method for execution by a background worker.

    :param method: Method string or method object.
    :param queue: (optional) One of long, default or short.
    :param timeout: (optional) Set according to the function's runtime.
    :param event: Passed through to enable clearing of jobs from queues.
    :param is_async: (optional) If False the method runs immediately, else via a worker.
    :param job_name: (optional) Name for the call, usable to prevent duplicate enqueues.
    :param kwargs: Keyword arguments forwarded to the method.
    """
    from frappe.utils.background_jobs import enqueue as _enqueue
    return _enqueue(*args, **kwargs)
def enqueue_doc(*args, **kwargs):
    """Schedule a document method for execution by a background worker.

    :param doctype: DocType of the document to run the event on.
    :param name: Name of the document to run the event on.
    :param method: Method string or method object.
    :param queue: (optional) One of long, default or short.
    :param timeout: (optional) Set according to the function's runtime.
    :param kwargs: Keyword arguments forwarded to the method.
    """
    from frappe.utils.background_jobs import enqueue_doc as _enqueue_doc
    return _enqueue_doc(*args, **kwargs)
def get_doctype_app(doctype):
    """Return the app that provides *doctype*, memoized per request via local_cache."""
    def _lookup():
        module = local.db.get_value("DocType", doctype, "module")
        return local.module_app[scrub(module)]
    return local_cache("doctype_app", doctype, generator=_lookup)
# Module-level logger registry; presumably populated/consumed by frappe.utils.logger — confirm.
loggers = {}
# Global logging-level override; None appears to mean "use the configured default" — confirm.
log_level = None
def logger(module=None, with_more_info=True):
    """Return a python logger (StreamHandler-based) for *module*, defaulting to 'default'."""
    from frappe.utils.logger import get_logger
    name = module or 'default'
    return get_logger(name, with_more_info=with_more_info)
def log_error(message=None, title=None):
    """Insert an Error Log document (ignoring permissions) and return it.

    :param message: Error text; defaults to the current traceback.
    :param title: Stored in the log's ``method`` field.
    """
    error_text = as_unicode(message or get_traceback())
    log = get_doc(dict(doctype='Error Log', error=error_text, method=title))
    return log.insert(ignore_permissions=True)
def get_desk_link(doctype, name):
    """Return a bold HTML anchor to the desk form for (doctype, name)."""
    label = _(doctype)
    return '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'.format(doctype, name, label)
def bold(text):
    """Wrap *text* in HTML ``<b>`` tags."""
    return '<b>{0}</b>'.format(text)
def safe_eval(code, eval_globals=None, eval_locals=None):
    '''A restricted `eval`: rejects any dunder access in *code* and exposes
    only a small whitelist of builtins (int, float, long, round).'''
    # Reject dunder access up front — blocks the usual eval escape routes.
    if '__' in code:
        throw('Illegal rule {0}. Cannot use "__"'.format(bold(code)))

    safe_builtins = {
        "int": int,
        "float": float,
        "long": int,
        "round": round,
    }
    if not eval_globals:
        eval_globals = {}
    # Replace the builtins and expose only the whitelist (mutates caller's dict on purpose).
    eval_globals['__builtins__'] = {}
    eval_globals.update(safe_builtins)
    return eval(code, eval_globals, eval_locals)
def get_system_settings(key):
    """Fetch a single System Settings value, caching it on the request-local store."""
    if key not in local.system_settings:
        local.system_settings[key] = db.get_single_value('System Settings', key)
    return local.system_settings.get(key)
def get_active_domains():
    """Return the list of currently active domains from Domain Settings."""
    from frappe.core.doctype.domain_settings import domain_settings
    return domain_settings.get_active_domains()
def get_version(doctype, name, limit = None, head = False, raise_err = True):
    '''
    Return the version history of a document (only if the DocType tracks changes).

    :param doctype: DocType of the document.
    :param name: Document name.
    :param limit: Optional cap on the number of versions returned.
    :param head: When True, order by creation (oldest first).
    :param raise_err: Raise ValueError when the DocType does not track changes.

    Example
    >>> frappe.get_version('User', 'foobar@gmail.com')
    [
        {
            "version": [version.data], # Refer Version DocType get_diff method and data attribute
            "user": "admin@gmail.com", # User that created this version
            "creation": <datetime.datetime> # Creation timestamp of that object
        }
    ]
    '''
    meta = get_meta(doctype)
    if not meta.track_changes:
        if raise_err:
            raise ValueError('{doctype} has no versions tracked.'.format(doctype=doctype))
        return

    # Security fix: doctype/name/limit were interpolated straight into the SQL
    # string; pass them as query parameters instead (only the static ORDER BY /
    # LIMIT fragments are formatted in).
    query = """
        SELECT name from tabVersion
        WHERE ref_doctype = %(doctype)s AND docname = %(name)s
        {order_by}
        {limit}
    """.format(
        order_by='ORDER BY creation' if head else '',
        limit='LIMIT %(limit)s' if limit else '',
    )
    names = db.sql(query, {'doctype': doctype, 'name': name, 'limit': limit})

    from frappe.chat.util import squashify, dictify, safe_json_loads

    versions = []
    for row in names:
        version_name = squashify(row)
        doc = get_doc('Version', version_name)
        versions.append(dictify(dict(
            version=safe_json_loads(doc.data),
            user=doc.owner,
            creation=doc.creation,
        )))
    return versions
@whitelist(allow_guest = True)
def ping():
    """Guest-accessible liveness check; always returns the string "pong"."""
    return "pong"
def safe_encode(param, encoding = 'utf-8'):
    """Encode *param* with *encoding*; if encoding fails for any reason
    (e.g. *param* has no ``.encode``), return it unchanged."""
    try:
        return param.encode(encoding)
    except Exception:
        return param
def safe_decode(param, encoding = 'utf-8'):
    """Decode *param* with *encoding*; if decoding fails for any reason
    (e.g. *param* has no ``.decode``), return it unchanged."""
    try:
        return param.decode(encoding)
    except Exception:
        return param
def parse_json(val):
    """Parse *val* as JSON via `frappe.utils.parse_json`."""
    from frappe.utils import parse_json as _parse_json
    return _parse_json(val)
def mock(type, size = 1, locale = 'en'):
    """Generate fake data via Faker.

    :param type: Name of a Faker provider method (e.g. ``name``, ``email``);
        note the parameter shadows the builtin ``type`` (kept for API compatibility).
    :param size: Number of values to generate.
    :param locale: Faker locale.
    :raises ValueError: If *type* is not a valid Faker provider.
    :returns: A single value when size is 1, else a list (via squashify).
    """
    faker = Faker(locale)
    # Idiom fix: was `if not type in dir(faker)`.
    if type not in dir(faker):
        raise ValueError('Not a valid mock type.')

    results = [getattr(faker, type)() for _ in range(size)]

    from frappe.chat.util import squashify
    return squashify(results)
|
#!/usr/bin/python
import sys
import locale
import RPi.GPIO as GPIO
from flask import Flask, send_from_directory, jsonify, request
from flask.ext.socketio import SocketIO, emit
## Parameters
# BCM pin number driving the fan, and the PWM carrier frequency in Hz.
GPIO_FAN = 17
PWM_FREQUENCY = 1000
## REST API URLs
BASE_URL="/"
FAN_URL = BASE_URL + "fan/"
EVENT_URL = BASE_URL + "events/"
## Instantiate Flask (static files and REST API)
app = Flask(__name__)
## Instantiate SocketIO (websockets, used for events) on top of it
socketio = SocketIO(app)
## Deliver static files (index.html, css, images and js files)
@app.route(BASE_URL)
def index():
    # Serve the single-page app entry point.
    return send_from_directory('static', 'index.html')
@app.route(BASE_URL + 'css/<path:path>')
def static_css_proxy(path):
    # Serve stylesheet assets from static/css/.
    return send_from_directory('static/css/', path)
@app.route(BASE_URL + 'js/<path:path>')
def static_js_proxy(path):
    # Serve JavaScript assets from static/js/.
    return send_from_directory('static/js/', path)
## REST API
@app.route(FAN_URL + '<int:percent>', methods=['PUT', 'GET'])
def set_fan_speed(percent):
    """Set the fan PWM duty cycle to *percent* and broadcast the change.

    NOTE(review): GET also mutates state here (non-idempotent GET) — kept for
    backward compatibility with existing clients.
    """
    global duty_cycle
    # Fix: clamp to 0..100 — RPi.GPIO's ChangeDutyCycle raises ValueError outside that range.
    duty_cycle = min(max(percent, 0), 100)
    led.ChangeDutyCycle(duty_cycle)
    socketio.emit('fanEvent', {'data': 'Set fan speed (PUT) to ' + str(duty_cycle)}, namespace="/events")
    return jsonify({'error': 0}), 200
@app.route(FAN_URL, methods=['POST'])
def set_fan_speed_post():
    """Set the fan PWM duty cycle from the POSTed ``speed`` form field."""
    global duty_cycle
    # Fix: form values arrive as strings — convert once and clamp to the valid
    # 0..100 range, so the global (reported by GET /fan/) is a number, and drop
    # the leftover debug prints.
    duty_cycle = min(max(int(request.form['speed']), 0), 100)
    led.ChangeDutyCycle(duty_cycle)
    socketio.emit('fanEvent', {'data': 'Set fan speed (POST) to ' + str(duty_cycle)}, namespace="/events")
    return jsonify({'error': 0}), 200
@app.route(FAN_URL, methods=['GET'])
def get_fan_speed():
    # Report the last duty cycle stored by the setters / init_pwm.
    return jsonify({'speed': duty_cycle}), 200
@app.route(EVENT_URL, methods=['POST'])
def broadcast_event():
    # Relay the POSTed JSON 'data' payload to all websocket clients in /events.
    socketio.emit('serverEvent', {'data': request.json['data']}, namespace="/events")
    return jsonify({'error': 0}), 201
## Events
## DEPRECATED - Events are broadcasted as they come in on POST requests
@socketio.on('webEvent', namespace='/events')
def test_message(message):
    # Echo an incoming websocket event back to every client in the namespace.
    emit('serverEvent', {'data': message['data']}, broadcast=True)
## Init Raspberry GPIO
def init_pwm():
    """Configure the fan pin for PWM and start it at a 50% duty cycle."""
    global led
    global duty_cycle
    # Setup PWM for fan control (BCM pin numbering).
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(GPIO_FAN, GPIO.OUT)
    led = GPIO.PWM(GPIO_FAN, PWM_FREQUENCY)
    duty_cycle = 50
    led.start(duty_cycle)
# Main - Start Flask server through SocketIO for websocket support
if __name__ == '__main__':
    # Set locale for Flask
    locale.setlocale(locale.LC_ALL, '')
    init_pwm()
    # Enable Flask debug mode when "debug" is passed on the command line
    if "debug" in sys.argv:
        app.debug = True
    # Blocking! - returns only when the server shuts down
    socketio.run(app, host='0.0.0.0')
    # Reset GPIO once the server has stopped
    led.stop()
    GPIO.cleanup()
[Server] Refactoring fanspeed setter.
Signed-off-by: Juri Berlanda <5bfdca9e82c53adb0603ce7083f4ba4f2da5cacf@hotmail.com>
#!/usr/bin/python
import sys
import locale
import RPi.GPIO as GPIO
from flask import Flask, send_from_directory, jsonify, request
from flask.ext.socketio import SocketIO, emit
## Parameters
# BCM pin number driving the fan, and the PWM carrier frequency in Hz.
GPIO_FAN = 17
PWM_FREQUENCY = 1000
## REST API URLs
BASE_URL="/"
FAN_URL = BASE_URL + "fan/"
EVENT_URL = BASE_URL + "events/"
## Instantiate Flask (static files and REST API)
app = Flask(__name__)
## Instantiate SocketIO (websockets, used for events) on top of it
socketio = SocketIO(app)
## Deliver static files (index.html, css, images and js files)
@app.route(BASE_URL)
def index():
    # Serve the single-page app entry point.
    return send_from_directory('static', 'index.html')
@app.route(BASE_URL + 'css/<path:path>')
def static_css_proxy(path):
    # Serve stylesheet assets from static/css/.
    return send_from_directory('static/css/', path)
@app.route(BASE_URL + 'js/<path:path>')
def static_js_proxy(path):
    # Serve JavaScript assets from static/js/.
    return send_from_directory('static/js/', path)
## REST API
@app.route(FAN_URL + '<int:speed>', methods=['PUT', 'GET'])
def set_fan_speed(speed):
    # Delegate clamping/PWM update to set_fanspeed; <int:speed> guarantees an int.
    # NOTE(review): GET also mutates state here (non-idempotent GET) — confirm intended.
    set_fanspeed(speed)
    return jsonify({'error': 0}), 200
@app.route(FAN_URL, methods=['POST'])
def set_fan_speed_post():
    """Set the fan speed from the POSTed ``speed`` form field."""
    # Fix: form values arrive as strings, but set_fanspeed clamps numerically
    # (min/max against 0 and 100) — convert to int before delegating.
    set_fanspeed(int(request.form['speed']))
    return jsonify({'error': 0}), 200
@app.route(FAN_URL, methods=['GET'])
def get_fan_speed():
    # Report the last duty cycle stored by set_fanspeed / init_pwm.
    return jsonify({'speed': duty_cycle}), 200
@app.route(EVENT_URL, methods=['POST'])
def broadcast_event():
    # Relay the POSTed JSON 'data' payload to all websocket clients in /events.
    socketio.emit('serverEvent', {'data': request.json['data']}, namespace="/events")
    return jsonify({'error': 0}), 201
## Events
## DEPRECATED - Events are broadcasted as they come in on POST requests
@socketio.on('webEvent', namespace='/events')
def test_message(message):
    # Echo an incoming websocket event back to every client in the namespace.
    emit('serverEvent', {'data': message['data']}, broadcast=True)
## Init Raspberry GPIO
def init_pwm():
    """Configure the fan pin for PWM and start it at a 50% duty cycle."""
    global led
    global duty_cycle
    # Setup PWM for fan control (BCM pin numbering).
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(GPIO_FAN, GPIO.OUT)
    led = GPIO.PWM(GPIO_FAN, PWM_FREQUENCY)
    duty_cycle = 50
    led.start(duty_cycle)
## Setter for fan speed
def set_fanspeed(speed):
    """Clamp *speed* to 0-100, apply it as the fan PWM duty cycle, and broadcast the change."""
    global duty_cycle
    # Fix: was the duplicated `duty_cycle = duty_cycle = min(...)` typo.
    # Clamp because ChangeDutyCycle only accepts 0..100.
    duty_cycle = min(max(speed, 0), 100)
    led.ChangeDutyCycle(int(duty_cycle))
    # TODO Remove when working
    socketio.emit('serverEvent', {'data': 'Set fan speed to ' + str(duty_cycle)}, namespace="/events")
# Main - Start Flask server through SocketIO for websocket support
if __name__ == '__main__':
    # Set locale for Flask
    locale.setlocale(locale.LC_ALL, '')
    init_pwm()
    # Enable Flask debug mode when "debug" is passed on the command line
    if "debug" in sys.argv:
        app.debug = True
    # Blocking! - returns only when the server shuts down
    socketio.run(app, host='0.0.0.0')
    # Reset GPIO once the server has stopped
    led.stop()
    GPIO.cleanup()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015
#
# STIC - Universidad de La Laguna (ULL) <gesinv@ull.edu.es>
#
# This file is part of Modelado de Servicios TIC.
#
# Modelado de Servicios TIC is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Modelado de Servicios TIC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Modelado de Servicios TIC. If not, see
# <http://www.gnu.org/licenses/>.
#
import os, sys, getopt
# Python 2 only: force UTF-8 as the default encoding for the Spanish text below.
reload(sys)
sys.setdefaultencoding('utf-8')
from xml.dom import minidom
from xml.sax.handler import ContentHandler
import xml.sax  # module that syntactically parses an XML file
xml_analizador = xml.sax.make_parser()  # parser object that will validate the file
xml_analizador.setContentHandler(ContentHandler())  # content handler (no-op; used only for well-formedness)
# Input file: first CLI argument, unless invoked as "runserver"; default fixture otherwise.
if (len(sys.argv) == 2 and sys.argv[1] != "runserver"):
    nombre_fichero = str(sys.argv[1])
else:
    nombre_fichero = "Archi_Upload.archimate"
# Check that the XML file is well formed.
# NOTE(review): bare `except:` swallows every error (including KeyboardInterrupt)
# and execution continues to minidom.parse below — confirm this is intended.
try:
    xml_analizador.parse(nombre_fichero)  # parse the file
    print("\nEl fichero XML " + nombre_fichero + " está bien formado.")
except:
    print ("\nError:\n\t " + nombre_fichero + " no es un fichero bien formado")
    #sys.exit()
# Load the full DOM document.
xml_documento = minidom.parse(nombre_fichero)
nodos = xml_documento.childNodes
# The commented line below is an example of how to read an attribute value:
#lista = nodos[0].getElementsByTagName("element")[0].attributes.get("name").value
#print(lista)
# List of every "element" node in the model.
lista = nodos[0].getElementsByTagName("element")
# Containers for the Archimate objects collected by the carga* functions.
BusinessServiceArray =[]
BusinessRoleArray =[]
AssociationRelationshipArray = []
GroupArray = []
GroupCriticArray = []
GroupRoleArray = []
GroupAccessArray = []
ServicesPerGroupArray = []
ViewsNameArray = []
# Names of the diagram views the loaders look for (must match the model).
VistaGruposServicios = "Carta de servicios"
VistaRoles = "Roles"
VistaCriticidad = "Criticidad"
VistaAcceso = "Nivel de Acceso"
#Load services into BusinessServiceArray
def cargaBusinessService():
    """Collect every BusinessService element into BusinessServiceArray.

    Each entry is [id, name, property_keys, property_values, criticality, access_level].
    Placeholder services named "Servicio", "Servicio 1..3" are skipped.
    """
    for nodo in lista:
        if(nodo.attributes.get("xsi:type").value == "archimate:BusinessService"):
            nombre_servicio = nodo.attributes.get("name").value
            id_servicio = nodo.attributes.get("id").value
            listahijos = nodo.getElementsByTagName("property")
            clave = []
            valor = []
            for hijo in listahijos:
                clave.append(hijo.attributes.get("key").value)
                if(hijo.attributes.get("value") != None):
                    valor.append(hijo.attributes.get("value").value)
                else:
                    # Property without a value: store the "empty field" placeholder.
                    valor.append("Campo Vacío")
            # Criticality and access level are derived from the diagram views.
            criticidad = ServiceCritic(id_servicio)
            acceso = ServiceAccess(id_servicio)
            if(nombre_servicio not in ["Servicio","Servicio 1","Servicio 2","Servicio 3"]):
                BusinessServiceArray.append([id_servicio,nombre_servicio, clave, valor, criticidad, acceso])
#Load roles into BusinessRoleArray
def cargaBusinessRole():
    """Collect every BusinessRole element as [id, name] into BusinessRoleArray."""
    for nodo in lista:
        if(nodo.attributes.get("xsi:type").value == "archimate:BusinessRole"):
            nombre_rol = nodo.attributes.get("name").value
            id_roles = nodo.attributes.get("id").value
            for i in BusinessRoleArray:
                if (i[0] == id_roles or i[1] == nombre_rol):
                    # NOTE(review): only warns on duplicates — the duplicate is
                    # still appended below. Confirm whether it should be skipped.
                    print("ERROR: Nombre de ROL o ID repetido en el modelo")
            BusinessRoleArray.append([id_roles,nombre_rol])
#Load association relationships
def cargaAssociationRelationship():
    """Collect every AssociationRelationship as [id, source_id, target_id]."""
    for nodo in lista:
        if(nodo.attributes.get("xsi:type").value == "archimate:AssociationRelationship"):
            id_relacion_asociacion = nodo.attributes.get("id").value
            source = nodo.attributes.get("source").value
            target = nodo.attributes.get("target").value
            AssociationRelationshipArray.append([id_relacion_asociacion,source,target])
#Load the service groups by criticality, role or service group
def cargaGroup():
    """Collect [id, name] of every Group child of each diagram view, routed to the
    array matching the view name (services / criticality / roles / access)."""
    for nodo in lista:
        if((nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel")):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    nombre_grupo = hijo.attributes.get("name").value
                    id_grupo = hijo.attributes.get("id").value
                    # Route the group to the container matching its parent view.
                    if(nodo.attributes.get("name").value == VistaGruposServicios):
                        GroupArray.append([id_grupo,nombre_grupo])
                    elif(nodo.attributes.get("name").value == VistaCriticidad):
                        GroupCriticArray.append([id_grupo,nombre_grupo])
                    elif(nodo.attributes.get("name").value == VistaRoles):
                        GroupRoleArray.append([id_grupo,nombre_grupo])
                    elif(nodo.attributes.get("name").value == VistaAcceso):
                        GroupAccessArray.append([id_grupo,nombre_grupo])
#Load every service-per-group pair for the service, criticality, role and access views
def BusinessServicePorGroup():
    """Fill ServicesPerGroupArray with [group_id, service_id] pairs for every
    known BusinessService found inside a Group of the four relevant views."""
    for nodo in lista:
        if((nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel") and ((nodo.attributes.get("name").value) in [VistaCriticidad,VistaGruposServicios,VistaRoles,VistaAcceso])):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    id_grupo = hijo.attributes.get("id").value
                    listanietos = hijo.getElementsByTagName("child")
                    for nieto in listanietos:
                        if (nieto.attributes.get("xsi:type").value == "archimate:DiagramObject"):
                            # Only record diagram objects referencing a loaded service.
                            for i in BusinessServiceArray:
                                if( i[0] == nieto.attributes.get("archimateElement").value):
                                    id_nieto = nieto.attributes.get("archimateElement").value
                                    ServicesPerGroupArray.append([str(id_grupo),str(id_nieto)])
#Criticality lookup for one concrete service
def ServiceCritic(serviceID):
    """Return the criticality level (the containing group's name in the
    "Criticidad" view) for *serviceID*, or None when not found."""
    for nodo in lista:
        if((nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel") and (str(nodo.attributes.get("name").value)) == VistaCriticidad):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    listanietos = hijo.getElementsByTagName("child")
                    for nieto in listanietos:
                        id_nieto = nieto.attributes.get("archimateElement").value
                        if(serviceID == id_nieto):
                            return hijo.attributes.get("name").value
#Access-level lookup for one concrete service
def ServiceAccess(serviceID):
    """Return the access level (the containing group's name in the
    "Nivel de Acceso" view) for *serviceID*, or None when not found."""
    for nodo in lista:
        if((nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel") and (str(nodo.attributes.get("name").value)) == VistaAcceso):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    listanietos = hijo.getElementsByTagName("child")
                    for nieto in listanietos:
                        id_nieto = nieto.attributes.get("archimateElement").value
                        if(serviceID == id_nieto):
                            return hijo.attributes.get("name").value
#Get the stored access level for a service
def getServiceAccess(serviceID):
    # Entries are [id, name, keys, values, criticality, access]; index 5 is the access level.
    for servicio in BusinessServiceArray:
        if servicio[0] == serviceID:
            return servicio[5]
#Get the stored criticality level for a service
def getServiceCritic(serviceID):
    # Entries are [id, name, keys, values, criticality, access]; index 4 is the criticality.
    for servicio in BusinessServiceArray:
        if servicio[0] == serviceID:
            return servicio[4]
# Get every group a service belongs to, from the service identifier
def getServiceAllGroups(serviceID):
    """Return the list of group IDs containing *serviceID*."""
    groups = []
    for pareja in ServicesPerGroupArray:
        if(pareja[1] == serviceID):
            groups.append(pareja[0])
    # Fix: previously `return group` — an undefined name, so every call raised NameError.
    return groups
# Get the services of a group (service, criticality or role group) from the group identifier
def getGroupServices(groupID):
    """Return the IDs of every known BusinessService drawn inside the group *groupID*."""
    services=[]
    for nodo in lista:
        if(nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel"):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    if(hijo.attributes.get("id").value == groupID):
                        listanietos = hijo.getElementsByTagName("child")
                        for nieto in listanietos:
                            if (nieto.attributes.get("xsi:type").value == "archimate:DiagramObject"):
                                # Keep only diagram objects referencing a loaded service.
                                for i in BusinessServiceArray:
                                    if( i[0] == nieto.attributes.get("archimateElement").value):
                                        id_nieto = nieto.attributes.get("archimateElement").value
                                        services.append(id_nieto)
    return services
# Get the roles for which a service is available
def getServiceRoles(serviceID):
    """Return the role-view group IDs whose group contains *serviceID*."""
    roles = []
    for i in ServicesPerGroupArray:
        if(serviceID == i[1]):
            for k in GroupRoleArray:
                if(k[0] == i[0]):
                    # NOTE(review): appends the group ID (i[0]); if role *names* are
                    # wanted, this should be k[1] — confirm against callers.
                    roles.append(i[0])
    return roles
# Get the service-group name for a service (ID)
def getServiceGroup(serviceID):
    for pareja in ServicesPerGroupArray:
        if pareja[1] != serviceID:
            continue
        for grupo in GroupArray:
            if grupo[0] == pareja[0]:
                return grupo[1]
# Print the services of a role (via its association relationships), numbered
def getRoleServices(roleID):
    contador = 1
    for rel in AssociationRelationshipArray:
        # rel = [relation_id, source_id, target_id]; the service is at the other end.
        if (roleID == rel[1]):
            print("\n"+ str(contador)+") " + str(getBusinessServiceName(rel[2])))
            contador = contador + 1
        elif (roleID == rel[2]):
            print("\n"+ str(contador)+") " + str(getBusinessServiceName(rel[1])))
            contador = contador + 1
##Get names from IDs##
def getBusinessRoleName(BusinessRoleID):
    for rol in BusinessRoleArray:
        if rol[0] == BusinessRoleID:
            return rol[1]
def getBusinessServiceName(BusinessServiceID):
    # Entries are [id, name, ...]; return the name for a matching ID.
    for servicio in BusinessServiceArray:
        if servicio[0] == BusinessServiceID:
            return servicio[1]
def getGroupName(GroupID):
    # Search every group container in the same order as the original lookups:
    # service groups, role groups, criticality groups, then access groups.
    for contenedor in (GroupArray, GroupRoleArray, GroupCriticArray, GroupAccessArray):
        for grupo in contenedor:
            if grupo[0] == GroupID:
                return grupo[1]
##Get IDs from names##
def getBusinessRoleID(BusinessRoleName):
    for rol in BusinessRoleArray:
        if rol[1] == BusinessRoleName:
            return rol[0]
def getBusinessServiceID(BusinessServiceName):
    # Entries are [id, name, ...]; return the ID for a matching name.
    for servicio in BusinessServiceArray:
        if servicio[1] == BusinessServiceName:
            return servicio[0]
def getGroupID(GroupName):
    """Return the group ID for *GroupName*, searching service, criticality,
    role and access groups (in that order)."""
    # Consistency fix: GroupAccessArray was not searched here, although the
    # inverse lookup (getGroupName) does include it.
    for contenedor in (GroupArray, GroupCriticArray, GroupRoleArray, GroupAccessArray):
        for grupo in contenedor:
            if grupo[1] == GroupName:
                return grupo[0]
#Return a [keys, values] pair of property lists for a service
def getServiceProperties(serviceID):
    # Entries are [id, name, keys, values, criticality, access].
    for servicio in BusinessServiceArray:
        if servicio[0] == serviceID:
            return [servicio[2], servicio[3]]
def inicializacion():
    """Load every container from the parsed model, in dependency order
    (services first — they look up criticality/access — then relations and groups)."""
    cargaBusinessService()
    cargaBusinessRole()
    cargaAssociationRelationship()
    cargaGroup()
    BusinessServicePorGroup()
    #runtest()
############### TEST ###############
def runtest():
    """Run every model-consistency check (membership and multiplicity warnings)."""
    service_nogroup_test()
    service_nocriticgroup_test()
    service_noaccessgroup_test()
    service_multiplegroup_test()
    service_multiplecriticgroup_test()
    service_multipleaccessgroup_test()
def service_nogroup_test():
for i in BusinessServiceArray:
k = 0
for j in GroupArray:
if([j[0],i[0]] in ServicesPerGroupArray):
k=1
break
if (k == 0):
print " \n [!] WARNING [!] El Servicio "+ i[1] + " no pertenece a ningún Grupo de Servicios \n"
def service_nocriticgroup_test():
for i in BusinessServiceArray:
k = 0
for j in GroupCriticArray:
if([j[0],i[0]] in ServicesPerGroupArray):
k=1
break
if (k == 0):
print " \n [!] WARNING [!] El Servicio "+ i[1] + " no pertenece a ningún Grupo de Criticidad \n"
def service_noaccessgroup_test():
for i in BusinessServiceArray:
k = 0
for j in GroupAccessArray:
if([j[0],i[0]] in ServicesPerGroupArray):
k=1
break
if (k == 0):
print " \n [!] WARNING [!] El Servicio "+ i[1] + " no pertenece a ningún Nivel de Acceso \n"
def service_multiplegroup_test():
for i in BusinessServiceArray:
groups = []
k = 0
for j in GroupArray:
if(([j[0],i[0]] in ServicesPerGroupArray) and (k == 0)):
groups.append(j[0])
k=1
elif(([j[0],i[0]] in ServicesPerGroupArray) and (k == 1)):
groups.append(j[0])
k=2
elif(([j[0],i[0]] in ServicesPerGroupArray)):
groups.append(j[0])
if (k == 2):
errorMessage = " \n [!] WARNING [!] El Servicio "+ i[1] + " pertenece a varios Grupos de Servicios: "
for m in groups:
errorMessage += "[" + getGroupName(m) + "] "
print errorMessage
# Warn about services assigned to more than one criticality group.
def service_multiplecriticgroup_test():
    for servicio in BusinessServiceArray:
        miembros = [grupo[0] for grupo in GroupCriticArray
                    if [grupo[0], servicio[0]] in ServicesPerGroupArray]
        if len(miembros) >= 2:
            aviso = " \n [!] WARNING [!] El Servicio "+ servicio[1] + " pertenece a varios Niveles de Criticidad: "
            for gid in miembros:
                aviso += "[" + getGroupName(gid) + "] "
            print(aviso)
# Warn about services assigned to more than one access-level group.
def service_multipleaccessgroup_test():
    for servicio in BusinessServiceArray:
        miembros = [grupo[0] for grupo in GroupAccessArray
                    if [grupo[0], servicio[0]] in ServicesPerGroupArray]
        if len(miembros) >= 2:
            aviso = " \n [!] WARNING [!] El Servicio "+ servicio[1] + " pertenece a varios Niveles de Acceso: "
            for gid in miembros:
                aviso += "[" + getGroupName(gid) + "] "
            print(aviso)
# Añadiendo funciones para la vista de aplicacion e infraestructura, primera versión de la cadena de fallos
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015
#
# STIC - Universidad de La Laguna (ULL) <gesinv@ull.edu.es>
#
# This file is part of Modelado de Servicios TIC.
#
# Modelado de Servicios TIC is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Modelado de Servicios TIC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Modelado de Servicios TIC. If not, see
# <http://www.gnu.org/licenses/>.
#
import os, sys, getopt
reload(sys)
sys.setdefaultencoding('utf-8')
from xml.dom import minidom
from xml.sax.handler import ContentHandler
import xml.sax # Modulo que analiza sintacticamente un archivo xml
# --- Well-formedness check and DOM load (runs at module import time) ---
xml_analizador = xml.sax.make_parser()  # SAX parser used only to validate syntax
xml_analizador.setContentHandler(ContentHandler())  # no-op content handler
# The model file may be given as the single CLI argument ("runserver" is
# reserved for the web framework and is not treated as a file name).
if (len(sys.argv) == 2 and sys.argv[1] != "runserver"):
    nombre_fichero = str(sys.argv[1])
else:
    nombre_fichero = "Archi_Upload.archimate"
# Check that the XML file syntax is correct
try:
    xml_analizador.parse(nombre_fichero)  # parse the file
    print("\nEl fichero XML " + nombre_fichero + " está bien formado.")
except Exception:
    # BUG FIX: was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; parse/IO errors are still reported as before.
    print ("\nError:\n\t " + nombre_fichero + " no es un fichero bien formado")
    #sys.exit()
# Obtain the complete document
xml_documento = minidom.parse(nombre_fichero)
nodos = xml_documento.childNodes
# Example of how to fetch an attribute value from a node:
#lista = nodos[0].getElementsByTagName("element")[0].attributes.get("name").value
#print(lista)
# List of every node of type "element" in the model
lista = nodos[0].getElementsByTagName("element")
#Containers for the Archimate objects parsed out of the model
BusinessServiceArray =[]           # [id, name, keys, values, criticality, access]
BusinessRoleArray =[]              # [id, name]
AssociationRelationshipArray = []  # [id, source, target]
UsedByRelationshipArray = []       # [id, source, target]
GroupArray = []                    # service groups: [id, name]
GroupCriticArray = []              # criticality groups: [id, name]
GroupRoleArray = []                # role groups: [id, name]
GroupAccessArray = []              # access-level groups: [id, name]
DeviceArray = []                   # [id, name, keys, values, device group]
ApplicationComponentArray = []     # [id, name]
GroupDeviceArray = []              # device groups: [id, name]
ServicesPerGroupArray = []         # [group id, service id] membership pairs
ViewsNameArray = []                # NOTE(review): never populated in this chunk
#Names of the diagram views the loaders look for
VistaGruposServicios = "Carta de servicios"
VistaRoles = "Roles"
VistaCriticidad = "Criticidad"
VistaAcceso = "Nivel de Acceso"
#Load business services from the parsed element list into BusinessServiceArray
def cargaBusinessService():
    for nodo in lista:
        if(nodo.attributes.get("xsi:type").value == "archimate:BusinessService"):
            nombre_servicio = nodo.attributes.get("name").value
            id_servicio = nodo.attributes.get("id").value
            listahijos = nodo.getElementsByTagName("property")
            clave = []   # property keys
            valor = []   # property values, index-aligned with clave
            for hijo in listahijos:
                clave.append(hijo.attributes.get("key").value)
                if(hijo.attributes.get("value") != None):
                    valor.append(hijo.attributes.get("value").value)
                else:
                    valor.append("Campo Vacío")  # placeholder for valueless properties
            criticidad = ServiceCritic(id_servicio)  # criticality group name (may be None)
            acceso = ServiceAccess(id_servicio)      # access-level group name (may be None)
            # skip the template/example services shipped with the model
            if(nombre_servicio not in ["Servicio","Servicio 1","Servicio 2","Servicio 3"]):
                BusinessServiceArray.append([id_servicio,nombre_servicio, clave, valor, criticidad, acceso])
#Load business roles ([id, name]) into BusinessRoleArray, warning on duplicates
def cargaBusinessRole():
    for nodo in lista:
        if(nodo.attributes.get("xsi:type").value == "archimate:BusinessRole"):
            nombre_rol = nodo.attributes.get("name").value
            id_roles = nodo.attributes.get("id").value
            for i in BusinessRoleArray:
                if (i[0] == id_roles or i[1] == nombre_rol):
                    print("ERROR: Nombre de ROL o ID repetido en el modelo")
            # NOTE(review): the role is appended even when a duplicate was
            # reported above — confirm this is intended.
            BusinessRoleArray.append([id_roles,nombre_rol])
#Load association relationships ([id, source, target]) into AssociationRelationshipArray
def cargaAssociationRelationship():
    for nodo in lista:
        # BUG FIX: this filtered on "archimate:UsedByRelationship", exactly
        # duplicating cargaUsedByRelationship() and leaving real association
        # relationships (used by getRoleServices) unloaded.
        if(nodo.attributes.get("xsi:type").value == "archimate:AssociationRelationship"):
            id_relacion = nodo.attributes.get("id").value
            source = nodo.attributes.get("source").value
            target = nodo.attributes.get("target").value
            AssociationRelationshipArray.append([id_relacion,source,target])
# Load used-by relationships ([id, source, target]) into UsedByRelationshipArray.
def cargaUsedByRelationship():
    for elemento in lista:
        if elemento.attributes.get("xsi:type").value != "archimate:UsedByRelationship":
            continue
        atributos = elemento.attributes
        UsedByRelationshipArray.append([
            atributos.get("id").value,
            atributos.get("source").value,
            atributos.get("target").value,
        ])
# Load application components ([id, name]) into ApplicationComponentArray.
def cargaAppComponent():
    for elemento in lista:
        if elemento.attributes.get("xsi:type").value != "archimate:ApplicationComponent":
            continue
        ApplicationComponentArray.append([
            elemento.attributes.get("id").value,
            elemento.attributes.get("name").value,
        ])
#Load physical devices (with properties and device group) into DeviceArray
def cargaDevice():
    for nodo in lista:
        if(nodo.attributes.get("xsi:type").value == "archimate:Device"):
            nombre_device = nodo.attributes.get("name").value
            id_device = nodo.attributes.get("id").value
            listahijos = nodo.getElementsByTagName("property")
            clave = []   # property keys
            valor = []   # property values, index-aligned with clave
            grupoDisp = DeviceGroup(id_device)  # device-group name (may be None)
            for hijo in listahijos:
                clave.append(hijo.attributes.get("key").value)
                if(hijo.attributes.get("value") != None):
                    valor.append(hijo.attributes.get("value").value)
                else:
                    valor.append("Campo Vacío")  # placeholder for valueless properties
            DeviceArray.append([id_device,nombre_device, clave, valor, grupoDisp])
#Load the groups drawn on each diagram view, classified by the view they appear on
def cargaGroup():
    for nodo in lista:
        if((nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel")):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    nombre_grupo = hijo.attributes.get("name").value
                    id_grupo = hijo.attributes.get("id").value
                    # the name of the view the group is drawn on decides its category
                    if(nodo.attributes.get("name").value == VistaGruposServicios):
                        GroupArray.append([id_grupo,nombre_grupo])
                    elif(nodo.attributes.get("name").value == VistaCriticidad):
                        GroupCriticArray.append([id_grupo,nombre_grupo])
                    elif(nodo.attributes.get("name").value == VistaRoles):
                        GroupRoleArray.append([id_grupo,nombre_grupo])
                    elif(nodo.attributes.get("name").value == VistaAcceso):
                        GroupAccessArray.append([id_grupo,nombre_grupo])
                    elif(nodo.attributes.get("name").value == "Web institucional"):
                        GroupDeviceArray.append([id_grupo,nombre_grupo])
#Record [group id, service id] membership pairs for the four classification views
def BusinessServicePorGroup():
    for nodo in lista:
        if((nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel") and ((nodo.attributes.get("name").value) in [VistaCriticidad,VistaGruposServicios,VistaRoles,VistaAcceso])):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    id_grupo = hijo.attributes.get("id").value
                    listanietos = hijo.getElementsByTagName("child")
                    for nieto in listanietos:
                        if (nieto.attributes.get("xsi:type").value == "archimate:DiagramObject"):
                            # keep only diagram objects that reference a known business service
                            for i in BusinessServiceArray:
                                if( i[0] == nieto.attributes.get("archimateElement").value):
                                    id_nieto = nieto.attributes.get("archimateElement").value
                                    ServicesPerGroupArray.append([str(id_grupo),str(id_nieto)])
#Return the criticality-group name containing the given service id (None if absent)
def ServiceCritic(serviceID):
    for nodo in lista:
        # only look inside the "Criticidad" diagram view
        if((nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel") and (str(nodo.attributes.get("name").value)) == VistaCriticidad):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    listanietos = hijo.getElementsByTagName("child")
                    for nieto in listanietos:
                        # NOTE(review): assumes every grandchild carries an
                        # "archimateElement" attribute — confirm for all models
                        id_nieto = nieto.attributes.get("archimateElement").value
                        if(serviceID == id_nieto):
                            return hijo.attributes.get("name").value
#Return the access-level group name containing the given service id (None if absent)
def ServiceAccess(serviceID):
    for nodo in lista:
        # only look inside the "Nivel de Acceso" diagram view
        if((nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel") and (str(nodo.attributes.get("name").value)) == VistaAcceso):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    listanietos = hijo.getElementsByTagName("child")
                    for nieto in listanietos:
                        # NOTE(review): assumes every grandchild carries an
                        # "archimateElement" attribute — confirm for all models
                        id_nieto = nieto.attributes.get("archimateElement").value
                        if(serviceID == id_nieto):
                            return hijo.attributes.get("name").value
#Return the device-group name containing the given device id (None if absent)
def DeviceGroup(deviceID):
    for nodo in lista:
        # only look inside the "Web institucional" diagram view
        if((nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel") and (str(nodo.attributes.get("name").value)) == "Web institucional"):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    listanietos = hijo.getElementsByTagName("child")
                    for nieto in listanietos:
                        # NOTE(review): assumes every grandchild carries an
                        # "archimateElement" attribute — confirm for all models
                        id_nieto = nieto.attributes.get("archimateElement").value
                        if(deviceID == id_nieto):
                            return hijo.attributes.get("name").value
#From a Device or Infrastructure Service id, walk used-by edges up to the
#affected application component, then report its affected business services.
def getUsedByChain(id_random):
    # direct hit: id_random is the source of a used-by edge whose target
    # is an application component
    for j in UsedByRelationshipArray:
        if str(j[1]) == str(id_random):
            k = 0  # NOTE(review): never used — leftover from an earlier version?
            for i in ApplicationComponentArray:
                if str(i[0]) == str(j[2]):
                    print " >>El app component "+ str(i[1]) + " está afectado"
                    getAffectedApplication(str(i[0]))
                    return
    # otherwise follow edges where id_random is the target and recurse on
    # their sources, stopping when the edge target is itself an app component
    for j in UsedByRelationshipArray:
        if str(j[2]) == str(id_random):
            for i in ApplicationComponentArray:
                if str(i[0]) == str(j[2]):
                    return
            getUsedByChain(str(j[1]))
#From an affected application component, print the business services affected by it
def getAffectedApplication(id_componente):
    listahijo = nodos[0].getElementsByTagName("folder")
    for hijo in listahijo:
        if (hijo.attributes.get("name").value == "Servicios Aplicacion"):
            listanietos = hijo.getElementsByTagName("element")
            for nieto in listanietos:
                if (nieto.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel"):
                    listabis = nieto.getElementsByTagName("child")
                    for bis in listabis:
                        # found a diagram that mentions the component
                        if(bis.attributes.get("archimateElement").value == str(id_componente)):
                            # NOTE(review): every business service drawn on the
                            # same diagram is reported, not only connected ones
                            for rebis in listabis:
                                for j in BusinessServiceArray:
                                    if rebis.attributes.get("archimateElement").value == str(j[0]):
                                        print " >>"+ j[1] + " está afectado"
# Return the access level stored for a service id (None if unknown).
def getServiceAccess(serviceID):
    for registro in BusinessServiceArray:
        if registro[0] == serviceID:
            return registro[5]
# Return the criticality level stored for a service id (None if unknown).
def getServiceCritic(serviceID):
    for registro in BusinessServiceArray:
        if registro[0] == serviceID:
            return registro[4]
# Return every group id (of any kind) that contains the given service id.
def getServiceAllGroups(serviceID):
    groups=[]
    for i in ServicesPerGroupArray:
        if(i[1] == serviceID):
            groups.append(i[0])
    # BUG FIX: previously returned the undefined name "group" (NameError on
    # every call); the accumulator is "groups".
    return groups
# Return the ids of the business services drawn inside the group with the
# given id, searching every diagram view.
def getGroupServices(groupID):
    services=[]
    for nodo in lista:
        if(nodo.attributes.get("xsi:type").value == "archimate:ArchimateDiagramModel"):
            listahijos = nodo.getElementsByTagName("child")
            for hijo in listahijos:
                if (hijo.attributes.get("xsi:type").value == "archimate:Group"):
                    if(hijo.attributes.get("id").value == groupID):
                        listanietos = hijo.getElementsByTagName("child")
                        for nieto in listanietos:
                            if (nieto.attributes.get("xsi:type").value == "archimate:DiagramObject"):
                                # keep only objects referencing a known business service
                                for i in BusinessServiceArray:
                                    if( i[0] == nieto.attributes.get("archimateElement").value):
                                        id_nieto = nieto.attributes.get("archimateElement").value
                                        services.append(id_nieto)
    return services
# Return the role-group ids through which the given service is exposed.
def getServiceRoles(serviceID):
    roles = []
    for par in ServicesPerGroupArray:
        if par[1] != serviceID:
            continue
        for grupo in GroupRoleArray:
            if grupo[0] == par[0]:
                roles.append(par[0])
    return roles
# Return the service-group NAME for the given service id (None if absent).
def getServiceGroup(serviceID):
    for par in ServicesPerGroupArray:
        if par[1] == serviceID:
            for grupo in GroupArray:
                if grupo[0] == par[0]:
                    return grupo[1]
# Print a numbered list of the services associated with the given role id,
# matching the role on either end of each association relationship.
def getRoleServices(roleID):
    contador = 1
    for relacion in AssociationRelationshipArray:
        if roleID == relacion[1]:
            otro_extremo = relacion[2]
        elif roleID == relacion[2]:
            otro_extremo = relacion[1]
        else:
            continue
        print("\n"+ str(contador)+") " + str(getBusinessServiceName(otro_extremo)))
        contador = contador + 1
# Report the Up/Down status of every device (from its property values) and
# chase the used-by fault chain for the ones that are down.
def getDownDevice():
    for dispositivo in DeviceArray:
        for estado in dispositivo[3]:
            if str(estado) == "Down":
                print("El dispositvo " + dispositivo[1] + " está caído \n")
                getUsedByChain(str(dispositivo[0]))
            elif str(estado) == "Up":
                print("El dispositvo " + dispositivo[1] + " funciona correctamente \n")
## ID -> name helpers ##
# Resolve a business-role id to its name (None if unknown).
def getBusinessRoleName(BusinessRoleID):
    for registro in BusinessRoleArray:
        if BusinessRoleID == registro[0]:
            return registro[1]
# Resolve a business-service id to its name (None if unknown).
def getBusinessServiceName(BusinessServiceID):
    for registro in BusinessServiceArray:
        if BusinessServiceID == registro[0]:
            return registro[1]
# Resolve a group id to its name, checking service, role, criticality and
# access-level groups in that order (None if unknown).
def getGroupName(GroupID):
    for coleccion in (GroupArray, GroupRoleArray, GroupCriticArray, GroupAccessArray):
        for registro in coleccion:
            if GroupID == registro[0]:
                return registro[1]
## name -> ID helpers ##
# Resolve a business-role name to its id (None if unknown).
def getBusinessRoleID(BusinessRoleName):
    for registro in BusinessRoleArray:
        if BusinessRoleName == registro[1]:
            return registro[0]
# Resolve a business-service name to its id (None if unknown).
def getBusinessServiceID(BusinessServiceName):
    for registro in BusinessServiceArray:
        if BusinessServiceName == registro[1]:
            return registro[0]
# Resolve a group name to its id, checking service, criticality, role and
# access-level groups (None if unknown).
def getGroupID(GroupName):
    for i in GroupArray:
        if(GroupName == i[1]):
            ID = i[0]
            return ID
    for i in GroupCriticArray:
        if(GroupName == i[1]):
            ID = i[0]
            return ID
    for i in GroupRoleArray:
        if(GroupName == i[1]):
            ID = i[0]
            return ID
    # consistency fix: getGroupName() also covers access-level groups, but
    # this reverse lookup did not, so access groups always resolved to None.
    for i in GroupAccessArray:
        if(GroupName == i[1]):
            ID = i[0]
            return ID
# Return [property keys, property values] for the given service ID,
# or None when the service is not in BusinessServiceArray.
def getServiceProperties(serviceID):
    for registro in BusinessServiceArray:
        if registro[0] == serviceID:
            return [registro[2], registro[3]]
# Load the full Archimate model into the module-level arrays.
def inicializacion():
    cargaBusinessService()          # services (+ criticality / access level)
    cargaBusinessRole()
    cargaAssociationRelationship()
    cargaAppComponent()
    cargaUsedByRelationship()
    cargaGroup()
    cargaDevice()
    getDownDevice()                 # side effect: prints device status report
    BusinessServicePorGroup()       # [group id, service id] membership pairs
    #runtest()
############### TEST ###############
# Run every model-consistency check; each prints warnings for violations.
def runtest():
    service_nogroup_test()
    service_nocriticgroup_test()
    service_noaccessgroup_test()
    service_multiplegroup_test()
    service_multiplecriticgroup_test()
    service_multipleaccessgroup_test()
# Warn about services that belong to no service group.
def service_nogroup_test():
    for servicio in BusinessServiceArray:
        asignado = any([grupo[0], servicio[0]] in ServicesPerGroupArray
                       for grupo in GroupArray)
        if not asignado:
            print(" \n [!] WARNING [!] El Servicio "+ servicio[1] + " no pertenece a ningún Grupo de Servicios \n")
# Warn about services that belong to no criticality group.
def service_nocriticgroup_test():
    for servicio in BusinessServiceArray:
        asignado = any([grupo[0], servicio[0]] in ServicesPerGroupArray
                       for grupo in GroupCriticArray)
        if not asignado:
            print(" \n [!] WARNING [!] El Servicio "+ servicio[1] + " no pertenece a ningún Grupo de Criticidad \n")
# Warn about services that belong to no access-level group.
def service_noaccessgroup_test():
    for servicio in BusinessServiceArray:
        asignado = any([grupo[0], servicio[0]] in ServicesPerGroupArray
                       for grupo in GroupAccessArray)
        if not asignado:
            print(" \n [!] WARNING [!] El Servicio "+ servicio[1] + " no pertenece a ningún Nivel de Acceso \n")
# Warn about services assigned to more than one service group.
def service_multiplegroup_test():
    for servicio in BusinessServiceArray:
        miembros = [grupo[0] for grupo in GroupArray
                    if [grupo[0], servicio[0]] in ServicesPerGroupArray]
        if len(miembros) >= 2:
            aviso = " \n [!] WARNING [!] El Servicio "+ servicio[1] + " pertenece a varios Grupos de Servicios: "
            for gid in miembros:
                aviso += "[" + getGroupName(gid) + "] "
            print(aviso)
# Warn about services assigned to more than one criticality group.
def service_multiplecriticgroup_test():
    for servicio in BusinessServiceArray:
        miembros = [grupo[0] for grupo in GroupCriticArray
                    if [grupo[0], servicio[0]] in ServicesPerGroupArray]
        if len(miembros) >= 2:
            aviso = " \n [!] WARNING [!] El Servicio "+ servicio[1] + " pertenece a varios Niveles de Criticidad: "
            for gid in miembros:
                aviso += "[" + getGroupName(gid) + "] "
            print(aviso)
# Warn about services assigned to more than one access-level group.
def service_multipleaccessgroup_test():
    for servicio in BusinessServiceArray:
        miembros = [grupo[0] for grupo in GroupAccessArray
                    if [grupo[0], servicio[0]] in ServicesPerGroupArray]
        if len(miembros) >= 2:
            aviso = " \n [!] WARNING [!] El Servicio "+ servicio[1] + " pertenece a varios Niveles de Acceso: "
            for gid in miembros:
                aviso += "[" + getGroupName(gid) + "] "
            print(aviso)
#
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals
from werkzeug.local import Local, release_local
from functools import wraps
import os, importlib, inspect, logging, json
# public
from frappe.__version__ import __version__
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template
# Werkzeug thread-local container holding all per-request / per-site state.
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
def _(msg, lang=None):
"""Returns translated string in current lang, if exists."""
if not lang:
lang = local.lang
if lang == "en":
return msg
from frappe.translate import get_full_dict
return get_full_dict(local.lang).get(msg) or msg
def get_lang_dict(fortype, name=None):
	"""Returns the translated language dict for the given type and name.

	:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
	:param name: name of the document for which assets are to be returned."""
	# English needs no translation table
	if local.lang=="en":
		return {}
	from frappe.translate import get_dict
	return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
	"""Guess and set user language for the session. `frappe.local.lang`

	:param user: user whose language preference is looked up.
	:param user_language: NOTE(review): accepted but never used — the value
	    always comes from get_user_lang(user); confirm the param is vestigial."""
	from frappe.translate import get_user_lang
	local.lang = get_user_lang(user)
# local-globals: werkzeug proxies that resolve to attributes of `local`,
# so e.g. `frappe.db` always refers to the current thread's database.
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None):
	"""Initialize frappe for the current site. Reset thread locals `frappe.local`

	:param site: site name (directory under *sites_path*).
	:param sites_path: base directory containing all sites (default ".")."""
	if getattr(local, "initialised", None):
		return  # this thread-local context is already set up
	if not sites_path:
		sites_path = '.'
	# per-request log buffers
	local.error_log = []
	local.message_log = []
	local.debug_log = []
	local.realtime_log = []
	# behaviour flags consulted throughout the framework
	local.flags = _dict({
		"ran_schedulers": [],
		"redirect_location": "",
		"in_install_db": False,
		"in_install_app": False,
		"in_import": False,
		"in_test": False,
		"mute_messages": False,
		"ignore_links": False,
		"mute_emails": False,
		"has_dataurl": False,
	})
	local.rollback_observers = []
	local.test_objects = {}
	# site identity and filesystem layout
	local.site = site
	local.sites_path = sites_path
	local.site_path = os.path.join(sites_path, site)
	local.request_ip = None
	local.response = _dict({"docs":[]})
	local.task_id = None
	# site configuration and language
	local.conf = _dict(get_site_config())
	local.lang = local.conf.lang or "en"
	local.lang_full_dict = None
	# lazily-filled caches (module map, user, permissions, jinja env, ...)
	local.module_app = None
	local.app_modules = None
	local.system_settings = None
	local.user = None
	local.user_obj = None
	local.session = None
	local.role_permissions = {}
	local.valid_columns = {}
	local.new_doc_templates = {}
	local.jenv = None
	local.jloader =None
	local.cache = {}
	setup_module_map()
	local.initialised = True
def connect(site=None, db_name=None):
	"""Connect to site database instance.

	:param site: If site is given, calls `frappe.init`.
	:param db_name: Optional. Will use from `site_config.json`."""
	from database import Database
	if site:
		init(site)
	local.db = Database(user=db_name or local.conf.db_name)
	local.form_dict = _dict()
	local.session = _dict()
	# fresh connections start with full rights; callers downgrade via set_user()
	set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
	"""Returns `site_config.json` combined with `sites/common_site_config.json`.

	`site_config` is a set of site wide settings like database name, password, email etc."""
	sites_path = sites_path or getattr(local, "sites_path", None)
	site_path = site_path or getattr(local, "site_path", None)
	config = {}
	# common config first, then the site's own config overrides it
	for base_dir, filename in ((sites_path, "common_site_config.json"),
			(site_path, "site_config.json")):
		if not base_dir:
			continue
		candidate = os.path.join(base_dir, filename)
		if os.path.exists(candidate):
			config.update(get_file_json(candidate))
	return _dict(config)
def destroy():
	"""Closes connection and releases werkzeug local."""
	# db is falsy when connect() was never called in this context
	if db:
		db.close()
	release_local(local)  # drop all thread-local state
# memcache
# module-level singleton connection, lazily created by cache()
redis_server = None
def cache():
	"""Returns memcache connection."""
	global redis_server
	if not redis_server:
		from frappe.utils.redis_wrapper import RedisWrapper
		# NOTE(review): 11311 is an unusual default port for redis (6379) — confirm
		redis_server = RedisWrapper.from_url(conf.get("cache_redis_server") or "redis://localhost:11311")
	return redis_server
def get_traceback():
	"""Returns error traceback (delegates to frappe.utils.get_traceback)."""
	import utils
	return utils.get_traceback()
def errprint(msg):
	"""Log error. This is sent back as `exc` in response.

	:param msg: Message."""
	from utils import cstr
	# echo to stdout only outside HTTP/API request handling
	if not request or (not "cmd" in local.form_dict):
		print cstr(msg)
	error_log.append(cstr(msg))
def log(msg):
	"""Add to `debug_log`.

	:param msg: Message."""
	if not request:
		# outside a request, echo when site config enables "logging"
		if conf.get("logging") or False:
			print repr(msg)
	from utils import cstr
	debug_log.append(cstr(msg))
def msgprint(msg, small=0, raise_exception=0, as_table=False):
	"""Print a message to the user (via HTTP response).
	Messages are sent in the `__server_messages` property in the
	response JSON and shown in a pop-up / modal.

	:param msg: Message.
	:param small: [optional] Show as a floating message in the footer.
	:param raise_exception: [optional] Raise given exception and show message.
	:param as_table: [optional] If `msg` is a list of lists, render as HTML table.
	"""
	from utils import cstr, encode

	def _raise_exception():
		# raise only when requested; optionally roll back the transaction first
		if raise_exception:
			if flags.rollback_on_exception:
				db.rollback()
			import inspect
			# a class argument is raised as-is; anything truthy falls back
			# to ValidationError (Python 2 raise syntax)
			if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
				raise raise_exception, encode(msg)
			else:
				raise ValidationError, encode(msg)

	# muted: still honour raise_exception, but record no message
	if flags.mute_messages:
		_raise_exception()
		return

	if as_table and type(msg) in (list, tuple):
		msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'

	if flags.print_messages:
		print "Message: " + repr(msg).encode("utf-8")

	# "__small:" prefix tells the client to render as a footer message
	message_log.append((small and '__small:' or '')+cstr(msg or ''))
	_raise_exception()
def throw(msg, exc=ValidationError):
	"""Queue *msg* for display to the user, then raise *exc* (`msgprint`).

	:param msg: Message.
	:param exc: Exception class. Default `frappe.ValidationError`"""
	msgprint(msg=msg, raise_exception=exc)
def create_folder(path, with_init=False):
	"""Create *path* (and parents) if missing, optionally with an `__init__.py`.

	:param path: Folder path.
	:param with_init: Create `__init__.py` in the new folder."""
	from frappe.utils import touch_file
	if os.path.exists(path):
		return  # nothing to do for an existing folder
	os.makedirs(path)
	if with_init:
		touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
	"""Set current user.

	:param username: **User** name to set as current user."""
	local.session.user = username
	local.session.sid = username
	# drop everything cached for the previous user
	local.cache = {}
	local.form_dict = _dict()
	local.jenv = None
	local.session.data = _dict()
	local.role_permissions = {}
	local.new_doc_templates = {}
	local.user_obj = None
def get_user():
	"""Return the cached `frappe.utils.user.User` object for the session user."""
	from frappe.utils.user import User
	if not local.user_obj:
		local.user_obj = User(local.session.user)
	return local.user_obj
def get_roles(username=None):
	"""Returns roles of current user (or of *username* when given)."""
	if not local.session:
		return ["Guest"]  # no session yet: treat as guest
	if username:
		import frappe.utils.user
		return frappe.utils.user.get_roles(username)
	else:
		return get_user().get_roles()
def get_request_header(key, default=None):
	"""Return the value of an incoming HTTP request header.

	:param key: HTTP header key.
	:param default: Default value."""
	headers = request.headers
	return headers.get(key, default)
def sendmail(recipients=(), sender="", subject="No Subject", message="No Message",
		as_markdown=False, bulk=False, reference_doctype=None, reference_name=None,
		unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
		attachments=None, content=None, doctype=None, name=None, reply_to=None,
		cc=(), show_as_cc=(), message_id=None, as_bulk=False, send_after=None, expose_recipients=False,
		bulk_priority=1):
	"""Send email using user's default **Email Account** or global default **Email Account**.

	:param recipients: List of recipients.
	:param sender: Email sender. Default is current user.
	:param subject: Email Subject.
	:param message: (or `content`) Email Content.
	:param as_markdown: Convert content markdown to HTML.
	:param bulk: Send via scheduled email sender **Bulk Email**. Don't send immediately.
	:param bulk_priority: Priority for bulk email, default 1.
	:param reference_doctype: (or `doctype`) Append as communication to this DocType.
	:param reference_name: (or `name`) Append as communication to this document name.
	:param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
	:param unsubscribe_params: Unsubscribe paramaters to be loaded on the unsubscribe_method [optional] (dict).
	:param attachments: List of attachments.
	:param reply_to: Reply-To email id.
	:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
	:param send_after: Send after the given datetime.
	:param expose_recipients: Display all recipients in the footer message - "This email was sent to"
	"""
	# queued delivery via the Bulk Email scheduler
	if bulk or as_bulk:
		import frappe.email.bulk
		frappe.email.bulk.send(recipients=recipients, sender=sender,
			subject=subject, message=content or message,
			reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
			unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
			attachments=attachments, reply_to=reply_to, cc=cc, show_as_cc=show_as_cc, message_id=message_id, send_after=send_after,
			expose_recipients=expose_recipients, bulk_priority=bulk_priority)
	else:
		# immediate delivery; `content` wins over `message` in both paths
		import frappe.email
		if as_markdown:
			frappe.email.sendmail_md(recipients, sender=sender,
				subject=subject, msg=content or message, attachments=attachments, reply_to=reply_to,
				cc=cc, message_id=message_id)
		else:
			frappe.email.sendmail(recipients, sender=sender,
				subject=subject, msg=content or message, attachments=attachments, reply_to=reply_to,
				cc=cc, message_id=message_id)
# module-level registries populated by the @whitelist decorator
logger = None          # NOTE(review): assigned here but never configured in this chunk
whitelisted = []       # all HTTP-accessible functions
guest_methods = []     # subset callable without login
xss_safe_methods = []  # subset flagged xss_safe
def whitelist(allow_guest=False, xss_safe=False):
	"""
	Decorator for whitelisting a function and making it accessible via HTTP.
	Standard request will be `/api/method/[path.to.method]`

	:param allow_guest: Allow non logged-in user to access this method.
	:param xss_safe: Register the method in `xss_safe_methods`.

	Use as:

		@frappe.whitelist()
		def myfunc(param1, param2):
			pass
	"""
	def register(fn):
		# record the function in the module-level registries; the function
		# itself is returned unchanged
		global whitelisted, guest_methods, xss_safe_methods
		whitelisted.append(fn)
		if allow_guest:
			guest_methods.append(fn)
		if xss_safe:
			xss_safe_methods.append(fn)
		return fn
	return register
def only_for(roles):
	"""Raise `frappe.PermissionError` if the user does not have any of the given **Roles**.

	:param roles: List of roles to check (a single role may be passed bare)."""
	if not isinstance(roles, (tuple, list)):
		roles = (roles,)
	wanted = set(roles)
	if wanted.isdisjoint(get_roles()):
		raise PermissionError
def clear_cache(user=None, doctype=None):
	"""Clear **User**, **DocType** or global cache.

	:param user: If user is given, only user cache is cleared.
	:param doctype: If doctype is given, only DocType cache is cleared."""
	import frappe.sessions
	if doctype:
		import frappe.model.meta
		frappe.model.meta.clear_cache(doctype)
		# client-side assets must be rebuilt when metadata changes
		reset_metadata_version()
	elif user:
		frappe.sessions.clear_cache(user)
	else: # everything
		import translate
		frappe.sessions.clear_cache()
		translate.clear_cache()
		reset_metadata_version()
		frappe.local.cache = {}
		# let installed apps clear their own caches via the hook
		for fn in frappe.get_hooks("clear_cache"):
			get_attr(fn)()
	# role permissions are rebuilt lazily on next access
	frappe.local.role_permissions = {}
def has_permission(doctype, ptype="read", doc=None, user=None, verbose=False, throw=False):
	"""Return whether the user is permitted; optionally raise on failure.

	:param doctype: DocType for which permission is to be check.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param doc: [optional] Checks User permissions for given doc.
	:param user: [optional] Check for given user. Default: current user.
	:param throw: [optional] Raise `frappe.PermissionError` (via `frappe.throw`) when not permitted."""
	import frappe.permissions
	out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)
	if throw and not out:
		if doc:
			frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name))
		else:
			frappe.throw(_("No permission for {0}").format(doctype))
	return out
def has_website_permission(doctype, ptype="read", doc=None, user=None, verbose=False):
	"""Return whether website permission hooks allow access for the doctype.

	:param doctype: DocType for which permission is to be check.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param doc: Checks User permissions for given doc (name or Document).
	:param user: [optional] Check for given user. Default: current user."""
	if not user:
		user = session.user

	# hooks registered per-doctype under "has_website_permission"
	hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
	if hooks:
		if isinstance(doc, basestring):
			doc = get_doc(doctype, doc)  # resolve name to a Document

		for method in hooks:
			result = call(get_attr(method), doc=doc, ptype=ptype, user=user, verbose=verbose)
			# if even a single permission check is Falsy
			if not result:
				return False

		# else it is Truthy
		return True

	else:
		# no hooks registered: deny by default
		return False
def is_table(doctype):
	"""Returns True if `istable` property (indicating child Table) is set for given DocType."""
	def get_tables():
		# hits the database only on a cache miss
		return db.sql_list("select name from tabDocType where istable=1")
	tables = cache().get_value("is_table", get_tables)
	return doctype in tables
def get_precision(doctype, fieldname, currency=None, doc=None):
	"""Get precision for a given field.

	NOTE(review): `currency` is accepted but not forwarded to
	get_field_precision — confirm whether it should be."""
	from frappe.model.meta import get_field_precision
	return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency)
def generate_hash(txt=None, length=None):
	"""Generates random hash for given text + current timestamp + random string.

	:param txt: [optional] text to mix into the hash.
	:param length: [optional] truncate the hex digest to this many characters."""
	import hashlib, time
	from .utils import random_string
	digest = hashlib.sha224((txt or "") + repr(time.time()) + repr(random_string(8))).hexdigest()
	if length:
		digest = digest[:length]
	return digest
def reset_metadata_version():
	"""Reset `metadata_version` (Client (Javascript) build ID) hash.

	A fresh random hash invalidates metadata cached on clients."""
	v = generate_hash()
	cache().set_value("metadata_version", v)
	return v
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
	"""Returns a new document of the given DocType with defaults set.

	:param doctype: DocType of the new document.
	:param parent_doc: [optional] add to parent document.
	:param parentfield: [optional] add against this `parentfield`.
	:param as_dict: [optional] return a plain dict instead of a Document."""
	from frappe.model.create_new import get_new_doc
	return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value):
	"""Set document value. Calls `frappe.client.set_value`"""
	import frappe.client
	return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(arg1, arg2=None):
	"""Return a `frappe.model.document.Document` object of the given type and name.

	:param arg1: DocType name as string **or** document JSON.
	:param arg2: [optional] Document name as string.

	Examples:

		# insert a new document
		todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
		todo.insert()

		# open an existing document
		todo = frappe.get_doc("ToDo", "TD0001")
	"""
	import frappe.model.document
	return frappe.model.document.get_doc(arg1, arg2)
def get_last_doc(doctype):
	"""Get last created document of this type.

	:raises DoesNotExistError: if no document of the given type exists."""
	d = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
	if d:
		return get_doc(doctype, d[0].name)
	else:
		raise DoesNotExistError
def get_single(doctype):
	"""Return a `frappe.model.document.Document` object of the given Single doctype."""
	# Single doctypes are named after themselves
	return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
	"""Get `frappe.model.meta.Meta` instance of given doctype name.

	:param cached: [optional] serve from cache when available. Default True."""
	import frappe.model.meta
	return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
	"""Return the Python module that defines the given DocType's controller."""
	import frappe.modules
	return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
	ignore_permissions=False, flags=None):
	"""Delete a document. Calls `frappe.model.delete_doc.delete_doc`.

	:param doctype: DocType of document to be delete.
	:param name: Name of document to be delete.
	:param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
	:param ignore_doctypes: Ignore if child table is one of these.
	:param for_reload: Call `before_reload` trigger before deleting.
	:param ignore_permissions: Ignore user permissions.
	:param flags: [optional] extra flags passed through to the delete call."""
	import frappe.model.delete_doc
	frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload,
		ignore_permissions, flags)
def delete_doc_if_exists(doctype, name):
	"""Delete document if exists."""
	if db.exists(doctype, name):
		delete_doc(doctype, name)
def reload_doctype(doctype, force=False):
	"""Reload DocType from model (`[module]/[doctype]/[name]/[name].json`) files.

	:param force: Reload even if `modified` timestamp matches."""
	reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype), force=force)
def reload_doc(module, dt=None, dn=None, force=False):
	"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.

	:param module: Module name.
	:param dt: DocType name.
	:param dn: Document name.
	:param force: Reload even if `modified` timestamp matches.
	"""
	import frappe.modules
	return frappe.modules.reload_doc(module, dt, dn, force=force)
def rename_doc(doctype, old, new, debug=0, force=False, merge=False, ignore_permissions=False):
	"""Rename a document. Calls `frappe.model.rename_doc.rename_doc`

	NOTE(review): `debug` is accepted but not forwarded to the underlying call."""
	from frappe.model.rename_doc import rename_doc
	return rename_doc(doctype, old, new, force=force, merge=merge, ignore_permissions=ignore_permissions)
def get_module(modulename):
	"""Returns a module object for given Python module name using `importlib.import_module`."""
	return importlib.import_module(modulename)
def scrub(txt):
	"""Sluggify a label: spaces and hyphens become underscores, lowercased.
	e.g. `Sales Order` becomes `sales_order`."""
	slug = txt.lower()
	for separator in (' ', '-'):
		slug = slug.replace(separator, '_')
	return slug
def unscrub(txt):
	"""Titlify a slug: underscores and hyphens become spaces, words title-cased.
	e.g. `sales_order` becomes `Sales Order`."""
	spaced = txt.replace('_', ' ')
	spaced = spaced.replace('-', ' ')
	return spaced.title()
def get_module_path(module, *joins):
	"""Get the path of the given module name.

	:param module: Module name.
	:param *joins: Join additional path elements using `os.path.join`."""
	module = scrub(module)
	# local.module_app maps module -> app (built by setup_module_map)
	return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
	"""Return path of given app.

	:param app_name: App name.
	:param *joins: Join additional path elements using `os.path.join`."""
	return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
	"""Return path of current site.

	:param *joins: Join additional path elements using `os.path.join`."""
	return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
	"""Return path of given Python module name.

	:param modulename: Python module name.
	:param *joins: Join additional path elements using `os.path.join`."""
	if not "public" in joins:
		# path parts are sluggified unless pointing into a "public" assets folder
		joins = [scrub(part) for part in joins]
	return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins)
def get_module_list(app_name):
	"""Get list of modules for given app via `app/modules.txt`."""
	return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_frappe=False, with_internal_apps=True, sites_path=None):
	"""Get list of all apps via `sites/apps.txt`.

	:param with_frappe: ensure "frappe" is the first entry of the result.
	:param with_internal_apps: also include apps from the site's own `apps.txt`.
	:param sites_path: [optional] override `local.sites_path`."""
	if not sites_path:
		sites_path = local.sites_path
	apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)
	if with_internal_apps:
		apps.extend(get_file_items(os.path.join(local.site_path, "apps.txt")))
	if with_frappe:
		# frappe must always come first so its hooks/modules load before others
		if "frappe" in apps:
			apps.remove("frappe")
		apps.insert(0, 'frappe')
	return apps
def get_installed_apps(sort=False, frappe_last=False):
	"""Get list of installed apps in current site.

	:param sort: order the result as in `get_all_apps`.
	:param frappe_last: move "frappe" to the end of the list."""
	# NOTE(review): the default True means a missing in_install_db flag also
	# yields [] -- looks intentional for pre-init calls, but confirm
	if getattr(flags, "in_install_db", True):
		return []
	if not db:
		connect()
	installed = json.loads(db.get_global("installed_apps") or "[]")
	if sort:
		installed = [app for app in get_all_apps(True) if app in installed]
	if frappe_last:
		if 'frappe' in installed:
			installed.remove('frappe')
		installed.append('frappe')
	return installed
def get_hooks(hook=None, default=None, app_name=None):
	"""Get hooks via `app/hooks.py`

	:param hook: Name of the hook. Will gather all hooks for this name and return as a list.
	:param default: Default if no hook found.
	:param app_name: Filter by app."""
	def load_app_hooks(app_name=None):
		hooks = {}
		for app in [app_name] if app_name else get_installed_apps(sort=True):
			app = "frappe" if app=="webnotes" else app	# legacy app name
			try:
				app_hooks = get_module(app + ".hooks")
			except ImportError:
				if local.flags.in_install_app:
					# if app is not installed while restoring, ignore it.
					# BUGFIX: previously `pass` fell through to the raise
					# below, so the ImportError escaped even during install.
					continue
				raise
			# collect every public attribute of the hooks module
			for key in dir(app_hooks):
				if not key.startswith("_"):
					append_hook(hooks, key, getattr(app_hooks, key))
		return hooks
	def append_hook(target, key, value):
		# dict-valued hooks are merged recursively; everything else is listed
		if isinstance(value, dict):
			target.setdefault(key, {})
			for inkey in value:
				append_hook(target[key], inkey, value[inkey])
		else:
			append_to_list(target, key, value)
	def append_to_list(target, key, value):
		target.setdefault(key, [])
		if not isinstance(value, list):
			value = [value]
		target[key].extend(value)
	if app_name:
		hooks = _dict(load_app_hooks(app_name))
	else:
		# all-apps hooks are cached; rebuilt via load_app_hooks on a miss
		hooks = _dict(cache().get_value("app_hooks", load_app_hooks))
	if hook:
		return hooks.get(hook) or (default if default is not None else [])
	else:
		return hooks
def setup_module_map():
	"""Rebuild map of all modules (internal).

	Populates `local.app_modules` (app -> [modules]) and `local.module_app`
	(module -> app), loading from / saving to the cache when a database
	connection is configured."""
	_cache = cache()
	if conf.db_name:
		local.app_modules = _cache.get_value("app_modules")
		local.module_app = _cache.get_value("module_app")
	if not (local.app_modules and local.module_app):
		local.module_app, local.app_modules = {}, {}
		for app in get_all_apps(True):
			if app=="webnotes": app="frappe"
			local.app_modules.setdefault(app, [])
			for module in get_module_list(app):
				module = scrub(module)
				local.module_app[module] = app
				local.app_modules[app].append(module)
		if conf.db_name:
			_cache.set_value("app_modules", local.app_modules)
			_cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
	"""Returns items from text file as a list. By default skips empty lines and
	lines starting with `#`; pass `ignore_empty_lines=False` to keep every line."""
	import frappe.utils
	content = read_file(path, raise_not_found=raise_not_found)
	if content:
		content = frappe.utils.strip(content)
		return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
	else:
		return []
def get_file_json(path):
	"""Read a file and return parsed JSON object."""
	with open(path, 'r') as f:
		return json.load(f)
def read_file(path, raise_not_found=False):
	"""Open a file and return its content as Unicode.

	:param path: File path (unicode paths are encoded to utf-8 before open).
	:param raise_not_found: raise IOError instead of returning None when missing."""
	from frappe.utils import cstr
	if isinstance(path, unicode):
		path = path.encode("utf-8")
	if os.path.exists(path):
		with open(path, "r") as f:
			return cstr(f.read())
	elif raise_not_found:
		raise IOError("{} Not Found".format(path))
	else:
		return None
def get_attr(method_string):
	"""Get python method object from its dotted-path name.

	:param method_string: e.g. `"app.module.function"`.
	:raises AppNotInstalledError: if the app prefix is not installed."""
	app_name = method_string.split(".")[0]
	if not local.flags.in_install and app_name not in get_installed_apps():
		throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)
	# split dotted path into module and attribute name
	modulename = '.'.join(method_string.split('.')[:-1])
	methodname = method_string.split('.')[-1]
	return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
	"""Call *fn*, passing only the keyword arguments it accepts.

	Unaccepted keyword arguments are silently dropped (and a `flags` kwarg is
	always dropped), so hook methods with differing signatures can all be
	invoked with one common kwarg set.

	:param fn: Callable. May carry a `fnargs` attribute explicitly listing the
		accepted argument names (skips introspection)."""
	if hasattr(fn, 'fnargs'):
		fnargs = fn.fnargs
		# BUGFIX: varkw was left unassigned on this path, causing a
		# NameError below; explicit fnargs implies no **kwargs catch-all
		varkw = None
	else:
		fnargs, varargs, varkw, defaults = inspect.getargspec(fn)
	newargs = {}
	for a in kwargs:
		# keep the kwarg if fn names it, or accepts arbitrary **kwargs
		if (a in fnargs) or varkw:
			newargs[a] = kwargs.get(a)
	if "flags" in newargs:
		del newargs["flags"]
	return fn(*args, **newargs)
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
	"""Create a new **Property Setter** (for overriding DocType and DocField properties).

	:param args: dict with keys `doctype_or_field`, `doctype`, `fieldname`,
		`property`, `value` and `property_type`.
	:param ignore_validate: sets `flags.ignore_validate` on the new doc.
	:param validate_fields_for_doctype: sets `flags.validate_fields_for_doctype`."""
	args = _dict(args)
	ps = get_doc({
		'doctype': "Property Setter",
		'doctype_or_field': args.doctype_or_field or "DocField",
		'doc_type': args.doctype,
		'field_name': args.fieldname,
		'property': args.property,
		'value': args.value,
		'property_type': args.property_type or "Data",
		'__islocal': 1
	})
	ps.flags.ignore_validate = ignore_validate
	ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
	ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
	"""Import a file using Data Import Tool."""
	from frappe.core.page.data_import_tool import data_import_tool
	data_import_tool.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
	"""Return a deep copy of the given document, reset to an unsaved state.

	:param doc: Document (or dict) to copy.
	:param ignore_no_copy: if True (default), fields marked `no_copy` are
		copied as well; if False they are cleared on the copy and its children."""
	import copy
	def remove_no_copy_fields(d):
		# clear values of all fields flagged no_copy in the doc's meta
		for df in d.meta.get("fields", {"no_copy": 1}):
			if hasattr(d, df.fieldname):
				d.set(df.fieldname, None)
	if not isinstance(doc, dict):
		d = doc.as_dict()
	else:
		d = doc
	newdoc = get_doc(copy.deepcopy(d))
	# make the copy look like a brand-new, never-saved document
	newdoc.name = None
	newdoc.set("__islocal", 1)
	newdoc.owner = None
	newdoc.creation = None
	newdoc.amended_from = None
	newdoc.amendment_date = None
	if not ignore_no_copy:
		remove_no_copy_fields(newdoc)
	# same reset for each child row
	for d in newdoc.get_all_children():
		d.name = None
		d.parent = None
		d.set("__islocal", 1)
		d.owner = None
		d.creation = None
		if not ignore_no_copy:
			remove_no_copy_fields(d)
	return newdoc
def compare(val1, condition, val2):
	"""Compare two values using `frappe.utils.compare`

	`condition` could be:
	- "^"
	- "in"
	- "not in"
	- "="
	- "!="
	- ">"
	- "<"
	- ">="
	- "<="
	- "not None"
	- "None"
	"""
	import frappe.utils
	return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None):
	"""Send response as a web page with a message rather than JSON. Used to show permission errors etc.

	:param title: Page title and heading.
	:param html: Message HTML to be shown.
	:param success: Alert message.
	:param http_status_code: HTTP status code."""
	local.message_title = title
	local.message = html
	local.message_success = success
	# render via the "message" page template instead of a JSON payload
	local.response['type'] = 'page'
	local.response['page_name'] = 'message'
	if http_status_code:
		local.response['http_status_code'] = http_status_code
def build_match_conditions(doctype, as_condition=True):
	"""Return match (User permissions) for given doctype as list or SQL.

	:param as_condition: return an SQL condition string (True) or a list."""
	import frappe.desk.reportview
	return frappe.desk.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, *args, **kwargs):
	"""List database query via `frappe.model.db_query`. Will also check for permissions.

	:param doctype: DocType on which query is to be made.
	:param fields: List of fields or `*`.
	:param filters: List of filters (see example).
	:param order_by: Order By e.g. `modified desc`.
	:param limit_page_start: Start results at record #. Default 0.
	:param limit_page_length: No of records in the page. Default 20.

	Example usage:

		# simple dict filter
		frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

		# filter as a list of lists
		frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])

		# filter as a list of dicts
		frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
	"""
	import frappe.model.db_query
	return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
	"""List database query via `frappe.model.db_query`. Will **not** check for permissions.
	Parameters are same as `frappe.get_list`

	:param doctype: DocType on which query is to be made.
	:param fields: List of fields or `*`. Default is: `["name"]`.
	:param filters: List of filters (see example).
	:param order_by: Order By e.g. `modified desc`.
	:param limit_page_start: Start results at record #. Default 0.
	:param limit_page_length: No of records in the page. Default 20.

	Example usage:

		# simple dict filter
		frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

		# filter as a list of lists
		frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])

		# filter as a list of dicts
		frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
	"""
	kwargs["ignore_permissions"] = True
	# unlike get_list, default to returning ALL matching records
	if "limit_page_length" not in kwargs:
		kwargs["limit_page_length"] = 0
	return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
	"""Returns a document property or list of properties.
	Alias for `frappe.db.get_value`

	:param doctype: DocType name.
	:param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
	:param fieldname: Column name.
	:param ignore: Don't raise exception if table, column is missing.
	:param as_dict: Return values as dict.
	:param debug: Print query in error log.
	"""
	return db.get_value(*args, **kwargs)
def add_version(doc):
	"""Insert a new **Version** of the given document.
	A **Version** is a JSON dump of the current document state."""
	get_doc({
		"doctype": "Version",
		"ref_doctype": doc.doctype,
		"docname": doc.name,
		"doclist_json": as_json(doc.as_dict())
	}).insert(ignore_permissions=True)
def as_json(obj, indent=1):
	"""Serialize `obj` to a sorted, indented JSON string using frappe's JSON handler."""
	from frappe.utils.response import json_handler
	return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler)
def are_emails_muted():
	"""Return True if outgoing email is disabled via session flag or site config."""
	return flags.mute_emails or conf.get("mute_emails") or False
def get_test_records(doctype):
	"""Returns list of objects from `test_records.json` in the given doctype's folder."""
	from frappe.modules import get_doctype_module, get_module_path
	path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json")
	if os.path.exists(path):
		with open(path, "r") as f:
			return json.loads(f.read())
	else:
		# no fixtures defined for this doctype
		return []
def format_value(value, df, doc=None, currency=None):
	"""Format value with given field properties.

	:param value: Value to be formatted.
	:param df: DocField object with properties `fieldtype`, `options` etc.
	:param doc: [optional] document, passed to the formatter.
	:param currency: [optional] currency, passed to the formatter."""
	import frappe.utils.formatters
	return frappe.utils.formatters.format_value(value, df, doc, currency=currency)
def get_print(doctype, name, print_format=None, style=None, html=None, as_pdf=False):
	"""Get Print Format for given document.

	:param doctype: DocType of document.
	:param name: Name of document.
	:param print_format: Print Format name. Default 'Standard',
	:param style: Print Format style.
	:param html: [optional] pre-rendered HTML; skips building the print page.
	:param as_pdf: Return as PDF. Default False."""
	from frappe.website.render import build_page
	from frappe.utils.pdf import get_pdf
	# the "print" page template reads its inputs from form_dict
	local.form_dict.doctype = doctype
	local.form_dict.name = name
	local.form_dict.format = print_format
	local.form_dict.style = style
	if not html:
		html = build_page("print")
	if as_pdf:
		return get_pdf(html)
	else:
		return html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None):
	"""Build an email attachment dict (`fname`, `fcontent`) containing the printed
	document, as PDF or HTML per the **Print Settings** `send_print_as_pdf` flag."""
	from frappe.utils import scrub_urls
	if not file_name: file_name = name
	# sanitize name for use as a filename
	file_name = file_name.replace(' ','').replace('/','-')
	print_settings = db.get_singles_dict("Print Settings")
	# printing for attachment bypasses print permission checks
	local.flags.ignore_print_permissions = True
	if int(print_settings.send_print_as_pdf or 0):
		out = {
			"fname": file_name + ".pdf",
			"fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True)
		}
	else:
		out = {
			"fname": file_name + ".html",
			"fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html)).encode("utf-8")
		}
	local.flags.ignore_print_permissions = False
	return out
# process-wide guard so logging is configured only once
logging_setup_complete = False
def get_logger(module=None):
	"""Return a `logging.Logger` for the given module name (default "frappe"),
	running frappe's logging setup on first use."""
	from frappe.setup_logging import setup_logging
	global logging_setup_complete
	if not logging_setup_complete:
		setup_logging()
		logging_setup_complete = True
	return logging.getLogger(module or "frappe")
def publish_realtime(*args, **kwargs):
	"""Publish real-time updates. Thin wrapper over `frappe.async.publish_realtime`.

	:param event: Event name, like `task_progress` etc.
	:param message: JSON message object. For async must contain `task_id`
	:param room: Room in which to publish update (default entire site)
	:param user: Transmit to user
	:param doctype: Transmit to doctype, docname
	:param docname: Transmit to doctype, docname
	:param after_commit: (default False) will emit after current transaction is committed
	"""
	import frappe.async
	return frappe.async.publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
	"""A key value store for caching within a request

	:param namespace: frappe.local.cache[namespace]
	:param key: frappe.local.cache[namespace][key] used to retrieve value
	:param generator: method to generate a value if not found in store
	:param regenerate_if_none: re-run the generator if a previous call cached None
	"""
	if namespace not in local.cache:
		local.cache[namespace] = {}
	if key not in local.cache[namespace]:
		local.cache[namespace][key] = generator()
	# use `is None`, not `== None` -- PEP 8, and avoids invoking custom __eq__
	elif local.cache[namespace][key] is None and regenerate_if_none:
		# if key exists but the previous result was None
		local.cache[namespace][key] = generator()
	return local.cache[namespace][key]
def get_doctype_app(doctype):
	"""Return the app name that provides the given DocType (cached per request)."""
	def _get_doctype_app():
		doctype_module = local.db.get_value("DocType", doctype, "module")
		return local.module_app[scrub(doctype_module)]
	return local_cache("doctype_app", doctype, generator=_get_doctype_app)
# [fix] frappe._ source message should always be converted to unicode
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals
from werkzeug.local import Local, release_local
from functools import wraps
import os, importlib, inspect, logging, json
# public
from frappe.__version__ import __version__
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
def _(msg, lang=None):
	"""Returns translated string in current lang, if exists.

	:param msg: Source message (always coerced to unicode).
	:param lang: [optional] Target language. Default: session language."""
	from frappe.translate import get_full_dict
	from frappe.utils import cstr
	if not lang:
		lang = local.lang
	# msg should always be unicode
	msg = cstr(msg)
	if lang == "en":
		return msg
	# BUGFIX: look up the resolved `lang`, not `local.lang` -- an explicitly
	# passed lang argument was previously ignored in the dictionary lookup
	return get_full_dict(lang).get(msg) or msg
def get_lang_dict(fortype, name=None):
	"""Returns the translated language dict for the given type and name.

	:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
	:param name: name of the document for which assets are to be returned."""
	if local.lang=="en":
		# nothing to translate for the source language
		return {}
	from frappe.translate import get_dict
	return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
	"""Guess and set user language for the session. `frappe.local.lang`

	NOTE(review): `user_language` is accepted but never forwarded to
	`get_user_lang` -- confirm whether it should be."""
	from frappe.translate import get_user_lang
	local.lang = get_user_lang(user)
# local-globals
# Werkzeug local proxies: each name resolves, per thread/request, to the
# corresponding attribute on `frappe.local` (populated by init()/connect()).
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None):
	"""Initialize frappe for the current site. Reset thread locals `frappe.local`

	:param site: Site (folder) name under `sites_path`.
	:param sites_path: [optional] Path containing all sites. Default: current dir."""
	if getattr(local, "initialised", None):
		# already initialised for this thread/request -- do nothing
		return
	if not sites_path:
		sites_path = '.'
	local.error_log = []
	local.message_log = []
	local.debug_log = []
	local.realtime_log = []
	local.flags = _dict({
		"ran_schedulers": [],
		"redirect_location": "",
		"in_install_db": False,
		"in_install_app": False,
		"in_import": False,
		"in_test": False,
		"mute_messages": False,
		"ignore_links": False,
		"mute_emails": False,
		"has_dataurl": False,
	})
	local.rollback_observers = []
	local.test_objects = {}
	local.site = site
	local.sites_path = sites_path
	local.site_path = os.path.join(sites_path, site)
	local.request_ip = None
	local.response = _dict({"docs":[]})
	local.task_id = None
	local.conf = _dict(get_site_config())
	local.lang = local.conf.lang or "en"
	local.lang_full_dict = None
	# module/app maps are rebuilt by setup_module_map() below
	local.module_app = None
	local.app_modules = None
	local.system_settings = None
	local.user = None
	local.user_obj = None
	local.session = None
	local.role_permissions = {}
	local.valid_columns = {}
	local.new_doc_templates = {}
	local.jenv = None
	local.jloader =None
	local.cache = {}
	setup_module_map()
	local.initialised = True
def connect(site=None, db_name=None):
	"""Connect to site database instance.

	:param site: If site is given, calls `frappe.init`.
	:param db_name: Optional. Will use from `site_config.json`."""
	from database import Database
	if site:
		init(site)
	local.db = Database(user=db_name or local.conf.db_name)
	local.form_dict = _dict()
	local.session = _dict()
	# sessions opened via connect() run with full privileges
	set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
	"""Returns `site_config.json` combined with `sites/common_site_config.json`.
	`site_config` is a set of site wide settings like database name, password, email etc.

	Site-specific values override the common config (applied second)."""
	config = {}
	sites_path = sites_path or getattr(local, "sites_path", None)
	site_path = site_path or getattr(local, "site_path", None)
	if sites_path:
		common_site_config = os.path.join(sites_path, "common_site_config.json")
		if os.path.exists(common_site_config):
			config.update(get_file_json(common_site_config))
	if site_path:
		site_config = os.path.join(site_path, "site_config.json")
		if os.path.exists(site_config):
			config.update(get_file_json(site_config))
	return _dict(config)
def destroy():
	"""Closes connection and releases werkzeug local."""
	if db:
		db.close()
	release_local(local)
# memcache
# process-wide redis connection, created lazily by cache()
redis_server = None
def cache():
	"""Returns memcache connection."""
	global redis_server
	if not redis_server:
		from frappe.utils.redis_wrapper import RedisWrapper
		# port 11311 is the deployment's redis-cache convention -- TODO confirm
		redis_server = RedisWrapper.from_url(conf.get("cache_redis_server") or "redis://localhost:11311")
	return redis_server
def get_traceback():
	"""Returns error traceback."""
	import utils
	return utils.get_traceback()
def errprint(msg):
	"""Log error. This is sent back as `exc` in response.

	:param msg: Message."""
	from utils import cstr
	if not request or (not "cmd" in local.form_dict):
		# not serving an HTTP request (console/tests) -- print instead
		print cstr(msg)
	error_log.append(cstr(msg))
def log(msg):
	"""Add to `debug_log`.

	:param msg: Message."""
	if not request:
		# outside an HTTP request, echo to stdout if site logging is enabled
		if conf.get("logging") or False:
			print repr(msg)
	from utils import cstr
	debug_log.append(cstr(msg))
def msgprint(msg, small=0, raise_exception=0, as_table=False):
	"""Print a message to the user (via HTTP response).
	Messages are sent in the `__server_messages` property in the
	response JSON and shown in a pop-up / modal.

	:param msg: Message.
	:param small: [optional] Show as a floating message in the footer.
	:param raise_exception: [optional] Raise given exception and show message.
	:param as_table: [optional] If `msg` is a list of lists, render as HTML table.
	"""
	from utils import cstr, encode
	def _raise_exception():
		if raise_exception:
			if flags.rollback_on_exception:
				db.rollback()
			import inspect
			if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
				# an exception class was given -- raise it with the message
				raise raise_exception, encode(msg)
			else:
				# any other truthy value falls back to ValidationError
				raise ValidationError, encode(msg)
	if flags.mute_messages:
		# messages suppressed (e.g. during install) but still raise if asked
		_raise_exception()
		return
	if as_table and type(msg) in (list, tuple):
		msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'
	if flags.print_messages:
		print "Message: " + repr(msg).encode("utf-8")
	message_log.append((small and '__small:' or '')+cstr(msg or ''))
	_raise_exception()
def throw(msg, exc=ValidationError):
	"""Throw exception and show message (`msgprint`).

	:param msg: Message.
	:param exc: Exception class. Default `frappe.ValidationError`"""
	msgprint(msg, raise_exception=exc)
def create_folder(path, with_init=False):
	"""Create a folder in the given path and add an `__init__.py` file (optional).

	:param path: Folder path.
	:param with_init: Create `__init__.py` in the new folder."""
	from frappe.utils import touch_file
	if not os.path.exists(path):
		os.makedirs(path)
		if with_init:
			touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
	"""Set current user and reset all per-user cached state.

	:param username: **User** name to set as current user."""
	local.session.user = username
	local.session.sid = username
	local.cache = {}
	local.form_dict = _dict()
	local.jenv = None
	local.session.data = _dict()
	local.role_permissions = {}
	local.new_doc_templates = {}
	local.user_obj = None
def get_user():
	"""Return a `frappe.utils.user.User` instance for the session user (built lazily)."""
	from frappe.utils.user import User
	if not local.user_obj:
		local.user_obj = User(local.session.user)
	return local.user_obj
def get_roles(username=None):
	"""Returns roles of current user (or of `username` if given)."""
	if not local.session:
		# no session yet (e.g. during boot) -- everyone is a Guest
		return ["Guest"]
	if username:
		import frappe.utils.user
		return frappe.utils.user.get_roles(username)
	else:
		return get_user().get_roles()
def get_request_header(key, default=None):
	"""Return HTTP request header.

	:param key: HTTP header key.
	:param default: Default value."""
	return request.headers.get(key, default)
def sendmail(recipients=(), sender="", subject="No Subject", message="No Message",
	as_markdown=False, bulk=False, reference_doctype=None, reference_name=None,
	unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
	attachments=None, content=None, doctype=None, name=None, reply_to=None,
	cc=(), show_as_cc=(), message_id=None, as_bulk=False, send_after=None, expose_recipients=False,
	bulk_priority=1):
	"""Send email using user's default **Email Account** or global default **Email Account**.

	:param recipients: List of recipients.
	:param sender: Email sender. Default is current user.
	:param subject: Email Subject.
	:param message: (or `content`) Email Content.
	:param as_markdown: Convert content markdown to HTML (non-bulk path only).
	:param bulk: (or `as_bulk`) Send via scheduled email sender **Bulk Email**. Don't send immediately.
	:param bulk_priority: Priority for bulk email, default 1.
	:param reference_doctype: (or `doctype`) Append as communication to this DocType.
	:param reference_name: (or `name`) Append as communication to this document name.
	:param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
	:param unsubscribe_params: Unsubscribe parameters to be loaded on the unsubscribe_method [optional] (dict).
	:param attachments: List of attachments.
	:param reply_to: Reply-To email id.
	:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
	:param send_after: Send after the given datetime.
	:param expose_recipients: Display all recipients in the footer message - "This email was sent to"
	"""
	if bulk or as_bulk:
		# queued path: delivered later by the Bulk Email scheduler
		import frappe.email.bulk
		frappe.email.bulk.send(recipients=recipients, sender=sender,
			subject=subject, message=content or message,
			reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
			unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
			attachments=attachments, reply_to=reply_to, cc=cc, show_as_cc=show_as_cc, message_id=message_id, send_after=send_after,
			expose_recipients=expose_recipients, bulk_priority=bulk_priority)
	else:
		# immediate path
		import frappe.email
		if as_markdown:
			frappe.email.sendmail_md(recipients, sender=sender,
				subject=subject, msg=content or message, attachments=attachments, reply_to=reply_to,
				cc=cc, message_id=message_id)
		else:
			frappe.email.sendmail(recipients, sender=sender,
				subject=subject, msg=content or message, attachments=attachments, reply_to=reply_to,
				cc=cc, message_id=message_id)
logger = None
# module-level registries populated by the @whitelist decorator
whitelisted = []
guest_methods = []
xss_safe_methods = []
def whitelist(allow_guest=False, xss_safe=False):
	"""
	Decorator for whitelisting a function and making it accessible via HTTP.
	Standard request will be `/api/method/[path.to.method]`

	:param allow_guest: Allow non logged-in user to access this method.
	:param xss_safe: Register in `xss_safe_methods` (presumably exempts the
		method's output from XSS sanitization -- confirm against caller).

	Use as:

		@frappe.whitelist()
		def myfunc(param1, param2):
			pass
	"""
	def innerfn(fn):
		global whitelisted, guest_methods, xss_safe_methods
		whitelisted.append(fn)
		if allow_guest:
			guest_methods.append(fn)
		if xss_safe:
			xss_safe_methods.append(fn)
		return fn
	return innerfn
def only_for(roles):
	"""Raise `frappe.PermissionError` if the user does not have any of the given **Roles**.

	:param roles: A single role name, or a list/tuple of role names."""
	if not isinstance(roles, (tuple, list)):
		roles = (roles,)
	# permitted when at least one requested role is held by the user
	held = set(get_roles())
	if not set(roles) & held:
		raise PermissionError
def clear_cache(user=None, doctype=None):
	"""Clear **User**, **DocType** or global cache.

	:param user: If user is given, only user cache is cleared.
	:param doctype: If doctype is given, only DocType cache is cleared."""
	import frappe.sessions
	if doctype:
		import frappe.model.meta
		frappe.model.meta.clear_cache(doctype)
		reset_metadata_version()
	elif user:
		frappe.sessions.clear_cache(user)
	else:	# everything
		import translate
		frappe.sessions.clear_cache()
		translate.clear_cache()
		reset_metadata_version()
		frappe.local.cache = {}
		# let installed apps clear their own caches too
		for fn in frappe.get_hooks("clear_cache"):
			get_attr(fn)()
	frappe.local.role_permissions = {}
def has_permission(doctype, ptype="read", doc=None, user=None, verbose=False, throw=False):
	"""Return True if the user has the given permission; raises `frappe.PermissionError`
	only when `throw` is set and permission is refused.

	:param doctype: DocType for which permission is to be check.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param doc: [optional] Checks User permissions for given doc.
	:param user: [optional] Check for given user. Default: current user.
	:param verbose: [optional] Passed to `frappe.permissions.has_permission`.
	:param throw: [optional] Raise instead of returning False."""
	import frappe.permissions
	out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)
	if throw and not out:
		if doc:
			frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name))
		else:
			frappe.throw(_("No permission for {0}").format(doctype))
	return out
def has_website_permission(doctype, ptype="read", doc=None, user=None, verbose=False):
    """Check website permission via the "has_website_permission" hooks for *doctype*.

    Returns False when no hooks are registered; otherwise True only when
    every registered hook method returns a truthy value.
    """
    if not user:
        user = session.user

    hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
    if not hooks:
        return False

    if isinstance(doc, basestring):
        # a document name was passed; load it for the hook methods
        doc = get_doc(doctype, doc)

    # short-circuits on the first falsy hook result, like the original loop
    return all(
        call(get_attr(method), doc=doc, ptype=ptype, user=user, verbose=verbose)
        for method in hooks)
def is_table(doctype):
    """Return True if *doctype* is a child table (`istable=1`)."""
    table_doctypes = cache().get_value(
        "is_table", lambda: db.sql_list("select name from tabDocType where istable=1"))
    return doctype in table_doctypes
def get_precision(doctype, fieldname, currency=None, doc=None):
    """Return the precision for the given DocField of *doctype*."""
    from frappe.model.meta import get_field_precision
    df = get_meta(doctype).get_field(fieldname)
    return get_field_precision(df, doc, currency)
def generate_hash(txt=None, length=None):
    """Generates random hash for given text + current timestamp + random string.

    :param txt: [optional] seed text mixed into the digest.
    :param length: [optional] truncate the hex digest to this many characters.
    """
    import hashlib, time
    from .utils import random_string
    # sha224 over text + timestamp repr + 8 random chars.
    # NOTE(review): assumes Python 2 byte strings; under Python 3 the
    # concatenated string would need .encode() before hashing.
    digest = hashlib.sha224((txt or "") + repr(time.time()) + repr(random_string(8))).hexdigest()
    if length:
        digest = digest[:length]
    return digest
def reset_metadata_version():
    """Generate, cache and return a new `metadata_version`
    (client / Javascript build id) hash."""
    new_version = generate_hash()
    cache().set_value("metadata_version", new_version)
    return new_version
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
    """Return a new document of *doctype* with defaults set.

    :param doctype: DocType of the new document.
    :param parent_doc: [optional] parent document to add the new doc to.
    :param parentfield: [optional] add against this `parentfield`.
    :param as_dict: [optional] return a dict instead of a Document."""
    from frappe.model.create_new import get_new_doc
    return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value):
    """Set a single document value (delegates to `frappe.client.set_value`)."""
    import frappe.client
    return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(arg1, arg2=None):
    """Return a `frappe.model.document.Document` object of the given type and name.

    :param arg1: DocType name as string **or** document JSON.
    :param arg2: [optional] Document name as string.

    Examples:

        # insert a new document
        todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
        todo.insert()

        # open an existing document
        todo = frappe.get_doc("ToDo", "TD0001")

    """
    import frappe.model.document
    return frappe.model.document.get_doc(arg1, arg2)
def get_last_doc(doctype):
    """Return the most recently created document of *doctype*.

    :raises DoesNotExistError: when no document of *doctype* exists."""
    latest = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
    if not latest:
        raise DoesNotExistError
    return get_doc(doctype, latest[0].name)
def get_single(doctype):
    """Return the Document of a Single DocType (its name equals the doctype)."""
    return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
    """Return the `frappe.model.meta.Meta` instance for *doctype*.

    :param cached: use the cached Meta when available (default True)."""
    import frappe.model.meta
    return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
    """Return the Python module for *doctype* via `frappe.modules.load_doctype_module`."""
    import frappe.modules
    return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
    ignore_permissions=False, flags=None):
    """Delete a document; thin wrapper over `frappe.model.delete_doc.delete_doc`.

    :param doctype: DocType of the document to delete.
    :param name: Name of the document to delete.
    :param force: Delete even if linked. Warning: this may lead to data integrity errors.
    :param ignore_doctypes: Ignore if child table is one of these.
    :param for_reload: Call `before_reload` trigger before deleting.
    :param ignore_permissions: Skip the user permission check.
    """
    import frappe.model.delete_doc
    frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes,
        for_reload, ignore_permissions, flags)
def delete_doc_if_exists(doctype, name):
    """Delete the document, silently doing nothing when it does not exist."""
    if not db.exists(doctype, name):
        return
    delete_doc(doctype, name)
def reload_doctype(doctype, force=False):
    """Reload a DocType from its model files
    (`[module]/[doctype]/[name]/[name].json`)."""
    module = db.get_value("DocType", doctype, "module")
    reload_doc(scrub(module), "doctype", scrub(doctype), force=force)
def reload_doc(module, dt=None, dn=None, force=False):
    """Reload a Document from its model file
    (`[module]/[doctype]/[name]/[name].json`).

    :param module: Module name.
    :param dt: DocType name.
    :param dn: Document name.
    :param force: Reload even if the `modified` timestamp matches.
    """
    import frappe.modules
    return frappe.modules.reload_doc(module, dt, dn, force=force)
def rename_doc(doctype, old, new, debug=0, force=False, merge=False, ignore_permissions=False):
    """Rename a document. Calls `frappe.model.rename_doc.rename_doc`.

    NOTE(review): the `debug` argument is accepted but not forwarded to the
    underlying implementation -- confirm whether it is still needed.
    """
    from frappe.model.rename_doc import rename_doc
    return rename_doc(doctype, old, new, force=force, merge=merge, ignore_permissions=ignore_permissions)
def get_module(modulename):
    """Import and return the Python module named *modulename*
    (thin wrapper over `importlib.import_module`)."""
    module = importlib.import_module(modulename)
    return module
def scrub(txt):
    """Return the sluggified form of *txt*: lowercase, with spaces and
    hyphens replaced by underscores. e.g. `Sales Order` becomes `sales_order`."""
    return txt.lower().replace(' ', '_').replace('-', '_')
def unscrub(txt):
    """Return the titlified form of *txt*: underscores and hyphens become
    spaces and each word is capitalized. e.g. `sales_order` becomes `Sales Order`."""
    spaced = txt.replace('_', ' ').replace('-', ' ')
    return spaced.title()
def get_module_path(module, *joins):
    """Return the filesystem path of module *module*.

    :param module: Module name.
    :param *joins: additional path elements joined via `os.path.join`."""
    module = scrub(module)
    dotted_name = local.module_app[module] + "." + module
    return get_pymodule_path(dotted_name, *joins)
def get_app_path(app_name, *joins):
    """Return the filesystem path of the app *app_name*.

    :param app_name: App name.
    :param *joins: additional path elements joined via `os.path.join`."""
    return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
    """Return the current site's path, optionally joined with *joins*."""
    return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
    """Return the filesystem path of the Python module *modulename*.

    :param modulename: Python (dotted) module name.
    :param *joins: additional path elements joined via `os.path.join`.
        Each element is sluggified unless "public" is among them.
    """
    # idiom fix: `x not in y` instead of `not x in y`
    if "public" not in joins:
        joins = [scrub(part) for part in joins]
    module_dir = os.path.dirname(get_module(scrub(modulename)).__file__)
    return os.path.join(module_dir, *joins)
def get_module_list(app_name):
    """Return the module names listed in the app's `modules.txt`."""
    app_dir = os.path.dirname(get_module(app_name).__file__)
    return get_file_items(os.path.join(app_dir, "modules.txt"))
def get_all_apps(with_frappe=False, with_internal_apps=True, sites_path=None):
    """Return the list of all apps from `sites/apps.txt`.

    :param with_frappe: move "frappe" to the front of the list.
    :param with_internal_apps: also include apps from the site's own `apps.txt`.
    :param sites_path: [optional] override the sites path."""
    sites_path = sites_path or local.sites_path

    apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)
    if with_internal_apps:
        site_apps = get_file_items(os.path.join(local.site_path, "apps.txt"))
        apps.extend(site_apps)

    if with_frappe:
        # "frappe" always comes first when requested
        if "frappe" in apps:
            apps.remove("frappe")
        apps.insert(0, 'frappe')

    return apps
def get_installed_apps(sort=False, frappe_last=False):
    """Get list of installed apps in current site.

    :param sort: order the result as in `get_all_apps`.
    :param frappe_last: move "frappe" to the end of the list.
    """
    # NOTE(review): the getattr default of True means a missing
    # `in_install_db` attribute short-circuits to [] -- presumably `flags`
    # resolves missing keys to None so the default never fires; confirm.
    if getattr(flags, "in_install_db", True):
        return []
    if not db:
        connect()
    installed = json.loads(db.get_global("installed_apps") or "[]")
    if sort:
        installed = [app for app in get_all_apps(True) if app in installed]
    if frappe_last:
        if 'frappe' in installed:
            installed.remove('frappe')
        installed.append('frappe')
    return installed
def get_hooks(hook=None, default=None, app_name=None):
    """Get hooks via `app/hooks.py`.

    :param hook: Name of the hook. Gathers values for this name across all
        installed apps and returns them as a list.
    :param default: Default if no hook found.
    :param app_name: Load hooks for this app only (bypasses the cache).
    """
    def load_app_hooks(app_name=None):
        hooks = {}
        for app in [app_name] if app_name else get_installed_apps(sort=True):
            app = "frappe" if app == "webnotes" else app  # legacy app name
            try:
                app_hooks = get_module(app + ".hooks")
            except ImportError:
                # BUG FIX: `raise` previously ran unconditionally after the
                # in_install_app guard, so the "ignore it" intent never worked
                if local.flags.in_install_app:
                    # app is not importable while restoring -- skip it
                    continue
                raise
            for key in dir(app_hooks):
                if not key.startswith("_"):
                    append_hook(hooks, key, getattr(app_hooks, key))
        return hooks

    def append_hook(target, key, value):
        # dict-valued hooks merge recursively; everything else accumulates
        # into a list under the hook name
        if isinstance(value, dict):
            target.setdefault(key, {})
            for inkey in value:
                append_hook(target[key], inkey, value[inkey])
        else:
            append_to_list(target, key, value)

    def append_to_list(target, key, value):
        target.setdefault(key, [])
        if not isinstance(value, list):
            value = [value]
        target[key].extend(value)

    if app_name:
        hooks = _dict(load_app_hooks(app_name))
    else:
        hooks = _dict(cache().get_value("app_hooks", load_app_hooks))

    if hook:
        return hooks.get(hook) or (default if default is not None else [])
    else:
        return hooks
def setup_module_map():
    """Rebuild map of all modules (internal).

    Populates `local.app_modules` (app -> [module, ...]) and
    `local.module_app` (module -> app), using the site cache when a
    database context (`conf.db_name`) exists."""
    _cache = cache()

    if conf.db_name:
        # site context: try the cached maps first
        local.app_modules = _cache.get_value("app_modules")
        local.module_app = _cache.get_value("module_app")

    if not (local.app_modules and local.module_app):
        # cache miss (or no site): rebuild from each app's modules.txt
        local.module_app, local.app_modules = {}, {}
        for app in get_all_apps(True):
            if app=="webnotes": app="frappe"  # legacy app name
            local.app_modules.setdefault(app, [])
            for module in get_module_list(app):
                module = scrub(module)
                local.module_app[module] = app
                local.app_modules[app].append(module)

        if conf.db_name:
            _cache.set_value("app_modules", local.app_modules)
            _cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
    """Return the lines of a text file as a list of stripped strings.

    :param path: file path.
    :param raise_not_found: propagate IOError when the file is missing.
    :param ignore_empty_lines: when True (default), empty/whitespace-only
        lines and lines whose first character is "#" are dropped; when
        False every line is kept."""
    import frappe.utils

    content = read_file(path, raise_not_found=raise_not_found)
    if content:
        content = frappe.utils.strip(content)

        return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
    else:
        return []
def get_file_json(path):
    """Parse the JSON file at *path* and return the resulting object."""
    with open(path, 'r') as json_file:
        return json.load(json_file)
def read_file(path, raise_not_found=False):
    """Return the content of the file at *path* as Unicode, or None.

    :param path: file path.
    :param raise_not_found: raise IOError instead of returning None when
        the file does not exist."""
    from frappe.utils import cstr
    # Python 2 idiom: `unicode` is undefined on Python 3
    if isinstance(path, unicode):
        path = path.encode("utf-8")

    if os.path.exists(path):
        with open(path, "r") as f:
            return cstr(f.read())
    elif raise_not_found:
        raise IOError("{} Not Found".format(path))
    else:
        return None
def get_attr(method_string):
    """Resolve a dotted path like "app.module.function" to the object it names.

    Raises `AppNotInstalledError` (via `throw`) when the app prefix is not
    installed and no install is in progress."""
    parts = method_string.split(".")
    app_name = parts[0]
    if not local.flags.in_install and app_name not in get_installed_apps():
        throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)
    modulename = '.'.join(parts[:-1])
    methodname = parts[-1]
    return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
    """Call *fn* with *args* and only those *kwargs* its signature accepts.

    Accepted keyword names come from `fn.fnargs` when present, otherwise
    from introspecting *fn*. A `flags` keyword is always dropped.
    """
    varkw = None
    if hasattr(fn, 'fnargs'):
        fnargs = fn.fnargs
    else:
        # BUG FIX: `varkw` was previously unbound on the `fnargs` branch,
        # raising NameError in the loop below whenever kwargs were passed
        fnargs, varargs, varkw, defaults = inspect.getargspec(fn)

    newargs = {}
    for a in kwargs:
        # forward a kwarg when the function names it, or accepts **kwargs
        if (a in fnargs) or varkw:
            newargs[a] = kwargs.get(a)

    if "flags" in newargs:
        del newargs["flags"]

    return fn(*args, **newargs)
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
    """Create and insert a **Property Setter** document
    (overrides DocType and DocField properties)."""
    args = _dict(args)
    setter = get_doc({
        'doctype': "Property Setter",
        'doctype_or_field': args.doctype_or_field or "DocField",
        'doc_type': args.doctype,
        'field_name': args.fieldname,
        'property': args.property,
        'value': args.value,
        'property_type': args.property_type or "Data",
        '__islocal': 1
    })
    setter.flags.ignore_validate = ignore_validate
    setter.flags.validate_fields_for_doctype = validate_fields_for_doctype
    setter.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
    """Import a document file using the Data Import Tool."""
    from frappe.core.page.data_import_tool import data_import_tool
    data_import_tool.import_doc(
        path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
    """Return an unsaved deep copy of *doc*.

    :param doc: Document object or dict to copy.
    :param ignore_no_copy: when True (default), fields marked `no_copy`
        also get copied; when False they are blanked out."""
    import copy

    def remove_no_copy_fields(d):
        # blank every field flagged no_copy in the doctype meta
        for df in d.meta.get("fields", {"no_copy": 1}):
            if hasattr(d, df.fieldname):
                d.set(df.fieldname, None)

    if not isinstance(doc, dict):
        d = doc.as_dict()
    else:
        d = doc

    newdoc = get_doc(copy.deepcopy(d))
    # reset identity/audit fields so the copy inserts as a new document
    newdoc.name = None
    newdoc.set("__islocal", 1)
    newdoc.owner = None
    newdoc.creation = None
    newdoc.amended_from = None
    newdoc.amendment_date = None
    if not ignore_no_copy:
        remove_no_copy_fields(newdoc)

    # same reset for every child table row
    for d in newdoc.get_all_children():
        d.name = None
        d.parent = None
        d.set("__islocal", 1)
        d.owner = None
        d.creation = None
        if not ignore_no_copy:
            remove_no_copy_fields(d)

    return newdoc
def compare(val1, condition, val2):
    """Compare *val1* against *val2* using `frappe.utils.compare`.

    `condition` is one of: "^", "in", "not in", "=", "!=", ">", "<",
    ">=", "<=", "not None", "None".
    """
    import frappe.utils
    return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None):
    """Send the response as a message web page rather than JSON
    (used to show permission errors etc.).

    :param title: page title and heading.
    :param html: message body to show.
    :param success: alert message.
    :param http_status_code: optional HTTP status code."""
    local.message_title = title
    local.message = html
    local.message_success = success
    local.response.update({'type': 'page', 'page_name': 'message'})
    if http_status_code:
        local.response['http_status_code'] = http_status_code
def build_match_conditions(doctype, as_condition=True):
    """Return match (User permissions) conditions for *doctype*,
    as an SQL condition when *as_condition* else as a list."""
    import frappe.desk.reportview
    return frappe.desk.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, *args, **kwargs):
    """List database query via `frappe.model.db_query`. Will also check for permissions.

    :param doctype: DocType on which query is to be made.
    :param fields: List of fields or `*`.
    :param filters: List of filters (see example).
    :param order_by: Order By e.g. `modified desc`.
    :param limit_page_start: Start results at record #. Default 0.
    :param limit_page_length: No of records in the page. Default 20.

    Example usage:

        # simple dict filter
        frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

        # filter as a list of lists
        frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])

        # filter as a list of dicts
        frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})

    """
    import frappe.model.db_query
    return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
    """List database query via `frappe.model.db_query`. Will **not** check for permissions.

    Parameters are the same as `frappe.get_list`, but `ignore_permissions`
    is forced on and `limit_page_length` defaults to 0 (no limit).

    :param doctype: DocType on which query is to be made.
    :param fields: List of fields or `*`. Default is: `["name"]`.
    :param filters: List of filters (see example).
    :param order_by: Order By e.g. `modified desc`.
    :param limit_page_start: Start results at record #. Default 0.
    :param limit_page_length: No of records in the page. Default 0 (all records).

    Example usage:

        # simple dict filter
        frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

        # filter as a list of lists
        frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])

        # filter as a list of dicts
        frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})

    """
    kwargs["ignore_permissions"] = True
    # idiom fix: `x not in y` instead of `not x in y`
    if "limit_page_length" not in kwargs:
        kwargs["limit_page_length"] = 0
    return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
    """Return a document property or list of properties
    (alias for `frappe.db.get_value`).

    :param doctype: DocType name.
    :param filters: Filters like `{"x":"y"}` or name of the document; `None` for Single DocTypes.
    :param fieldname: Column name.
    :param ignore: Don't raise an exception if table or column is missing.
    :param as_dict: Return values as dict.
    :param debug: Print query in error log.
    """
    return db.get_value(*args, **kwargs)
def add_version(doc):
    """Insert a new **Version** record holding a JSON snapshot of *doc*."""
    version = get_doc({
        "doctype": "Version",
        "ref_doctype": doc.doctype,
        "docname": doc.name,
        "doclist_json": as_json(doc.as_dict())
    })
    version.insert(ignore_permissions=True)
def as_json(obj, indent=1):
    """Serialize *obj* to indented, key-sorted JSON using frappe's JSON handler."""
    from frappe.utils.response import json_handler
    return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler)
def are_emails_muted():
    """Return a truthy value when outgoing email is muted via flags or site config."""
    muted = flags.mute_emails or conf.get("mute_emails")
    return muted or False
def get_test_records(doctype):
    """Return the objects in the doctype's `test_records.json`,
    or [] when the file does not exist."""
    from frappe.modules import get_doctype_module, get_module_path
    path = os.path.join(get_module_path(get_doctype_module(doctype)),
        "doctype", scrub(doctype), "test_records.json")
    if not os.path.exists(path):
        return []
    with open(path, "r") as f:
        return json.loads(f.read())
def format_value(value, df, doc=None, currency=None):
    """Format *value* according to the DocField *df*
    (properties `fieldtype`, `options` etc.)."""
    import frappe.utils.formatters
    return frappe.utils.formatters.format_value(value, df, doc, currency=currency)
def get_print(doctype, name, print_format=None, style=None, html=None, as_pdf=False):
    """Render the Print Format of a document.

    :param doctype: DocType of document.
    :param name: Name of document.
    :param print_format: Print Format name. Default 'Standard'.
    :param style: Print Format style.
    :param html: pre-rendered HTML; when given, the print page is not rebuilt.
    :param as_pdf: Return the result of `get_pdf` instead of HTML. Default False."""
    from frappe.website.render import build_page
    from frappe.utils.pdf import get_pdf

    # the print page reads its arguments from the request form_dict
    local.form_dict.doctype = doctype
    local.form_dict.name = name
    local.form_dict.format = print_format
    local.form_dict.style = style

    html = html or build_page("print")
    return get_pdf(html) if as_pdf else html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None):
    """Build an attachment dict (`fname`/`fcontent`) from a document's print.

    Renders as PDF when Print Settings' `send_print_as_pdf` is set,
    otherwise as utf-8 encoded HTML."""
    from frappe.utils import scrub_urls

    if not file_name: file_name = name
    file_name = file_name.replace(' ','').replace('/','-')

    print_settings = db.get_singles_dict("Print Settings")

    local.flags.ignore_print_permissions = True
    try:
        if int(print_settings.send_print_as_pdf or 0):
            out = {
                "fname": file_name + ".pdf",
                "fcontent": get_print(doctype, name, print_format=print_format,
                    style=style, html=html, as_pdf=True)
            }
        else:
            out = {
                "fname": file_name + ".html",
                "fcontent": scrub_urls(get_print(doctype, name, print_format=print_format,
                    style=style, html=html)).encode("utf-8")
            }
    finally:
        # BUG FIX: reset the flag even when rendering raises, so later
        # requests do not run with print-permission checks disabled
        local.flags.ignore_print_permissions = False

    return out
# one-time guard so logging configuration runs only once per process
logging_setup_complete = False

def get_logger(module=None):
    """Return a logger (default "frappe"), configuring logging on first use."""
    from frappe.setup_logging import setup_logging
    global logging_setup_complete
    if not logging_setup_complete:
        setup_logging()
        logging_setup_complete = True
    return logging.getLogger(module or "frappe")
def publish_realtime(*args, **kwargs):
    """Publish real-time updates.

    :param event: Event name, like `task_progress` etc.
    :param message: JSON message object. For async must contain `task_id`.
    :param room: Room in which to publish update (default: entire site).
    :param user: Transmit to user.
    :param doctype: Transmit to doctype, docname.
    :param docname: Transmit to doctype, docname.
    :param after_commit: (default False) will emit after current transaction is committed.
    """
    # NOTE(review): `async` is a reserved word from Python 3.7 on; this
    # module name only imports on Python 2 / early Python 3.
    import frappe.async
    return frappe.async.publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
    """A key-value store for caching within a request.

    :param namespace: first-level key in `frappe.local.cache`.
    :param key: second-level key used to retrieve the value.
    :param generator: zero-argument callable producing the value on a miss.
    :param regenerate_if_none: re-run *generator* when the cached value is None.
    """
    namespace_cache = local.cache.setdefault(namespace, {})

    if key not in namespace_cache:
        namespace_cache[key] = generator()
    # idiom fix: identity comparison with None (`is None`, not `== None`)
    elif namespace_cache[key] is None and regenerate_if_none:
        # key exists but the previous result was None
        namespace_cache[key] = generator()

    return namespace_cache[key]
def get_doctype_app(doctype):
    """Return the app that provides *doctype* (cached per request)."""
    def _lookup():
        doctype_module = local.db.get_value("DocType", doctype, "module")
        return local.module_app[scrub(doctype_module)]
    return local_cache("doctype_app", doctype, generator=_lookup)
# ----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
=============================================================================
Generate defects in structure data (:mod:`sknano.nanogen._defect_generators`)
=============================================================================
.. currentmodule:: sknano.nanogen._defect_generators
"""
from __future__ import absolute_import, division, print_function
__docformat__ = 'restructuredtext en'
from collections import OrderedDict
import os
import numpy as np
from ..structure_io import DATAReader, DATAWriter, XYZWriter, \
XYZ2DATAConverter, StructureFormatError, supported_structure_formats
from ..structure_io.atoms import LAMMPSAtoms as Atoms
xyz_tuple = ('x', 'y', 'z')
vac_type_cluster_size_map = {'double': 2, 'triple': 3}
__all__ = ['DefectGenerator', 'CrossLinkedDefectGenerator',
'StoneWalesDefectGenerator', 'VacancyGenerator']
class DefectGenerator(object):
    """Base class for generating defects in structure data.

    Parameters
    ----------
    fname : str
        structure data filename
    outpath : str, optional
        Output path for structure data file.
    structure_format : {None, str}, optional
        chemical file format of saved structure data.
        If `None`, then guess based on `fname` file extension.
        Otherwise, must be one of:

            - `xyz`
            - `data`

    verbose : bool, optional
        Verbose output

    """
    # NOTE(review): the default ``fname=str`` (the *class* str) looks
    # unintentional; callers are expected to always pass a filename.
    def __init__(self, fname=str, outpath=None, structure_format=None,
                 verbose=False):
        # infer the structure format from the file extension when possible
        if fname.endswith(supported_structure_formats) and \
                structure_format is None:
            for ext in supported_structure_formats:
                if fname.endswith(ext):
                    structure_format = ext
                    break
        else:
            # reject unsupported formats, whether explicit or un-inferable
            if (not fname.endswith(supported_structure_formats) and
                    structure_format is None) or \
                    (structure_format is not None and
                     structure_format not in supported_structure_formats):
                raise StructureFormatError(
                    '{} is not a supported structure format'.format(
                        structure_format))

        self._fname = fname
        self._outpath = outpath
        self._structure_format = structure_format
        self._verbose = verbose

        # parse structure data; xyz input is converted to LAMMPS data first
        if self._structure_format == 'data':
            self._structure_data = DATAReader(fname)
        elif self._structure_format == 'xyz':
            self._structure_data = \
                XYZ2DATAConverter(fname).convert(return_reader=True)

        self._atoms = self._structure_data.atoms
        self._atom_ids = self._atoms.atom_ids
        self._atom_coords = self._atoms.get_coords(as_dict=True)

    @property
    def atoms(self):
        """Atoms of the parsed structure."""
        return self._atoms

    @property
    def atom_ids(self):
        """Atom ids of the parsed structure."""
        return self._atom_ids

    @property
    def atom_coords(self):
        """Atom coordinates (dict form -- see `get_coords(as_dict=True)`)."""
        return self._atom_coords
class CrossLinkedDefectGenerator(DefectGenerator):
    """Placeholder for generating cross-linked defects (not yet implemented)."""
    pass
class StoneWalesDefectGenerator(DefectGenerator):
    """Placeholder for generating Stone-Wales defects (not yet implemented)."""
    pass
class VacancyGenerator(DefectGenerator):
    """Class for generating vacancies in structure data.

    Parameters
    ----------
    fname : str
        structure data filename
    outpath : str, optional
        Output path for structure data file.
    structure_format : {None, str}, optional
        chemical file format of saved structure data.
        If `None`, then guess based on `fname` file extension.
        Otherwise, must be one of:

            - `xyz`
            - `data`

    verbose : bool, optional
        Verbose output

    """
    def __init__(self, fname=str, outpath=None, structure_format=None,
                 verbose=False):
        super(VacancyGenerator, self).__init__(
            fname=fname, outpath=outpath, structure_format=structure_format,
            verbose=verbose)

        # vacancy bookkeeping
        self._Nvacs = 0         # total number of atoms removed
        self._Nvac_clusters = 0
        self._Nvac_sites = 0    # number of vacancy sites to generate
        # per-axis (x, y, z) [min, max] limits for vacancy placement
        self._vac_bounds = OrderedDict(zip(xyz_tuple, 3 * [2 * [None]]))
        self._vac_ids = np.empty(0, dtype=int)
        self._vac_type = 'single'   # 'single', 'double' or 'triple'
        self._cluster_size = 1      # atoms removed per vacancy site
        # sphere radius used in the generated VMD selection command
        self._vmd_selection_radius = np.sqrt(10.5)
        self._show_vmd_selection_cmd = True

    @property
    def Nvacs(self):
        return self._Nvacs

    @Nvacs.setter
    def Nvacs(self, value=int):
        self._Nvacs = value

    @property
    def Nvac_clusters(self):
        return self._Nvac_clusters

    @Nvac_clusters.setter
    def Nvac_clusters(self, value=int):
        self._Nvac_clusters = value

    @property
    def Nvac_sites(self):
        return self._Nvac_sites

    @Nvac_sites.setter
    def Nvac_sites(self, value=int):
        self._Nvac_sites = value

    @property
    def cluster_size(self):
        return self._cluster_size

    @cluster_size.setter
    def cluster_size(self, value):
        self._cluster_size = value

    @property
    def vac_bounds(self):
        return self._vac_bounds

    @vac_bounds.setter
    def vac_bounds(self, value):
        self._vac_bounds = value

    @property
    def vac_ids(self):
        return self._vac_ids

    @property
    def vac_type(self):
        return self._vac_type

    @vac_type.setter
    def vac_type(self, value):
        self._vac_type = value

    @property
    def vmd_selection_radius(self):
        return self._vmd_selection_radius

    @vmd_selection_radius.setter
    def vmd_selection_radius(self, value):
        self._vmd_selection_radius = value

    @property
    def show_vmd_selection_cmd(self):
        return self._show_vmd_selection_cmd

    @show_vmd_selection_cmd.setter
    def show_vmd_selection_cmd(self, value=bool):
        self._show_vmd_selection_cmd = value

    def _random_vacancy_generator(self):
        """Generate random vacancies in structure data."""
        # sample Nvac_sites distinct atom ids
        self._vac_ids = \
            np.random.choice(self._atom_ids,
                             size=self._Nvac_sites,
                             replace=False)

    def _generate_vmd_selection_cmd(self):
        # build a VMD selection of spheres centered on each removed atom
        selection_radius = self._vmd_selection_radius
        selections = []
        for atom in self._removed_atoms:
            selection_cmd = \
                "(((x-{:.4f})^2 + ".format(atom.x) + \
                "(y-{:.4f})^2 + ".format(atom.y) + \
                "(z-{:.4f})^2) <= {:.2f})".format(atom.z,
                                                  selection_radius**2)
            selections.append(selection_cmd)

        vmd_selection_cmd = ' or '.join(selections)
        print('copy and paste the following VMD command to select\n'
              'the atoms surrounding the vacancies:\n\n'
              '{}\n'.format(vmd_selection_cmd))

    def _generate_single_vacancies(self):
        # removed atoms are exactly the sampled vacancy ids
        self._removed_atoms = \
            self._atoms.filter_atoms(self._vac_ids, invert=False)

    def _generate_multi_vacancies(self):
        # NOTE(review): duplicates module-level `vac_type_cluster_size_map`
        vac_type_properties = {'double': {'cluster_size': 2,
                                          'NN_cutoff': 1.5},
                               'triple': {'cluster_size': 3,
                                          'NN_cutoff': 1.5}}
        vac_props = vac_type_properties[self._vac_type]
        self._cluster_size = vac_props['cluster_size']
        self._atoms.NN_cutoff = vac_props['NN_cutoff']
        self._atoms.update_nearest_neighbors()

        vac_atoms = Atoms()
        for vac_id in self._vac_ids:
            vac_atom = self._atoms.get_atom(atomID=vac_id)
            vac_atoms.append(vac_atom)
            # grow each site with randomly chosen nearest neighbors
            vac_atoms.extend(np.random.choice(vac_atom.NN,
                                              size=self._cluster_size-1,
                                              replace=False).tolist())
        self._removed_atoms = \
            self._atoms.filter_atoms(vac_atoms.atom_ids, invert=False)

    def _generate_vacancy_structure(self):
        """Generate vacancy structure."""
        if self._vac_type in ('double', 'triple'):
            self._generate_multi_vacancies()
        else:
            self._generate_single_vacancies()

        if self._show_vmd_selection_cmd:
            self._generate_vmd_selection_cmd()

        self._Nvacs = self._removed_atoms.Natoms

        # everything that was not removed remains in the output structure
        self._remaining_atoms = \
            self._atoms.filter_atoms(self._removed_atoms.atom_ids, invert=True)
        #remaining_atoms.assign_unique_ids()

        self._save_vacancy_structure_data()

    def _generate_output_fname(self):
        # e.g. "input+5_vacancies"
        self._output_fname = \
            os.path.splitext(os.path.basename(self._fname))[0] + \
            '+{}_vacancies'.format(self._Nvacs)

    def _save_vacancy_structure_data(self):
        """Write the vacancy structure as LAMMPS data and xyz files."""
        self._generate_output_fname()
        DATAWriter.write(fname=self._output_fname, outpath=self._outpath,
                         atoms=self._remaining_atoms,
                         boxbounds=self._structure_data.boxbounds,
                         comment_line=self._structure_data.comment_line)
        XYZWriter.write(fname=self._output_fname, outpath=self._outpath,
                        atoms=self._remaining_atoms,
                        comment_line=self._structure_data.comment_line)
# Adding code for StoneWalesDefectGenerator class
# -*- coding: utf-8 -*-
"""
=============================================================================
Generate defects in structure data (:mod:`sknano.nanogen._defect_generators`)
=============================================================================
.. currentmodule:: sknano.nanogen._defect_generators
"""
from __future__ import absolute_import, division, print_function
__docformat__ = 'restructuredtext en'
from collections import OrderedDict
import os
import numpy as np
from ..structure_io import DATAReader, DATAWriter, XYZWriter, \
XYZ2DATAConverter, StructureFormatError, supported_structure_formats
from ..structure_io.atoms import LAMMPSAtoms as Atoms
xyz_tuple = ('x', 'y', 'z')
vac_type_cluster_size_map = {'double': 2, 'triple': 3}
__all__ = ['DefectGenerator', 'CrossLinkedDefectGenerator',
'StoneWalesDefectGenerator', 'VacancyGenerator']
class DefectGenerator(object):
    """Base class for generating defects in structure data.

    Parameters
    ----------
    fname : str
        structure data filename
    outpath : str, optional
        Output path for structure data file.
    structure_format : {None, str}, optional
        chemical file format of saved structure data.
        If `None`, then guess based on `fname` file extension.
        Otherwise, must be one of:

            - `xyz`
            - `data`

    verbose : bool, optional
        Verbose output

    """
    # NOTE(review): the default ``fname=str`` (the *class* str) looks
    # unintentional; callers are expected to always pass a filename.
    def __init__(self, fname=str, outpath=None, structure_format=None,
                 verbose=False):
        # infer the structure format from the file extension when possible
        if fname.endswith(supported_structure_formats) and \
                structure_format is None:
            for ext in supported_structure_formats:
                if fname.endswith(ext):
                    structure_format = ext
                    break
        else:
            # reject unsupported formats, whether explicit or un-inferable
            if (not fname.endswith(supported_structure_formats) and
                    structure_format is None) or \
                    (structure_format is not None and
                     structure_format not in supported_structure_formats):
                raise StructureFormatError(
                    '{} is not a supported structure format'.format(
                        structure_format))

        self._fname = fname
        self._outpath = outpath
        self._structure_format = structure_format
        self._verbose = verbose

        # parse structure data; xyz input is converted to LAMMPS data first
        if self._structure_format == 'data':
            self._structure_data = DATAReader(fname)
        elif self._structure_format == 'xyz':
            self._structure_data = \
                XYZ2DATAConverter(fname).convert(return_reader=True)

        self._atoms = self._structure_data.atoms
        self._atom_ids = self._atoms.atom_ids
        self._atom_coords = self._atoms.get_coords(as_dict=True)

    @property
    def atoms(self):
        """Atoms of the parsed structure."""
        return self._atoms

    @property
    def atom_ids(self):
        """Atom ids of the parsed structure."""
        return self._atom_ids

    @property
    def atom_coords(self):
        """Atom coordinates (dict form -- see `get_coords(as_dict=True)`)."""
        return self._atom_coords
class CrossLinkedDefectGenerator(DefectGenerator):
    """Placeholder for generating cross-linked defects (not yet implemented)."""
    pass
class StoneWalesDefectGenerator(DefectGenerator):
    """Class for generating Stone-Wales defects in structure data.

    Parameters
    ----------
    fname : str
        structure data filename
    outpath : str, optional
        Output path for structure data file.
    structure_format : {None, str}, optional
        chemical file format of saved structure data.
        If `None`, then guess based on `fname` file extension.
        Otherwise, must be one of:

            - `xyz`
            - `data`

    verbose : bool, optional
        Verbose output

    """
    def __init__(self, fname=str, outpath=None, structure_format=None,
                 verbose=False):
        super(StoneWalesDefectGenerator, self).__init__(
            fname=fname, outpath=outpath, structure_format=structure_format,
            verbose=verbose)

        # per-axis (x, y, z) [min, max] limits for defect placement
        self._defect_bounds = OrderedDict(zip(xyz_tuple, 3 * [2 * [None]]))
        # sphere radius used when generating a VMD selection command
        self._vmd_selection_radius = np.sqrt(10.5)
        self._show_vmd_selection_cmd = True
class VacancyGenerator(DefectGenerator):
    """Class for generating vacancies in structure data.

    Parameters
    ----------
    fname : str
        structure data filename
    outpath : str, optional
        Output path for structure data file.
    structure_format : {None, str}, optional
        chemical file format of saved structure data.
        If `None`, then guess based on `fname` file extension.
        Otherwise, must be one of:

            - `xyz`
            - `data`

    verbose : bool, optional
        Verbose output

    """
    def __init__(self, fname=None, outpath=None, structure_format=None,
                 verbose=False):
        # BUG FIX: the default for ``fname`` was the builtin type ``str``
        # (a class object, never a valid filename); use ``None`` instead.
        super(VacancyGenerator, self).__init__(
            fname=fname, outpath=outpath, structure_format=structure_format,
            verbose=verbose)
        self._Nvacs = 0
        self._Nvac_clusters = 0
        self._Nvac_sites = 0
        # BUG FIX: ``zip(xyz_tuple, 3 * [2 * [None]])`` aliased a single
        # [None, None] list across all three axes, so mutating the bounds
        # of one axis silently mutated all of them.
        self._vac_bounds = \
            OrderedDict((axis, [None, None]) for axis in xyz_tuple)
        self._vac_ids = np.empty(0, dtype=int)
        self._vac_type = 'single'
        self._cluster_size = 1
        self._vmd_selection_radius = np.sqrt(10.5)
        self._show_vmd_selection_cmd = True

    @property
    def Nvacs(self):
        """Number of vacancies (removed atoms)."""
        return self._Nvacs

    @Nvacs.setter
    def Nvacs(self, value):
        # BUG FIX (here and in the setters below): the parameter was
        # declared ``value=int`` / ``value=bool`` -- the builtin type used
        # as a default value.  Property setters always receive an explicit
        # value, so no default is needed.
        self._Nvacs = value

    @property
    def Nvac_clusters(self):
        """Number of vacancy clusters."""
        return self._Nvac_clusters

    @Nvac_clusters.setter
    def Nvac_clusters(self, value):
        self._Nvac_clusters = value

    @property
    def Nvac_sites(self):
        """Number of vacancy sites."""
        return self._Nvac_sites

    @Nvac_sites.setter
    def Nvac_sites(self, value):
        self._Nvac_sites = value

    @property
    def cluster_size(self):
        """Number of atoms removed per vacancy site."""
        return self._cluster_size

    @cluster_size.setter
    def cluster_size(self, value):
        self._cluster_size = value

    @property
    def vac_bounds(self):
        """Per-axis (x, y, z) [lower, upper] bounds for vacancy placement."""
        return self._vac_bounds

    @vac_bounds.setter
    def vac_bounds(self, value):
        self._vac_bounds = value

    @property
    def vac_ids(self):
        """IDs of the atoms chosen as vacancy sites (read-only)."""
        return self._vac_ids

    @property
    def vac_type(self):
        """Vacancy type: 'single', 'double', or 'triple'."""
        return self._vac_type

    @vac_type.setter
    def vac_type(self, value):
        self._vac_type = value

    @property
    def vmd_selection_radius(self):
        """Radius used in the generated VMD selection command."""
        return self._vmd_selection_radius

    @vmd_selection_radius.setter
    def vmd_selection_radius(self, value):
        self._vmd_selection_radius = value

    @property
    def show_vmd_selection_cmd(self):
        """Whether to print the VMD selection command after generation."""
        return self._show_vmd_selection_cmd

    @show_vmd_selection_cmd.setter
    def show_vmd_selection_cmd(self, value):
        self._show_vmd_selection_cmd = value

    def _random_vacancy_generator(self):
        """Generate random vacancies in structure data."""
        # sample Nvac_sites distinct atom IDs
        self._vac_ids = \
            np.random.choice(self._atom_ids,
                             size=self._Nvac_sites,
                             replace=False)

    def _generate_vmd_selection_cmd(self):
        """Print a VMD selection command covering all removed atoms."""
        selection_radius = self._vmd_selection_radius
        selections = []
        for atom in self._removed_atoms:
            selection_cmd = \
                "(((x-{:.4f})^2 + ".format(atom.x) + \
                "(y-{:.4f})^2 + ".format(atom.y) + \
                "(z-{:.4f})^2) <= {:.2f})".format(atom.z,
                                                  selection_radius**2)
            selections.append(selection_cmd)
        vmd_selection_cmd = ' or '.join(selections)
        print('copy and paste the following VMD command to select\n'
              'the atoms surrounding the vacancies:\n\n'
              '{}\n'.format(vmd_selection_cmd))

    def _generate_single_vacancies(self):
        """Remove exactly the sampled vacancy-site atoms."""
        self._removed_atoms = \
            self._atoms.filter_atoms(self._vac_ids, invert=False)

    def _generate_multi_vacancies(self):
        """Remove each vacancy-site atom plus random nearest neighbors."""
        vac_type_properties = {'double': {'cluster_size': 2,
                                          'NN_cutoff': 1.5},
                               'triple': {'cluster_size': 3,
                                          'NN_cutoff': 1.5}}
        vac_props = vac_type_properties[self._vac_type]
        self._cluster_size = vac_props['cluster_size']
        self._atoms.NN_cutoff = vac_props['NN_cutoff']
        self._atoms.update_nearest_neighbors()
        vac_atoms = Atoms()
        for vac_id in self._vac_ids:
            vac_atom = self._atoms.get_atom(atomID=vac_id)
            vac_atoms.append(vac_atom)
            # pick (cluster_size - 1) random nearest neighbors per site
            vac_atoms.extend(np.random.choice(vac_atom.NN,
                                              size=self._cluster_size - 1,
                                              replace=False).tolist())
        self._removed_atoms = \
            self._atoms.filter_atoms(vac_atoms.atom_ids, invert=False)

    def _generate_vacancy_structure(self):
        """Generate vacancy structure."""
        if self._vac_type in ('double', 'triple'):
            self._generate_multi_vacancies()
        else:
            self._generate_single_vacancies()
        if self._show_vmd_selection_cmd:
            self._generate_vmd_selection_cmd()
        self._Nvacs = self._removed_atoms.Natoms
        self._remaining_atoms = \
            self._atoms.filter_atoms(self._removed_atoms.atom_ids,
                                     invert=True)
        self._save_vacancy_structure_data()

    def _generate_output_fname(self):
        """Derive the output basename from the input name + vacancy count."""
        self._output_fname = \
            os.path.splitext(os.path.basename(self._fname))[0] + \
            '+{}_vacancies'.format(self._Nvacs)

    def _save_vacancy_structure_data(self):
        """Write the remaining atoms as LAMMPS data and XYZ files."""
        self._generate_output_fname()
        DATAWriter.write(fname=self._output_fname, outpath=self._outpath,
                         atoms=self._remaining_atoms,
                         boxbounds=self._structure_data.boxbounds,
                         comment_line=self._structure_data.comment_line)
        XYZWriter.write(fname=self._output_fname, outpath=self._outpath,
                        atoms=self._remaining_atoms,
                        comment_line=self._structure_data.comment_line)
|
'''
Created on 2015/06/02
:author: hubo
'''
from __future__ import print_function, absolute_import, division
from collections import deque
from .matchtree import MatchTree
from .event import Event, withIndices
from bisect import bisect_left
from heapq import heappush, heappop
import weakref
@withIndices('queue')
class QueueCanWriteEvent(Event):
    """Fired by a queue that was full when it can accept appends again."""
    pass
@withIndices('queue')
class QueueIsEmptyEvent(Event):
    """Fired (after waitForEmpty()) when the queue becomes empty."""
    pass
@withIndices('newonly', 'firstonly')
class AutoClassQueueCanWriteEvent(QueueCanWriteEvent):
    """Can-write event for AutoClassQueue.

    ``newonly`` / ``firstonly`` narrow which waiters should retry their
    append (new keys only / first event of a key only).
    """
    pass
class CBQueue(object):
'''
A multi-queue model with priority and balance.
    When first created, there is a default queue with priority 0. More sub-queues may be created with addSubQueue.
Each sub-queue is a CBQueue which accepts more sub-queues. Sub-queues are considered as black-box to the outer parent.
'''
class FifoQueue(object):
    '''
    A wrapper for a FIFO queue
    '''
    def __init__(self, parent = None, maxlength = None):
        # deque gives O(1) append / popleft
        self.queue = deque()
        self.parent = parent
        self.maxlength = maxlength
        self.blocked = False
        if self.maxlength is not None and self.maxlength <= 0:
            self.maxlength = 1
        # True when some appender is waiting for a QueueCanWriteEvent
        self.isWaited = False
    def append(self, value, force = False):
        '''Append *value*; returns None on success, or an event matcher to wait on.'''
        if not force and not self.canAppend():
            self.isWaited = True
            return QueueCanWriteEvent.createMatcher(self)
        if self.parent is not None:
            m = self.parent.notifyAppend(self, force)
            if m is not None:
                return m
        self.queue.append(value)
        return None
    def canAppend(self):
        return self.maxlength is None or len(self.queue) < self.maxlength
    def canPop(self):
        return self.queue and not self.blocked
    def pop(self):
        '''Pop and propagate the pop notification to the parent.'''
        ret = self._pop()
        if self.parent is not None:
            pr = self.parent.notifyPop(self)
            ret[1].extend(pr[0])
            ret[2].extend(pr[1])
        return ret
    def _pop(self):
        '''Actual pop; returns (value, [can-write events], [is-empty events]).'''
        if self.blocked:
            raise IndexError('pop from a blocked queue')
        ret = self.queue.popleft()
        if self.isWaited and self.canAppend():
            self.isWaited = False
            return (ret, [QueueCanWriteEvent(self)], [])
        else:
            return (ret, [], [])
    def clear(self):
        l = len(self)
        ret = self._clear()
        if self.parent is not None:
            pr = self.parent.notifyPop(self, l)
            ret[0].extend(pr[0])
            ret[1].extend(pr[1])
        return ret
    def _clear(self):
        if self.blocked:
            self.unblockall()
        self.queue.clear()
        if self.isWaited and self.canAppend():
            self.isWaited = False
            # BUG FIX: the event must be wrapped in a list -- callers call
            # ret[0].extend(...) on this result (cf. PriorityQueue._clear);
            # previously the bare event object was returned.
            return ([QueueCanWriteEvent(self)], [])
        else:
            return ([], [])
    def __len__(self):
        return len(self.queue)
    def block(self, value):
        '''Put *value* back at the head and block the whole queue.'''
        if self.parent is not None:
            self.parent.notifyAppend(self, True)
        self.queue.appendleft(value)
        if not self.blocked:
            self.blocked = True
            if self.parent is not None:
                self.parent.notifyBlock(self, True)
    def unblock(self, value):
        # a FIFO queue blocks as a whole, so *value* is not needed here
        if self.blocked:
            self.blocked = False
            if self.parent is not None:
                self.parent.notifyBlock(self, False)
    def unblockall(self):
        if self.blocked:
            self.blocked = False
            if self.parent is not None:
                self.parent.notifyBlock(self, False)
class PriorityQueue(object):
    '''
    A queue with inner built priority. Event must have a "priority" property to use with this type of queue.
    For fail-safe, events without "priority" property have the lowest priority.

    NOTICE: different from the queue priority, the priority property is smaller-higher, and is not limited to integers.
    This allows datetime to be used as an increasing priority
    '''
    def __init__(self, parent = None, maxlength = None, key = 'priority'):
        # a heap of (priority, sequence, event) entries; events without
        # the priority attribute fall back to the plain FIFO deque below
        self.queue = []
        self.deque = deque()
        self.parent = parent
        self.maxlength = maxlength
        self.blocks = set()
        if self.maxlength is not None and self.maxlength <= 0:
            self.maxlength = 1
        self.blocked = False
        self.isWaited = False
        self.key = key
        # BUG FIX: monotonically increasing tie-breaker.  The heap used to
        # store (priority, event) pairs, so two events with equal priority
        # fell through to comparing the events themselves, which raises
        # TypeError for non-orderable objects.  With the sequence number,
        # equal priorities pop in FIFO order and events are never compared.
        self._seq = 0
    @classmethod
    def initHelper(cls, key = 'priority'):
        '''Return a (parent, maxlength) factory bound to *key*.'''
        def initer(parent = None, maxlength = None):
            return cls(parent, maxlength, key)
        return initer
    def append(self, value, force = False):
        if not force and not self.canAppend():
            self.isWaited = True
            return QueueCanWriteEvent.createMatcher(self)
        if self.parent is not None:
            m = self.parent.notifyAppend(self, force)
            if m is not None:
                return m
        if hasattr(value, self.key):
            heappush(self.queue, (getattr(value, self.key), self._seq, value))
            self._seq += 1
            # a priority push may change the block status
            if self.blocked and not self.queue[0][-1] in self.blocks:
                self.blocked = False
                if self.parent is not None:
                    self.parent.notifyBlock(self, False)
        else:
            self.deque.append(value)
        return None
    def canAppend(self):
        return self.maxlength is None or len(self.queue) + len(self.deque) < self.maxlength
    def canPop(self):
        return len(self.queue) + len(self.deque) > 0 and not self.blocked
    def pop(self):
        ret = self._pop()
        if self.parent is not None:
            pr = self.parent.notifyPop(self)
            ret[1].extend(pr[0])
            ret[2].extend(pr[1])
        return ret
    def _pop(self):
        if self.blocked:
            raise IndexError('pop from a blocked queue')
        if self.queue:
            # prioritized events first; the deque only serves events that
            # have no priority attribute (lowest priority)
            ret = heappop(self.queue)[-1]
        else:
            ret = self.deque.popleft()
        # re-evaluate block status against the new head event
        if self.queue:
            blocked = self.queue[0][-1] in self.blocks
        elif self.deque:
            blocked = self.deque[0] in self.blocks
        else:
            blocked = False
        if self.blocked != blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
        if self.isWaited and self.canAppend():
            self.isWaited = False
            return (ret, [QueueCanWriteEvent(self)], [])
        else:
            return (ret, [], [])
    def _clear(self):
        if self.blocks:
            self.unblockall()
        del self.queue[:]
        self.deque.clear()
        self._seq = 0
        if self.isWaited and self.canAppend():
            self.isWaited = False
            return ([QueueCanWriteEvent(self)], [])
        else:
            return ([], [])
    def clear(self):
        l = len(self)
        ret = self._clear()
        if self.parent is not None:
            pr = self.parent.notifyPop(self, l)
            ret[0].extend(pr[0])
            ret[1].extend(pr[1])
        return ret
    def __len__(self):
        return len(self.queue) + len(self.deque)
    def block(self, value):
        '''Return *value* to the queue and block it until unblock(value).'''
        self.blocks.add(value)
        if self.parent is not None:
            self.parent.notifyAppend(self, True)
        if hasattr(value, self.key):
            heappush(self.queue, (getattr(value, self.key), self._seq, value))
            self._seq += 1
        else:
            self.deque.appendleft(value)
        if self.queue:
            blocked = self.queue[0][-1] in self.blocks
        elif self.deque:
            blocked = self.deque[0] in self.blocks
        else:
            blocked = False
        if self.blocked != blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
    def unblock(self, value):
        self.blocks.remove(value)
        if self.queue:
            blocked = self.queue[0][-1] in self.blocks
        elif self.deque:
            blocked = self.deque[0] in self.blocks
        else:
            blocked = False
        if self.blocked != blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
    def unblockall(self):
        self.blocks.clear()
        blocked = False
        if self.blocked != blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
class MultiQueue(object):
    '''
    A multi-queue container, every queue in a multi-queue has the same priority, and is popped in turn.
    '''
    class CircleListNode(object):
        '''
        Circle link list
        '''
        def __init__(self, value):
            self.prev = self
            self.value = value
            self.next = self
        def insertprev(self, node):
            # splice *node* in just before self
            self.prev.next = node
            node.prev = self.prev
            node.next = self
            self.prev = node
            return self
        def remove(self):
            # unlink self; returns the following node, or None when self
            # was the only node in the ring
            if self.next is self:
                return None
            self.prev.next = self.next
            self.next.prev = self.prev
            ret = self.next
            self.next = self
            self.prev = self
            return ret
    class CircleList(object):
        def __init__(self):
            self.current = None
        def remove(self, node):
            if self.current is node:
                self.current = node.remove()
            else:
                node.remove()
        def insertprev(self, node):
            if self.current is None:
                self.current = node
            else:
                self.current.insertprev(node)
        def insertcurrent(self, node):
            # insert node and make it the next one to be served
            self.insertprev(node)
            self.current = node
        def next(self):
            ret = self.current
            if self.current is not None:
                self.current = self.current.next
            return ret
        def clear(self):
            self.current = None
    def __init__(self, parent = None, priority = 0):
        # round-robin ring of currently-poppable sub-queues
        self.queues = CBQueue.MultiQueue.CircleList()
        # sub-queue -> CircleListNode, or None when the queue is inactive
        self.queueDict = {}
        # recent pop counts, used to choose a fair insertion position
        self.queueStat = {}
        self.statseq = deque()
        self.parent = parent
        self.priority = priority
        self.totalSize = 0
        self.blocked = True
    def canPop(self):
        return bool(self.queues.current)
    def _pop(self):
        if not self.canPop():
            raise IndexError('pop from an empty or blocked queue')
        c = self.queues.next()
        ret = c.value._pop()
        # update sliding pop statistics
        self.queueStat[c.value] = self.queueStat.get(c.value, 0) + 1
        while len(self.statseq) >= 10 * len(self.queueDict) + 10:
            o = self.statseq.popleft()
            if o in self.queueStat:
                self.queueStat[o] = self.queueStat[o] - 1
                if self.queueStat[o] <= 0 and not o in self.queueDict:
                    del self.queueStat[o]
        self.statseq.append(c.value)
        if not c.value.canPop():
            # deactivate the drained/blocked sub-queue
            self.queues.remove(c)
            self.queueDict[c.value] = None
        self.totalSize = self.totalSize - 1
        if not self.canPop():
            if not self.blocked:
                self.blocked = True
                if self.parent is not None:
                    self.parent.notifyBlock(self, True)
        return ret
    def addSubQueue(self, queue):
        self.totalSize = self.totalSize + len(queue)
        queue.parent = self
        if queue.canPop():
            # Activate this queue
            node = CBQueue.MultiQueue.CircleListNode(queue)
            self.queues.insertprev(node)
            self.queueDict[queue] = node
            self.queueStat[queue] = 0
        else:
            self.queueDict[queue] = None
        if self.canPop():
            if self.blocked:
                self.blocked = False
                if self.parent is not None:
                    self.parent.notifyBlock(self, False)
    def removeSubQueue(self, queue):
        self.totalSize = self.totalSize - len(queue)
        if self.queueDict[queue] is not None:
            self.queues.remove(self.queueDict[queue])
        del self.queueDict[queue]
        if queue in self.queueStat:
            del self.queueStat[queue]
        if not self.canPop():
            if not self.blocked:
                self.blocked = True
                if self.parent is not None:
                    self.parent.notifyBlock(self, True)
    def notifyAppend(self, queue, force):
        if self.parent is not None:
            m = self.parent.notifyAppend(self, force)
            if m is not None:
                return m
        self.totalSize = self.totalSize + 1
        if not queue.blocked:
            if self.queueDict[queue] is None:
                # Activate this queue
                node = CBQueue.MultiQueue.CircleListNode(queue)
                qs = self.queueStat.setdefault(queue, 0)
                # under-served queues are served next; others go to the back
                if qs * len(self.queueStat) >= len(self.statseq):
                    self.queues.insertprev(node)
                else:
                    self.queues.insertcurrent(node)
                self.queueDict[queue] = node
        if self.canPop():
            if self.blocked:
                self.blocked = False
                if self.parent is not None:
                    self.parent.notifyBlock(self, False)
        return None
    def __len__(self):
        return self.totalSize
    def notifyBlock(self, queue, blocked):
        if queue.canPop():
            if self.queueDict[queue] is None:
                # Activate this queue
                node = CBQueue.MultiQueue.CircleListNode(queue)
                qs = self.queueStat.setdefault(queue, 0)
                if qs * len(self.queueStat) >= len(self.statseq):
                    self.queues.insertprev(node)
                else:
                    self.queues.insertcurrent(node)
                self.queueDict[queue] = node
        else:
            if self.queueDict[queue] is not None:
                self.queues.remove(self.queueDict[queue])
                self.queueDict[queue] = None
        selfblocked = not self.canPop()
        if selfblocked != self.blocked:
            self.blocked = selfblocked
            if self.parent is not None:
                self.parent.notifyBlock(self, selfblocked)
    def notifyPop(self, queue, length = 1):
        self.totalSize = self.totalSize - length
        if not queue.canPop():
            if self.queueDict[queue] is not None:
                # BUG FIX: was ``self.queuDict`` (missing 'e'), raising
                # AttributeError whenever a sub-queue became unpoppable
                # during a parent-notified pop.
                self.queues.remove(self.queueDict[queue])
                self.queueDict[queue] = None
        ret = ([], [])
        if self.parent is not None:
            ret = self.parent.notifyPop(self, length)
        blocked = not self.canPop()
        if blocked != self.blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
        return ret
    def unblockall(self):
        for q in self.queueDict.keys():
            q.unblockall()
    def _clear(self):
        ret = ([],[])
        for q in self.queueDict.keys():
            pr = q._clear()
            ret[0].extend(pr[0])
            ret[1].extend(pr[1])
        self.totalSize = 0
        # NOTE(review): ``blockedSize`` is not read anywhere in this class;
        # kept for compatibility with possible external readers.
        self.blockedSize = 0
        self.queues.clear()
        if not self.blocked:
            self.blocked = True
            if self.parent is not None:
                self.parent.notifyBlock(self, True)
        return ret
class AutoClassQueue(object):
    '''
    A queue classify events into virtual sub-queues by key
    '''
    # sentinel for events lacking the key attribute
    nokey = object()
    def __init__(self, parent = None, maxlength = None, key = 'owner', preserveForNew = 1, maxstat = None, subqueuelimit = None):
        self.queues = CBQueue.MultiQueue.CircleList()
        self.queueDict = {}
        self.queueStat = {}
        self.statseq = deque()
        # BUG FIX: ``parent`` was accepted but never stored, while
        # append()/pop()/block()/clear() all read ``self.parent``;
        # every sibling queue class (FifoQueue, PriorityQueue, MultiQueue)
        # assigns it in __init__.
        self.parent = parent
        self.maxlength = maxlength
        self.blocked = False
        if self.maxlength is not None and self.maxlength <= 0:
            self.maxlength = 1
        if maxstat is None:
            if self.maxlength is None:
                self.maxstat = 10240
            else:
                self.maxstat = maxlength * 10
        else:
            self.maxstat = maxstat
        # set of waiter descriptors: (newonly, firstonly[, key])
        self.waited = set()
        self.key = key
        self.preserve = preserveForNew
        self.totalSize = 0
        self.blockKeys = set()
        self.subqueuelimit = subqueuelimit
    @classmethod
    def initHelper(cls, key = 'owner', preserveForNew = 1, maxstat = None, subqueuelimit = None):
        '''Return a (parent, maxlength) factory with the other options bound.'''
        def initer(parent = None, maxlength = None):
            return cls(parent, maxlength, key, preserveForNew, maxstat, subqueuelimit)
        return initer
    def append(self, value, force = False):
        key = getattr(value, self.key, self.nokey)
        if not force:
            w = self._tryAppend(key)
            if w is not None:
                return w
        if self.parent is not None:
            m = self.parent.notifyAppend(self, force)
            if m is not None:
                return m
        if key in self.queueDict:
            self.queueDict[key].value[1].append(value)
        else:
            node = CBQueue.MultiQueue.CircleListNode((key,deque()))
            node.value[1].append(value)
            qs = self.queueStat.setdefault(key, 0)
            if qs * len(self.queueStat) >= len(self.statseq):
                self.queues.insertprev(node)
            else:
                self.queues.insertcurrent(node)
            self.queueDict[key] = node
        self.totalSize += 1
        blocked = not self.canPop() and self.totalSize > 0
        if blocked != self.blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
        return None
    def _tryAppend(self, key):
        # Decide whether an append for *key* must wait; returns a matcher
        # for the wake-up event, or None when the append may proceed.
        if self.maxlength is None:
            if self.subqueuelimit is None or not key in self.queueDict:
                return None
            elif len(self.queueDict[key].value[1]) >= self.subqueuelimit:
                self.waited.add((False, False, key))
                return AutoClassQueueCanWriteEvent.createMatcher(self, _ismatch = lambda x: x.key == key or x.key is self.nokey)
            else:
                return None
        if key in self.queueDict:
            if self.subqueuelimit is not None and len(self.queueDict[key].value[1]) >= self.subqueuelimit:
                self.waited.add((False, False, key))
                return AutoClassQueueCanWriteEvent.createMatcher(self, _ismatch = lambda x: x.key == key or x.key is self.nokey)
            elif self.totalSize < self.maxlength - self.preserve - len(self.queueStat) + len(self.queueDict):
                return None
            else:
                if len(self.queueDict[key].value[1]) <= 1:
                    self.waited.add((False, True, key))
                    return AutoClassQueueCanWriteEvent.createMatcher(self, False, _ismatch = lambda x: not (x.firstonly and x.key != key))
                else:
                    self.waited.add((False, False))
                    return AutoClassQueueCanWriteEvent.createMatcher(self, False, False)
        elif key in self.queueStat:
            if self.totalSize < self.maxlength - self.preserve:
                return None
            else:
                self.waited.add((False, True))
                return AutoClassQueueCanWriteEvent.createMatcher(self, False)
        else:
            if self.totalSize < self.maxlength:
                return None
            else:
                self.waited.add((True, True))
                return AutoClassQueueCanWriteEvent.createMatcher(self)
    def canAppend(self):
        return self.maxlength is None or self.totalSize < self.maxlength
    def canPop(self):
        return self.queues.current is not None
    def pop(self):
        ret = self._pop()
        if self.parent is not None:
            pr = self.parent.notifyPop(self)
            ret[1].extend(pr[0])
            ret[2].extend(pr[1])
        return ret
    def _pop(self):
        if not self.canPop():
            raise IndexError('pop from a blocked or empty queue')
        c = self.queues.next()
        key = c.value[0]
        ret = c.value[1].popleft()
        self.totalSize -= 1
        # sliding pop statistics per key
        self.queueStat[key] = self.queueStat.get(key, 0) + 1
        while len(self.statseq) >= min(self.maxstat, 10 * len(self.queueStat) + 10):
            k1 = self.statseq.popleft()
            self.queueStat[k1] = self.queueStat[k1] - 1
            if self.queueStat[k1] <= 0 and not k1 in self.queueDict:
                del self.queueStat[k1]
        self.statseq.append(key)
        if not c.value[1]:
            del self.queueDict[c.value[0]]
            self.queues.remove(c)
        blocked = not self.canPop() and self.totalSize > 0
        if blocked != self.blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
        if self.waited:
            # wake up the most-restricted waiters that now have room
            if key not in self.queueDict:
                subsize = 0
            else:
                subsize = len(self.queueDict[key].value[1])
            if self.maxlength is None:
                if self.subqueuelimit is not None and subsize < self.subqueuelimit and (False, False, key) in self.waited:
                    return (ret, [AutoClassQueueCanWriteEvent(self, False, False, key=key)], [])
            elif self.totalSize < self.maxlength - self.preserve - len(self.queueStat) + len(self.queueDict):
                self.waited = set(w for w in self.waited if len(w) == 3 and w[1] == False and w[2] != key)
                return (ret, [AutoClassQueueCanWriteEvent(self, False, False, key=key)], [])
            elif self.totalSize < self.maxlength - self.preserve:
                if (False, True) in self.waited or (False, True, key) in self.waited or (True, True) in self.waited or \
                        (False, False, key) in self.waited:
                    self.waited.discard((False, True))
                    self.waited.discard((False, True, key))
                    self.waited.discard((True, True))
                    self.waited.discard((False, False, key))
                    return (ret, [AutoClassQueueCanWriteEvent(self, False, True, key=key)], [])
            elif self.totalSize < self.maxlength:
                if (True, True) in self.waited or (False, False, key) in self.waited or (False, True, key) in self.waited:
                    self.waited.discard((True, True))
                    self.waited.discard((False, False, key))
                    if (False, True, key) in self.waited:
                        self.waited.discard((False, True, key))
                        return (ret, [AutoClassQueueCanWriteEvent(self, False, True, key=key)], [])
                    else:
                        return (ret, [AutoClassQueueCanWriteEvent(self, True, True, key=key)], [])
            elif self.subqueuelimit is not None and subsize < self.subqueuelimit and (False, False, key) in self.waited:
                # If we don't wake up the sub-queue waiter now, it may wait forever.
                # The sub-queue waiter won't be able to send events in, but they will get a new matcher
                # Some waiters might wake up mistakenly, they will wait again when they try to append the event.
                self.waited.discard((True, True))
                self.waited.discard((False, False, key))
                return (ret, [AutoClassQueueCanWriteEvent(self, True, True, key=key)], [])
        return (ret, [], [])
    def clear(self):
        l = len(self)
        ret = self._clear()
        if self.parent is not None:
            pr = self.parent.notifyPop(self, l)
            ret[0].extend(pr[0])
            ret[1].extend(pr[1])
        return ret
    def _clear(self):
        self.queues.clear()
        self.blockKeys.clear()
        self.queueDict.clear()
        self.totalSize = 0
        blocked = not self.canPop() and self.totalSize > 0
        if blocked != self.blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
        if self.waited:
            self.waited.clear()
            return ([AutoClassQueueCanWriteEvent(self, False, False, key=self.nokey)], [])
        else:
            return ([], [])
    def __len__(self):
        return self.totalSize
    def block(self, value):
        if self.parent is not None:
            self.parent.notifyAppend(self, True)
        key = getattr(value, self.key, self.nokey)
        if key in self.queueDict:
            # return the event to the head of its sub-queue and deactivate it
            self.queueDict[key].value[1].appendleft(value)
            self.queues.remove(self.queueDict[key])
        else:
            node = CBQueue.MultiQueue.CircleListNode((key,deque()))
            node.value[1].append(value)
            self.queueDict[key] = node
        self.blockKeys.add(key)
        self.totalSize += 1
        blocked = not self.canPop() and self.totalSize > 0
        if blocked != self.blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
    def unblock(self, value):
        key = getattr(value, self.key, self.nokey)
        if key in self.blockKeys:
            self._unblock(key)
        blocked = not self.canPop() and self.totalSize > 0
        if blocked != self.blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
    def _unblock(self, key):
        # reactivate the sub-queue for *key* at a fair ring position
        self.blockKeys.remove(key)
        node = self.queueDict[key]
        qs = self.queueStat.setdefault(key, 0)
        if qs * len(self.queueStat) >= len(self.statseq):
            self.queues.insertprev(node)
        else:
            self.queues.insertcurrent(node)
    def unblockall(self):
        for k in list(self.blockKeys):
            self._unblock(k)
        blocked = not self.canPop() and self.totalSize > 0
        if blocked != self.blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
def __init__(self, tree = None, parent = None, maxdefault = None, maxtotal = None, defaultQueueClass = FifoQueue, defaultQueuePriority = 0):
    '''
    Constructor

    :param tree: MatchTree used to classify events into sub-queues; a new one is created when None
    :param parent: parent MultiQueue when this CBQueue is itself a sub-queue
    :param maxdefault: max length for the default sub-queue
    :param maxtotal: max total length including all sub-queues
    :param defaultQueueClass: factory for the default sub-queue
    :param defaultQueuePriority: priority assigned to the default sub-queue
    '''
    # priority -> MultiQueue holding the sub-queues of that priority
    self.queues = {}
    # name / queue-object -> [priority, queue, name] index entry
    self.queueindex = {}
    # sorted list of priorities that currently have poppable events
    self.prioritySet = []
    if tree is None:
        self.tree = MatchTree()
    else:
        self.tree = tree
    self.parent = parent
    # build the default priority level and its default sub-queue
    defaultPriority = CBQueue.MultiQueue(self, defaultQueuePriority)
    defaultQueue = defaultQueueClass(defaultPriority, maxdefault)
    defaultPriority.addSubQueue(defaultQueue)
    self.queues[defaultQueuePriority] = defaultPriority
    # events matching no other sub-queue fall through to the default queue
    self.tree.insert(None, defaultQueue)
    self.defaultQueue = defaultQueue
    self.totalSize = 0
    self.maxtotal = maxtotal
    self.blocked = True
    # blocked event -> sub-queue holding the block
    self.blockEvents = {}
    self.isWaited = False
    self.isWaitEmpty = False
    # count of events popped from this queue so far
    self.outputStat = 0
def _removeFromTree(self):
for v in self.queueindex.values():
if len(v) == 3:
v[1]._removeFromTree()
self.tree.remove(None, self.defaultQueue)
self.tree = None
def canAppend(self):
'''
Whether the queue is full or not. Only check the total limit. Sub-queue may still be full (even default).
:returns: False if the queue is full, True if not.
If there are sub-queues, append() may still fail if the sub-queue is full.
'''
return self.maxtotal is None or self.totalSize < self.maxtotal
def append(self, event, force = False):
'''
Append an event to queue. The events are classified and appended to sub-queues
:param event: input event
:param force: if True, the event is appended even if the queue is full
:returns: None if appended successfully, or a matcher to match a QueueCanWriteEvent otherwise
'''
if self.tree is None:
if self.parent is None:
raise IndexError('The queue is removed')
else:
return self.parent.parent.append(event, force)
q = self.tree.matchfirst(event)
return q.append(event, force)
def waitForEmpty(self):
    '''
    Make this queue generate a QueueIsEmptyEvent when it is empty

    :returns: matcher for QueueIsEmptyEvent, or None if the queue is already empty
    '''
    if not self:
        # already empty; nothing to wait for
        return None
    self.isWaitEmpty = True
    return QueueIsEmptyEvent.createMatcher(self)
def block(self, event, emptyEvents = ()):
    '''
    Return a recently popped event to queue, and block all later events until unblock.

    Only the sub-queue directly containing the event is blocked, so events in other queues may still be processed.

    It is illegal to call block and unblock in different queues with a same event.

    :param event: the returned event. When the queue is unblocked later, this event will be popped again.

    :param emptyEvents: reactivate the QueueIsEmptyEvents
    '''
    # classify the event back to the sub-queue it came from
    q = self.tree.matchfirst(event)
    q.block(event)
    # remember which sub-queue holds the block so unblock() can find it
    self.blockEvents[event] = q
    # re-arm waitForEmpty() on queues that had already reported empty
    for ee in emptyEvents:
        ee.queue.waitForEmpty()
def unblock(self, event):
'''
Remove a block
'''
if event not in self.blockEvents:
return
self.blockEvents[event].unblock(event)
del self.blockEvents[event]
def unblockqueue(self, queue):
'''
Remove blocked events from the queue and all subqueues. Usually used after queue clear/unblockall to prevent leak.
:returns: the cleared events
'''
subqueues = set()
def allSubqueues(q):
subqueues.add(q)
for v in q.queueindex.values():
if len(v) == 3:
allSubqueues(v[1])
allSubqueues(queue)
events = [k for k,v in self.blockEvents.items() if v in subqueues]
for e in events:
del self.blockEvents[e]
return events
def unblockall(self):
'''
Remove all blocks from the queue and all sub-queues
'''
for q in self.queues.values():
q.unblockall()
self.blockEvents.clear()
def notifyAppend(self, queue, force):
    '''
    Internal notify for sub-queues

    :returns: If the append is blocked by parent, an EventMatcher is returned, None else.
    '''
    if not force and not self.canAppend():
        # total limit reached: the appender must wait for a can-write event
        self.isWaited = True
        return QueueCanWriteEvent.createMatcher(self)
    if self.parent is not None:
        # propagate upwards; an ancestor queue may also be full
        m = self.parent.notifyAppend(self, force)
        if m is not None:
            return m
    self.totalSize = self.totalSize + 1
    return None
def notifyBlock(self, queue, blocked):
    '''
    Internal notify for sub-queues been blocked
    '''
    if blocked:
        # remove the queue's priority from the sorted poppable set
        if self.prioritySet[-1] == queue.priority:
            self.prioritySet.pop()
        else:
            pindex = bisect_left(self.prioritySet, queue.priority)
            if pindex < len(self.prioritySet) and self.prioritySet[pindex] == queue.priority:
                del self.prioritySet[pindex]
    else:
        # re-insert the priority, keeping prioritySet sorted and unique
        if queue.canPop():
            pindex = bisect_left(self.prioritySet, queue.priority)
            if pindex >= len(self.prioritySet) or self.prioritySet[pindex] != queue.priority:
                self.prioritySet.insert(pindex, queue.priority)
    # propagate our own blocked-state change upwards
    newblocked = not self.canPop()
    if newblocked != self.blocked:
        self.blocked = newblocked
        if self.parent is not None:
            self.parent.notifyBlock(self, newblocked)
def notifyPop(self, queue, length = 1):
    '''
    Internal notify for sub-queues been poped

    :returns: List of any events generated by this pop
    '''
    self.totalSize = self.totalSize - length
    ret1 = []
    ret2 = []
    if self.isWaited and self.canAppend():
        # room again: wake appenders waiting on this queue
        self.isWaited = False
        ret1.append(QueueCanWriteEvent(self))
    if self.isWaitEmpty and not self:
        # queue drained: fire the requested is-empty notification
        self.isWaitEmpty = False
        ret2.append(QueueIsEmptyEvent(self))
    if self.parent is not None:
        pr = self.parent.notifyPop(self, length)
        ret1 += pr[0]
        ret2 += pr[1]
    # propagate our blocked-state change upwards if it flipped
    newblocked = not self.canPop()
    if newblocked != self.blocked:
        self.blocked = newblocked
        if self.parent is not None:
            self.parent.notifyBlock(self, newblocked)
    return (ret1, ret2)
def canPop(self):
'''
Whether the queue is empty/blocked or not
:returns: False if the queue is empty or blocked, or True otherwise
'''
return bool(self.prioritySet)
def pop(self):
    '''
    Pop an event from the queue. The event in the queue with higher priority is popped before ones in lower priority.
    If there are multiple queues with the same priority, events are taken in turn from each queue.
    May return some queueEvents indicating that some of the queues can be written into.

    :returns: (obj, (queueEvents,...), (queueEmptyEvents,...)) where obj is the popped event, queueEvents are QueueCanWriteEvents generated by this pop
            and queueEmptyEvents are QueueIsEmptyEvents generated by this pop
    '''
    ret = self._pop()
    if self.parent is not None:
        # merge notifications produced by the parent's bookkeeping
        pr = self.parent.notifyPop(self)
        ret[1].extend(pr[0])
        ret[2].extend(pr[1])
    return ret
def _pop(self):
    '''
    Actual pop
    '''
    if not self.canPop():
        raise IndexError('pop from an empty or blocked queue')
    # highest priority in the sorted set is at the end
    priority = self.prioritySet[-1]
    ret = self.queues[priority]._pop()
    self.outputStat = self.outputStat + 1
    self.totalSize = self.totalSize - 1
    if self.isWaited and self.canAppend():
        # room again: wake appenders waiting on this queue
        self.isWaited = False
        ret[1].append(QueueCanWriteEvent(self))
    if self.isWaitEmpty and not self:
        # queue drained: fire the requested is-empty notification
        self.isWaitEmpty = False
        ret[2].append(QueueIsEmptyEvent(self))
    return ret
def clear(self):
    '''
    Clear all the events in this queue, including any sub-queues.

    :returns: ((queueEvents,...), (queueEmptyEvents,...)) where queueEvents are QueueCanWriteEvents generated by clearing.
    '''
    # remember the size so the parent can adjust its totals
    l = len(self)
    ret = self._clear()
    if self.parent is not None:
        pr = self.parent.notifyPop(self, l)
        ret[0].extend(pr[0])
        ret[1].extend(pr[1])
    return ret
def _clear(self):
    '''
    Actual clear
    '''
    ret = ([],[])
    # clear every priority level (each is a MultiQueue)
    for q in self.queues.values():
        pr = q._clear()
        ret[0].extend(pr[0])
        ret[1].extend(pr[1])
    self.totalSize = 0
    del self.prioritySet[:]
    if self.isWaited and self.canAppend():
        self.isWaited = False
        ret[0].append(QueueCanWriteEvent(self))
    if self.isWaitEmpty and not self:
        self.isWaitEmpty = False
        ret[1].append(QueueIsEmptyEvent(self))
    # blocked events are gone along with the cleared queues
    self.blockEvents.clear()
    return ret
def __contains__(self, name):
    '''Return True when a sub-queue is registered under *name* (or the queue object itself).'''
    return name in self.queueindex
def __getitem__(self, name):
    '''
    Get a sub-queue through q['sub-queue-name']
    '''
    # index entries are [priority, queue, name]
    return self.queueindex[name][1]
def getPriority(self, queue):
    '''
    get priority of a sub-queue
    '''
    # index entries are [priority, queue, name]
    return self.queueindex[queue][0]
def setPriority(self, queue, priority):
    '''
    Set priority of a sub-queue
    '''
    q = self.queueindex[queue]
    # detach from the old priority level...
    self.queues[q[0]].removeSubQueue(q[1])
    # ...and attach to the (possibly new) target level
    newPriority = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
    q[0] = priority
    newPriority.addSubQueue(q[1])
def addSubQueue(self, priority, matcher, name = None, maxdefault = None, maxtotal = None, defaultQueueClass = FifoQueue):
    '''
    add a sub queue to current queue, with a priority and a matcher

    :param priority: priority of this queue. Larger is higher, 0 is lowest.
    :param matcher: an event matcher to catch events. Every event match the criteria will be stored in this queue.
    :param name: a unique name to identify the sub-queue. If none, the queue is anonymous. It can be any hashable value.
    :param maxdefault: max length for default queue.
    :param maxtotal: max length for sub-queue total, including sub-queues of sub-queue
    :param defaultQueueClass: factory for the new sub-queue's default queue (e.g. FifoQueue)
    :returns: the newly created CBQueue sub-queue
    :raises IndexError: if *name* is already used by another sub-queue
    '''
    if name is not None and name in self.queueindex:
        raise IndexError("Duplicated sub-queue name '" + str(name) + "'")
    # Route matching events to the new sub-queue via a subtree of the match tree
    subtree = self.tree.subtree(matcher, True)
    newPriority = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
    newQueue = CBQueue(subtree, newPriority, maxdefault, maxtotal, defaultQueueClass)
    newPriority.addSubQueue(newQueue)
    # Shared index entry [priority, queue, name]; mutated in-place by setPriority
    qi = [priority, newQueue, name]
    if name is not None:
        self.queueindex[name] = qi
    self.queueindex[newQueue] = qi
    return newQueue
def removeSubQueue(self, queue):
    '''
    remove a sub queue from current queue.

    This unblock the sub-queue, retrieve all events from the queue and put them back to the parent.

    Call clear on the sub-queue first if the events are not needed any more.

    :param queue: the name or queue object to remove

    :returns: ((queueevents,...), (queueEmptyEvents,...)) Possible queue events from removing sub-queues
    '''
    q = self.queueindex[queue]
    q[1].unblockall()
    # Detach the sub-queue's matchers so re-appended events no longer route to it
    q[1]._removeFromTree()
    ret = ([],[])
    # Drain the sub-queue and force its events back into this queue
    while q[1].canPop():
        r = q[1].pop()
        self.append(r[0], True)
        ret[0].extend(r[1])
        ret[1].extend(r[2])
    self.queues[q[0]].removeSubQueue(q[1])
    # Remove from index (both the name entry and the queue-object entry)
    if q[2] is not None:
        del self.queueindex[q[2]]
    del self.queueindex[q[1]]
    # Recompute the blocked flag and propagate the change upwards
    newblocked = not self.canPop()
    if newblocked != self.blocked:
        self.blocked = newblocked
        if self.parent is not None:
            self.parent.notifyBlock(self, newblocked)
    return ret
def __len__(self):
    # Total number of stored events, including every sub-queue
    return self.totalSize
- Memory leak with AutoClassQueue
'''
Created on 2015/06/02
:author: hubo
'''
from __future__ import print_function, absolute_import, division
from collections import deque
from .matchtree import MatchTree
from .event import Event, withIndices
from bisect import bisect_left
from heapq import heappush, heappop
import weakref
@withIndices('queue')
class QueueCanWriteEvent(Event):
    '''
    Raised when a queue that was full can accept append() again; the waiting
    writer matches on the ``queue`` index.
    '''
    pass
@withIndices('queue')
class QueueIsEmptyEvent(Event):
    '''
    Raised when a queue becomes empty after a waitForEmpty() request.
    '''
    pass
@withIndices('newonly', 'firstonly')
class AutoClassQueueCanWriteEvent(QueueCanWriteEvent):
    '''
    QueueCanWriteEvent specialization for AutoClassQueue; the extra indices tell
    whether only new keys (``newonly``) or only first events of a key
    (``firstonly``) may be appended now.
    '''
    pass
class CBQueue(object):
'''
A multi-queue model with priority and balance.
When first created, there is a default queue with priority 0. More sub-queues maybe created with addSubQueue.
Each sub-queue is a CBQueue which accepts more sub-queues. Sub-queues are considered as black-box to the outer parent.
'''
class FifoQueue(object):
    '''
    A wrapper for a FIFO queue
    '''
    def __init__(self, parent = None, maxlength = None):
        '''
        :param parent: parent container (a CBQueue.MultiQueue) notified on
                       append/pop/block, or None for a stand-alone queue
        :param maxlength: maximum number of stored events; None means unlimited
        '''
        self.queue = deque()
        self.parent = parent
        self.maxlength = maxlength
        self.blocked = False
        if self.maxlength is not None and self.maxlength <= 0:
            self.maxlength = 1
        # True when a writer holds a QueueCanWriteEvent matcher and must be woken
        self.isWaited = False
    def append(self, value, force = False):
        '''
        Append an event.

        :returns: None on success, or a matcher for QueueCanWriteEvent when the
                  queue (or a parent) is full and *force* is False
        '''
        if not force and not self.canAppend():
            self.isWaited = True
            return QueueCanWriteEvent.createMatcher(self)
        if self.parent is not None:
            m = self.parent.notifyAppend(self, force)
            if m is not None:
                return m
        self.queue.append(value)
        return None
    def canAppend(self):
        return self.maxlength is None or len(self.queue) < self.maxlength
    def canPop(self):
        return self.queue and not self.blocked
    def pop(self):
        ret = self._pop()
        if self.parent is not None:
            pr = self.parent.notifyPop(self)
            ret[1].extend(pr[0])
            ret[2].extend(pr[1])
        return ret
    def _pop(self):
        '''
        :returns: (event, [QueueCanWriteEvent...], [QueueIsEmptyEvent...])
        :raises IndexError: when the queue is blocked (or empty, via deque)
        '''
        if self.blocked:
            raise IndexError('pop from a blocked queue')
        ret = self.queue.popleft()
        if self.isWaited and self.canAppend():
            self.isWaited = False
            return (ret, [QueueCanWriteEvent(self)], [])
        else:
            return (ret, [], [])
    def clear(self):
        l = len(self)
        ret = self._clear()
        if self.parent is not None:
            pr = self.parent.notifyPop(self, l)
            ret[0].extend(pr[0])
            ret[1].extend(pr[1])
        return ret
    def _clear(self):
        if self.blocked:
            self.unblockall()
        self.queue.clear()
        if self.isWaited and self.canAppend():
            self.isWaited = False
            # BUGFIX: return a *list* of events; callers (clear, CBQueue._clear)
            # do ret[0].extend(pr[0]), which broke with a bare event object.
            # PriorityQueue._clear already returns the list form.
            return ([QueueCanWriteEvent(self)], [])
        else:
            return ([], [])
    def __len__(self):
        return len(self.queue)
    def block(self, value):
        '''
        Return a popped event to the head of the queue and block the queue.
        '''
        if self.parent is not None:
            self.parent.notifyAppend(self, True)
        self.queue.appendleft(value)
        if not self.blocked:
            self.blocked = True
            if self.parent is not None:
                self.parent.notifyBlock(self, True)
    def unblock(self, value):
        # A FIFO queue has a single block flag, so any unblock lifts it
        if self.blocked:
            self.blocked = False
            if self.parent is not None:
                self.parent.notifyBlock(self, False)
    def unblockall(self):
        if self.blocked:
            self.blocked = False
            if self.parent is not None:
                self.parent.notifyBlock(self, False)
class PriorityQueue(object):
    '''
    A queue with inner built priority. Event must have a "priority" property to use with this type of queue.
    For fail-safe, events without "priority" property have the lowest priority.
    NOTICE: different from the queue priority, the priority property is smaller-higher, and is not limited to integers.
    This allows datetime to be used as an increasing priority
    '''
    def __init__(self, parent = None, maxlength = None, key = 'priority'):
        # a heap of (priority, event) pairs for events that carry the key attribute
        self.queue = []
        # FIFO fallback for events without the key attribute (treated as lowest priority)
        self.deque = deque()
        self.parent = parent
        self.maxlength = maxlength
        # events re-inserted by block(); the queue is blocked while one is at the head
        self.blocks = set()
        if self.maxlength is not None and self.maxlength <= 0:
            self.maxlength = 1
        self.blocked = False
        self.isWaited = False
        self.key = key
    @classmethod
    def initHelper(cls, key = 'priority'):
        # Factory matching the (parent, maxlength) signature CBQueue expects
        def initer(parent = None, maxlength = None):
            return cls(parent, maxlength, key)
        return initer
    def append(self, value, force = False):
        '''
        Append an event; returns a QueueCanWriteEvent matcher when full, else None.
        '''
        if not force and not self.canAppend():
            self.isWaited = True
            return QueueCanWriteEvent.createMatcher(self)
        if self.parent is not None:
            m = self.parent.notifyAppend(self, force)
            if m is not None:
                return m
        if hasattr(value, self.key):
            heappush(self.queue, (getattr(value, self.key), value))
            # a priority push may change the block status
            if self.blocked and not self.queue[0][1] in self.blocks:
                self.blocked = False
                if self.parent is not None:
                    self.parent.notifyBlock(self, False)
        else:
            self.deque.append(value)
        return None
    def canAppend(self):
        return self.maxlength is None or len(self.queue) + len(self.deque) < self.maxlength
    def canPop(self):
        return len(self.queue) + len(self.deque) > 0 and not self.blocked
    def pop(self):
        ret = self._pop()
        if self.parent is not None:
            pr = self.parent.notifyPop(self)
            ret[1].extend(pr[0])
            ret[2].extend(pr[1])
        return ret
    def _pop(self):
        '''
        Pop the highest-priority event; heap entries win over no-priority events.
        '''
        if self.blocked:
            raise IndexError('pop from a blocked queue')
        if self.queue:
            ret = heappop(self.queue)[1]
        else:
            ret = self.deque.popleft()
        # The queue re-blocks when the next head is a block()-ed event
        if self.queue:
            blocked = self.queue[0][1] in self.blocks
        elif self.deque:
            blocked = self.deque[0] in self.blocks
        else:
            blocked = False
        if self.blocked != blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
        if self.isWaited and self.canAppend():
            self.isWaited = False
            return (ret, [QueueCanWriteEvent(self)], [])
        else:
            return (ret, [], [])
    def _clear(self):
        if self.blocks:
            self.unblockall()
        del self.queue[:]
        self.deque.clear()
        if self.isWaited and self.canAppend():
            self.isWaited = False
            return ([QueueCanWriteEvent(self)], [])
        else:
            return ([], [])
    def clear(self):
        l = len(self)
        ret = self._clear()
        if self.parent is not None:
            pr = self.parent.notifyPop(self, l)
            ret[0].extend(pr[0])
            ret[1].extend(pr[1])
        return ret
    def __len__(self):
        return len(self.queue) + len(self.deque)
    def block(self, value):
        '''
        Return a popped event and block the queue while it stays at the head.
        '''
        self.blocks.add(value)
        if self.parent is not None:
            self.parent.notifyAppend(self, True)
        if hasattr(value, self.key):
            heappush(self.queue, (getattr(value, self.key), value))
        else:
            self.deque.appendleft(value)
        # Blocked only when the blocking event is actually next in line
        if self.queue:
            blocked = self.queue[0][1] in self.blocks
        elif self.deque:
            blocked = self.deque[0] in self.blocks
        else:
            blocked = False
        if self.blocked != blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
    def unblock(self, value):
        self.blocks.remove(value)
        if self.queue:
            blocked = self.queue[0][1] in self.blocks
        elif self.deque:
            blocked = self.deque[0] in self.blocks
        else:
            blocked = False
        if self.blocked != blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
    def unblockall(self):
        self.blocks.clear()
        blocked = False
        if self.blocked != blocked:
            self.blocked = blocked
            if self.parent is not None:
                self.parent.notifyBlock(self, blocked)
class MultiQueue(object):
'''
A multi-queue container, every queue in a multi-queue has the same priority, and is popped in turn.
'''
class CircleListNode(object):
    '''
    Circle link list node; a detached node links to itself in both directions.
    '''
    def __init__(self, value):
        self.prev = self
        self.value = value
        self.next = self
    def insertprev(self, node):
        # Insert *node* immediately before self; returns self
        self.prev.next = node
        node.prev = self.prev
        node.next = self
        self.prev = node
        return self
    def remove(self):
        # Unlink self; returns the following node, or None if self was the only node
        if self.next is self:
            return None
        self.prev.next = self.next
        self.next.prev = self.prev
        ret = self.next
        self.next = self
        self.prev = self
        return ret
class CircleList(object):
    '''
    A circular list with a movable "current" cursor, used for round-robin service.
    '''
    def __init__(self):
        self.current = None
    def remove(self, node):
        # Keep the cursor valid when removing the node it points to
        if self.current is node:
            self.current = node.remove()
        else:
            node.remove()
    def insertprev(self, node):
        # Insert just before the cursor, i.e. at the tail of the round-robin order
        if self.current is None:
            self.current = node
        else:
            self.current.insertprev(node)
    def insertcurrent(self, node):
        # Insert at the cursor position so this node is served next
        self.insertprev(node)
        self.current = node
    def next(self):
        # Return the current node and advance the cursor; None when empty
        ret = self.current
        if self.current is not None:
            self.current = self.current.next
        return ret
    def clear(self):
        self.current = None
def __init__(self, parent = None, priority = 0):
self.queues = CBQueue.MultiQueue.CircleList()
self.queueDict = {}
self.queueStat = {}
self.statseq = deque()
self.parent = parent
self.priority = priority
self.totalSize = 0
self.blocked = True
def canPop(self):
return bool(self.queues.current)
def _pop(self):
if not self.canPop():
raise IndexError('pop from an empty or blocked queue')
c = self.queues.next()
ret = c.value._pop()
self.queueStat[c.value] = self.queueStat.get(c.value, 0) + 1
while len(self.statseq) >= 10 * len(self.queueDict) + 10:
o = self.statseq.popleft()
if o in self.queueStat:
self.queueStat[o] = self.queueStat[o] - 1
if self.queueStat[o] <= 0 and not o in self.queueDict:
del self.queueStat[o]
self.statseq.append(c.value)
if not c.value.canPop():
self.queues.remove(c)
self.queueDict[c.value] = None
self.totalSize = self.totalSize - 1
if not self.canPop():
if not self.blocked:
self.blocked = True
if self.parent is not None:
self.parent.notifyBlock(self, True)
return ret
def addSubQueue(self, queue):
self.totalSize = self.totalSize + len(queue)
queue.parent = self
if queue.canPop():
# Activate this queue
node = CBQueue.MultiQueue.CircleListNode(queue)
self.queues.insertprev(node)
self.queueDict[queue] = node
self.queueStat[queue] = 0
else:
self.queueDict[queue] = None
if self.canPop():
if self.blocked:
self.blocked = False
if self.parent is not None:
self.parent.notifyBlock(self, False)
def removeSubQueue(self, queue):
self.totalSize = self.totalSize - len(queue)
if self.queueDict[queue] is not None:
self.queues.remove(self.queueDict[queue])
del self.queueDict[queue]
if queue in self.queueStat:
del self.queueStat[queue]
if not self.canPop():
if not self.blocked:
self.blocked = True
if self.parent is not None:
self.parent.notifyBlock(self, True)
def notifyAppend(self, queue, force):
if self.parent is not None:
m = self.parent.notifyAppend(self, force)
if m is not None:
return m
self.totalSize = self.totalSize + 1
if not queue.blocked:
if self.queueDict[queue] is None:
# Activate this queue
node = CBQueue.MultiQueue.CircleListNode(queue)
qs = self.queueStat.setdefault(queue, 0)
if qs * len(self.queueStat) >= len(self.statseq):
self.queues.insertprev(node)
else:
self.queues.insertcurrent(node)
self.queueDict[queue] = node
if self.canPop():
if self.blocked:
self.blocked = False
if self.parent is not None:
self.parent.notifyBlock(self, False)
return None
def __len__(self):
return self.totalSize
def notifyBlock(self, queue, blocked):
if queue.canPop():
if self.queueDict[queue] is None:
# Activate this queue
node = CBQueue.MultiQueue.CircleListNode(queue)
qs = self.queueStat.setdefault(queue, 0)
if qs * len(self.queueStat) >= len(self.statseq):
self.queues.insertprev(node)
else:
self.queues.insertcurrent(node)
self.queueDict[queue] = node
else:
if self.queueDict[queue] is not None:
self.queues.remove(self.queueDict[queue])
self.queueDict[queue] = None
selfblocked = not self.canPop()
if selfblocked != self.blocked:
self.blocked = selfblocked
if self.parent is not None:
self.parent.notifyBlock(self, selfblocked)
def notifyPop(self, queue, length = 1):
    '''
    Internal notify from a sub-queue that *length* events were removed from it.

    :param queue: the sub-queue that was popped/cleared
    :param length: number of events removed
    :returns: ([QueueCanWriteEvent...], [QueueIsEmptyEvent...]) propagated from the parent
    '''
    self.totalSize = self.totalSize - length
    # Deactivate the sub-queue when it has nothing left to pop
    if not queue.canPop():
        if self.queueDict[queue] is not None:
            # BUGFIX: was "self.queuDict" -- AttributeError whenever a still-active
            # sub-queue drained through this path
            self.queues.remove(self.queueDict[queue])
            self.queueDict[queue] = None
    ret = ([], [])
    if self.parent is not None:
        ret = self.parent.notifyPop(self, length)
    # Recompute our own blocked flag and propagate a change upwards
    blocked = not self.canPop()
    if blocked != self.blocked:
        self.blocked = blocked
        if self.parent is not None:
            self.parent.notifyBlock(self, blocked)
    return ret
def unblockall(self):
for q in self.queueDict.keys():
q.unblockall()
def _clear(self):
ret = ([],[])
for q in self.queueDict.keys():
pr = q._clear()
ret[0].extend(pr[0])
ret[1].extend(pr[1])
self.totalSize = 0
self.blockedSize = 0
self.queues.clear()
if not self.blocked:
self.blocked = True
if self.parent is not None:
self.parent.notifyBlock(self, True)
return ret
class AutoClassQueue(object):
'''
A queue classify events into virtual sub-queues by key
'''
nokey = object()
def __init__(self, parent = None, maxlength = None, key = 'owner', preserveForNew = 1, maxstat = None, subqueuelimit = None):
self.queues = CBQueue.MultiQueue.CircleList()
self.queueDict = {}
self.queueStat = {}
self.statseq = deque()
self.maxlength = maxlength
self.blocked = False
if self.maxlength is not None and self.maxlength <= 0:
self.maxlength = 1
if maxstat is None:
if self.maxlength is None:
self.maxstat = 10240
else:
self.maxstat = maxlength * 10
else:
self.maxstat = maxstat
if self.maxstat >= 10240:
self.maxstat = 10240
self.waited = set()
self.key = key
self.preserve = preserveForNew
self.totalSize = 0
self.blockKeys = set()
self.subqueuelimit = subqueuelimit
@classmethod
def initHelper(cls, key = 'owner', preserveForNew = 1, maxstat = None, subqueuelimit = None):
def initer(parent = None, maxlength = None):
return cls(parent, maxlength, key, preserveForNew, maxstat, subqueuelimit)
return initer
def append(self, value, force = False):
    '''
    Append *value* to the virtual sub-queue selected by its key attribute.

    :param value: the event to store; classified by getattr(value, self.key, nokey)
    :param force: if True, bypass all length limits
    :returns: None on success, or an event matcher to wait on when full
    '''
    key = getattr(value, self.key, self.nokey)
    # We use a hash instead of a reference or weakref for the statistics; hash
    # collisions may merge stats, but that is better than leaking memory.
    kid = hash(key)
    if not force:
        w = self._tryAppend(key)
        if w is not None:
            return w
    if self.parent is not None:
        m = self.parent.notifyAppend(self, force)
        if m is not None:
            return m
    if key in self.queueDict:
        self.queueDict[key].value[1].append(value)
    else:
        # First event for this key: create and activate a new virtual sub-queue
        node = CBQueue.MultiQueue.CircleListNode((key,deque()))
        node.value[1].append(value)
        qs = self.queueStat.setdefault(kid, 0)
        # Heavily-served keys go to the back of the round-robin; rarely served
        # keys are scheduled next, to balance service between keys
        if qs * len(self.queueStat) >= len(self.statseq):
            self.queues.insertprev(node)
        else:
            self.queues.insertcurrent(node)
        self.queueDict[key] = node
    self.totalSize += 1
    blocked = not self.canPop() and self.totalSize > 0
    if blocked != self.blocked:
        self.blocked = blocked
        if self.parent is not None:
            self.parent.notifyBlock(self, blocked)
    return None
def _tryAppend(self, key):
if self.maxlength is None:
if self.subqueuelimit is None or not key in self.queueDict:
return None
elif len(self.queueDict[key].value[1]) >= self.subqueuelimit:
self.waited.add((False, False, key))
return AutoClassQueueCanWriteEvent.createMatcher(self, _ismatch = lambda x: x.key == key or x.key is self.nokey)
else:
return None
if key in self.queueDict:
if self.subqueuelimit is not None and len(self.queueDict[key].value[1]) >= self.subqueuelimit:
self.waited.add((False, False, key))
return AutoClassQueueCanWriteEvent.createMatcher(self, _ismatch = lambda x: x.key == key or x.key is self.nokey)
elif self.totalSize < self.maxlength - self.preserve - len(self.queueStat) + len(self.queueDict):
return None
else:
if len(self.queueDict[key].value[1]) <= 1:
self.waited.add((False, True, key))
return AutoClassQueueCanWriteEvent.createMatcher(self, False, _ismatch = lambda x: not (x.firstonly and x.key != key))
else:
self.waited.add((False, False))
return AutoClassQueueCanWriteEvent.createMatcher(self, False, False)
elif hash(key) in self.queueStat:
if self.totalSize < self.maxlength - self.preserve:
return None
else:
self.waited.add((False, True))
return AutoClassQueueCanWriteEvent.createMatcher(self, False)
else:
if self.totalSize < self.maxlength:
return None
else:
self.waited.add((True, True))
return AutoClassQueueCanWriteEvent.createMatcher(self)
def canAppend(self):
return self.maxlength is None or self.totalSize < self.maxlength
def canPop(self):
return self.queues.current is not None
def pop(self):
ret = self._pop()
if self.parent is not None:
pr = self.parent.notifyPop(self)
ret[1].extend(pr[0])
ret[2].extend(pr[1])
return ret
def _pop(self):
if not self.canPop():
raise IndexError('pop from a blocked or empty queue')
c = self.queues.next()
key = c.value[0]
kid = hash(key)
ret = c.value[1].popleft()
self.totalSize -= 1
self.queueStat[kid] = self.queueStat.get(kid, 0) + 1
while len(self.statseq) >= min(self.maxstat, 10 * len(self.queueStat) + 10):
k1 = self.statseq.popleft()
self.queueStat[k1] = self.queueStat[k1] - 1
if self.queueStat[k1] <= 0:
del self.queueStat[k1]
self.statseq.append(kid)
if not c.value[1]:
del self.queueDict[c.value[0]]
self.queues.remove(c)
blocked = not self.canPop() and self.totalSize > 0
if blocked != self.blocked:
self.blocked = blocked
if self.parent is not None:
self.parent.notifyBlock(self, blocked)
if self.waited:
if key not in self.queueDict:
subsize = 0
else:
subsize = len(self.queueDict[key].value[1])
if self.maxlength is None:
if self.subqueuelimit is not None and subsize < self.subqueuelimit and (False, False, key) in self.waited:
return (ret, [AutoClassQueueCanWriteEvent(self, False, False, key=key)], [])
elif self.totalSize < self.maxlength - self.preserve - len(self.queueStat) + len(self.queueDict):
self.waited = set(w for w in self.waited if len(w) == 3 and w[1] == False and w[2] != key)
return (ret, [AutoClassQueueCanWriteEvent(self, False, False, key=key)], [])
elif self.totalSize < self.maxlength - self.preserve:
if (False, True) in self.waited or (False, True, key) in self.waited or (True, True) in self.waited or \
(False, False, key) in self.waited:
self.waited.discard((False, True))
self.waited.discard((False, True, key))
self.waited.discard((True, True))
self.waited.discard((False, False, key))
return (ret, [AutoClassQueueCanWriteEvent(self, False, True, key=key)], [])
elif self.totalSize < self.maxlength:
if (True, True) in self.waited or (False, False, key) in self.waited or (False, True, key) in self.waited:
self.waited.discard((True, True))
self.waited.discard((False, False, key))
if (False, True, key) in self.waited:
self.waited.discard((False, True, key))
return (ret, [AutoClassQueueCanWriteEvent(self, False, True, key=key)], [])
else:
return (ret, [AutoClassQueueCanWriteEvent(self, True, True, key=key)], [])
elif self.subqueuelimit is not None and subsize < self.subqueuelimit and (False, False, key) in self.waited:
# If we don't wake up the sub-queue waiter now, it may wait forever.
# The sub-queue waiter won't be able to send events in, but they will get a new matcher
# Some waiters might wake up mistakenly, they will wait again when they try to append the event.
self.waited.discard((True, True))
self.waited.discard((False, False, key))
return (ret, [AutoClassQueueCanWriteEvent(self, True, True, key=key)], [])
return (ret, [], [])
def clear(self):
l = len(self)
ret = self._clear()
if self.parent is not None:
pr = self.parent.notifyPop(self, l)
ret[0].extend(pr[0])
ret[1].extend(pr[1])
return ret
def _clear(self):
self.queues.clear()
self.blockKeys.clear()
self.queueDict.clear()
self.totalSize = 0
blocked = not self.canPop() and self.totalSize > 0
if blocked != self.blocked:
self.blocked = blocked
if self.parent is not None:
self.parent.notifyBlock(self, blocked)
if self.waited:
self.waited.clear()
return ([AutoClassQueueCanWriteEvent(self, False, False, key=self.nokey)], [])
else:
return ([], [])
def __len__(self):
return self.totalSize
def block(self, value):
if self.parent is not None:
self.parent.notifyAppend(self, True)
key = getattr(value, self.key, self.nokey)
if key in self.queueDict:
self.queueDict[key].value[1].appendleft(value)
self.queues.remove(self.queueDict[key])
else:
node = CBQueue.MultiQueue.CircleListNode((key,deque()))
node.value[1].append(value)
self.queueDict[key] = node
self.blockKeys.add(key)
self.totalSize += 1
blocked = not self.canPop() and self.totalSize > 0
if blocked != self.blocked:
self.blocked = blocked
if self.parent is not None:
self.parent.notifyBlock(self, blocked)
def unblock(self, value):
key = getattr(value, self.key, self.nokey)
if key in self.blockKeys:
self._unblock(key)
blocked = not self.canPop() and self.totalSize > 0
if blocked != self.blocked:
self.blocked = blocked
if self.parent is not None:
self.parent.notifyBlock(self, blocked)
def _unblock(self, key):
self.blockKeys.remove(key)
node = self.queueDict[key]
qs = self.queueStat.setdefault(hash(key), 0)
if qs * len(self.queueStat) >= len(self.statseq):
self.queues.insertprev(node)
else:
self.queues.insertcurrent(node)
def unblockall(self):
for k in list(self.blockKeys):
self._unblock(k)
blocked = not self.canPop() and self.totalSize > 0
if blocked != self.blocked:
self.blocked = blocked
if self.parent is not None:
self.parent.notifyBlock(self, blocked)
def __init__(self, tree = None, parent = None, maxdefault = None, maxtotal = None, defaultQueueClass = FifoQueue, defaultQueuePriority = 0):
    '''
    Constructor

    :param tree: the match tree used to classify events; a new MatchTree is created when None
    :param parent: parent container (a CBQueue.MultiQueue) for sub-queue notifications, or None
    :param maxdefault: max length of the default sub-queue
    :param maxtotal: max total length of this queue including all sub-queues
    :param defaultQueueClass: factory for the default queue (FifoQueue, PriorityQueue, ...)
    :param defaultQueuePriority: priority of the default queue; larger is higher, 0 is lowest
    '''
    # priority -> MultiQueue group of same-priority sub-queues
    self.queues = {}
    # name/queue -> shared [priority, queue, name] entry
    self.queueindex = {}
    # sorted (ascending) priorities that currently have poppable events
    self.prioritySet = []
    if tree is None:
        self.tree = MatchTree()
    else:
        self.tree = tree
    self.parent = parent
    defaultPriority = CBQueue.MultiQueue(self, defaultQueuePriority)
    defaultQueue = defaultQueueClass(defaultPriority, maxdefault)
    defaultPriority.addSubQueue(defaultQueue)
    self.queues[defaultQueuePriority] = defaultPriority
    # Events matching nothing else fall through to the default queue
    self.tree.insert(None, defaultQueue)
    self.defaultQueue = defaultQueue
    self.totalSize = 0
    self.maxtotal = maxtotal
    self.blocked = True
    # event -> sub-queue holding its block, for unblock()/unblockqueue()
    self.blockEvents = {}
    self.isWaited = False
    self.isWaitEmpty = False
    self.outputStat = 0
def _removeFromTree(self):
for v in self.queueindex.values():
if len(v) == 3:
v[1]._removeFromTree()
self.tree.remove(None, self.defaultQueue)
self.tree = None
def canAppend(self):
    '''
    Tell whether the total limit still has room; sub-queues (even the default
    one) may still be full, so append() can fail even when this returns True.

    :returns: True when another event fits under maxtotal, False otherwise.
    '''
    if self.maxtotal is None:
        return True
    return self.totalSize < self.maxtotal
def append(self, event, force = False):
'''
Append an event to queue. The events are classified and appended to sub-queues
:param event: input event
:param force: if True, the event is appended even if the queue is full
:returns: None if appended successfully, or a matcher to match a QueueCanWriteEvent otherwise
'''
if self.tree is None:
if self.parent is None:
raise IndexError('The queue is removed')
else:
return self.parent.parent.append(event, force)
q = self.tree.matchfirst(event)
return q.append(event, force)
def waitForEmpty(self):
'''
Make this queue generate a QueueIsEmptyEvent when it is empty
:returns: matcher for QueueIsEmptyEvent, or None if the queue is already empty
'''
if not self:
return None
self.isWaitEmpty = True
return QueueIsEmptyEvent.createMatcher(self)
def block(self, event, emptyEvents = ()):
'''
Return a recently popped event to queue, and block all later events until unblock.
Only the sub-queue directly containing the event is blocked, so events in other queues may still be processed.
It is illegal to call block and unblock in different queues with a same event.
:param event: the returned event. When the queue is unblocked later, this event will be popped again.
:param emptyEvents: reactivate the QueueIsEmptyEvents
'''
q = self.tree.matchfirst(event)
q.block(event)
self.blockEvents[event] = q
for ee in emptyEvents:
ee.queue.waitForEmpty()
def unblock(self, event):
    '''
    Remove the block created for *event*; a no-op when the event is not blocked.
    '''
    try:
        blocked_queue = self.blockEvents[event]
    except KeyError:
        return
    blocked_queue.unblock(event)
    del self.blockEvents[event]
def unblockqueue(self, queue):
'''
Remove blocked events from the queue and all subqueues. Usually used after queue clear/unblockall to prevent leak.
:returns: the cleared events
'''
subqueues = set()
def allSubqueues(q):
subqueues.add(q)
for v in q.queueindex.values():
if len(v) == 3:
allSubqueues(v[1])
allSubqueues(queue)
events = [k for k,v in self.blockEvents.items() if v in subqueues]
for e in events:
del self.blockEvents[e]
return events
def unblockall(self):
    '''
    Lift every block on this queue and on all of its sub-queues.
    '''
    for priority_group in self.queues.values():
        priority_group.unblockall()
    # every blocked event is released, so the bookkeeping can be dropped
    self.blockEvents.clear()
def notifyAppend(self, queue, force):
'''
Internal notify for sub-queues
:returns: If the append is blocked by parent, an EventMatcher is returned, None else.
'''
if not force and not self.canAppend():
self.isWaited = True
return QueueCanWriteEvent.createMatcher(self)
if self.parent is not None:
m = self.parent.notifyAppend(self, force)
if m is not None:
return m
self.totalSize = self.totalSize + 1
return None
def notifyBlock(self, queue, blocked):
'''
Internal notify for sub-queues been blocked
'''
if blocked:
if self.prioritySet[-1] == queue.priority:
self.prioritySet.pop()
else:
pindex = bisect_left(self.prioritySet, queue.priority)
if pindex < len(self.prioritySet) and self.prioritySet[pindex] == queue.priority:
del self.prioritySet[pindex]
else:
if queue.canPop():
pindex = bisect_left(self.prioritySet, queue.priority)
if pindex >= len(self.prioritySet) or self.prioritySet[pindex] != queue.priority:
self.prioritySet.insert(pindex, queue.priority)
newblocked = not self.canPop()
if newblocked != self.blocked:
self.blocked = newblocked
if self.parent is not None:
self.parent.notifyBlock(self, newblocked)
def notifyPop(self, queue, length = 1):
'''
Internal notify for sub-queues been poped
:returns: List of any events generated by this pop
'''
self.totalSize = self.totalSize - length
ret1 = []
ret2 = []
if self.isWaited and self.canAppend():
self.isWaited = False
ret1.append(QueueCanWriteEvent(self))
if self.isWaitEmpty and not self:
self.isWaitEmpty = False
ret2.append(QueueIsEmptyEvent(self))
if self.parent is not None:
pr = self.parent.notifyPop(self, length)
ret1 += pr[0]
ret2 += pr[1]
newblocked = not self.canPop()
if newblocked != self.blocked:
self.blocked = newblocked
if self.parent is not None:
self.parent.notifyBlock(self, newblocked)
return (ret1, ret2)
def canPop(self):
    '''
    Tell whether pop() would succeed.

    :returns: True when at least one priority level has a poppable event,
              False when the queue is empty or every sub-queue is blocked.
    '''
    return len(self.prioritySet) > 0
def pop(self):
'''
Pop an event from the queue. The event in the queue with higher priority is popped before ones in lower priority.
If there are multiple queues with the same priority, events are taken in turn from each queue.
May return some queueEvents indicating that some of the queues can be written into.
:returns: (obj, (queueEvents,...), (queueEmptyEvents,...)) where obj is the popped event, queueEvents are QueueCanWriteEvents generated by this pop
and queueEmptyEvents are QueueIsEmptyEvents generated by this pop
'''
ret = self._pop()
if self.parent is not None:
pr = self.parent.notifyPop(self)
ret[1].extend(pr[0])
ret[2].extend(pr[1])
return ret
def _pop(self):
'''
Actual pop
'''
if not self.canPop():
raise IndexError('pop from an empty or blocked queue')
priority = self.prioritySet[-1]
ret = self.queues[priority]._pop()
self.outputStat = self.outputStat + 1
self.totalSize = self.totalSize - 1
if self.isWaited and self.canAppend():
self.isWaited = False
ret[1].append(QueueCanWriteEvent(self))
if self.isWaitEmpty and not self:
self.isWaitEmpty = False
ret[2].append(QueueIsEmptyEvent(self))
return ret
def clear(self):
'''
Clear all the events in this queue, including any sub-queues.
:returns: ((queueEvents,...), (queueEmptyEvents,...)) where queueEvents are QueueCanWriteEvents generated by clearing.
'''
l = len(self)
ret = self._clear()
if self.parent is not None:
pr = self.parent.notifyPop(self, l)
ret[0].extend(pr[0])
ret[1].extend(pr[1])
return ret
def _clear(self):
'''
Actual clear
'''
ret = ([],[])
for q in self.queues.values():
pr = q._clear()
ret[0].extend(pr[0])
ret[1].extend(pr[1])
self.totalSize = 0
del self.prioritySet[:]
if self.isWaited and self.canAppend():
self.isWaited = False
ret[0].append(QueueCanWriteEvent(self))
if self.isWaitEmpty and not self:
self.isWaitEmpty = False
ret[1].append(QueueIsEmptyEvent(self))
self.blockEvents.clear()
return ret
def __contains__(self, name):
return name in self.queueindex
def __getitem__(self, name):
'''
Get a sub-queue through q['sub-queue-name']
'''
return self.queueindex[name][1]
def getPriority(self, queue):
'''
get priority of a sub-queue
'''
return self.queueindex[queue][0]
def setPriority(self, queue, priority):
'''
Set priority of a sub-queue
'''
q = self.queueindex[queue]
self.queues[q[0]].removeSubQueue(q[1])
newPriority = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
q[0] = priority
newPriority.addSubQueue(q[1])
def addSubQueue(self, priority, matcher, name = None, maxdefault = None, maxtotal = None, defaultQueueClass = FifoQueue):
'''
add a sub queue to current queue, with a priority and a matcher
:param priority: priority of this queue. Larger is higher, 0 is lowest.
:param matcher: an event matcher to catch events. Every event match the criteria will be stored in this queue.
:param name: a unique name to identify the sub-queue. If none, the queue is anonymous. It can be any hashable value.
:param maxdefault: max length for default queue.
:param maxtotal: max length for sub-queue total, including sub-queues of sub-queue
'''
if name is not None and name in self.queueindex:
raise IndexError("Duplicated sub-queue name '" + str(name) + "'")
subtree = self.tree.subtree(matcher, True)
newPriority = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
newQueue = CBQueue(subtree, newPriority, maxdefault, maxtotal, defaultQueueClass)
newPriority.addSubQueue(newQueue)
qi = [priority, newQueue, name]
if name is not None:
self.queueindex[name] = qi
self.queueindex[newQueue] = qi
return newQueue
def removeSubQueue(self, queue):
'''
remove a sub queue from current queue.
This unblock the sub-queue, retrieve all events from the queue and put them back to the parent.
Call clear on the sub-queue first if the events are not needed any more.
:param queue: the name or queue object to remove
:returns: ((queueevents,...), (queueEmptyEvents,...)) Possible queue events from removing sub-queues
'''
q = self.queueindex[queue]
q[1].unblockall()
q[1]._removeFromTree()
ret = ([],[])
while q[1].canPop():
r = q[1].pop()
self.append(r[0], True)
ret[0].extend(r[1])
ret[1].extend(r[2])
self.queues[q[0]].removeSubQueue(q[1])
# Remove from index
if q[2] is not None:
del self.queueindex[q[2]]
del self.queueindex[q[1]]
newblocked = not self.canPop()
if newblocked != self.blocked:
self.blocked = newblocked
if self.parent is not None:
self.parent.notifyBlock(self, newblocked)
return ret
def __len__(self):
return self.totalSize
|
import asyncio
import logging
from functools import wraps

import aiohs2
import numpy as np
import pandas as pd
coroutine = asyncio.coroutine
logger = logging.getLogger(__name__)
# Map Hive column type names to numpy dtypes used when building DataFrames.
# BUGFIX: pd.np was a deprecated alias for numpy (removed in pandas 2.0);
# use numpy directly.
hive_type_map = {
    'BOOLEAN': np.dtype(bool),
    'BINARY': np.dtype(bytes),
    'TINYINT': np.dtype(int),
    'SMALLINT': np.dtype(int),
    'INT': np.dtype(int),
    'BIGINT': np.dtype(int),
    'FLOAT': np.dtype(float),
    'DOUBLE': np.dtype(float),
    'DECIMAL': np.dtype(float),
    'TIMESTAMP': np.dtype('datetime64'),
    'DATE': np.dtype('datetime64'),
    'STRING': np.dtype(str),
    'VARCHAR': np.dtype(str),
    'CHAR': np.dtype(str),
    'ARRAY': np.dtype(list),
    'MAP': np.dtype(dict),
    'STRUCT': np.dtype(object),
    'UNIONTYPE': np.dtype(object),
}
class AioHive:
    def __init__(self, host=None, config=None, port=10000):
        """
        coroutine based hive client
        Parameters
        ==========
        host : str
            host of the hiveserver2 to connect to
        config : str, optional
            path to a hive-site.xml; mutually exclusive with `host` --
            the host is read from its 'hive.metastore.uris' property
        port : int, default 10000
            port of the hiveserver2
        """
        # exactly one of host/config must be supplied
        if (host is None and config is None) or (config and host):
            raise TypeError('Either host or config argument has to be supplied')
        if config:
            import xml.etree.ElementTree as ET
            cfg = ET.parse(config)
            for res in cfg.iter('property'):
                if res.findtext('name') == 'hive.metastore.uris':
                    uri = res.findtext('value')
                    # e.g. 'thrift://somehost:9083' -> 'somehost'
                    host = uri.split('://')[-1].split(':')[0]
                    break
            else:
                raise ValueError(
                    "could not find 'hive.metastore.uris' in config")
        self.cli = aiohs2.Client(host=host, port=port)
    @coroutine
    def execute(self, request):
        """ execute request without looking at returns """
        cur = yield from self.cli.cursor()
        try:
            yield from cur.execute(request)
        finally:
            # always release the server-side cursor
            yield from cur.close()
    @staticmethod
    def get_dtype(typ):
        # Map a hive type name to a numpy dtype; strips generic parameters
        # ('ARRAY<...>') and '_'-suffixed variants before the lookup.
        try:
            return hive_type_map[typ.rsplit('<', 1)[0].rsplit('_', 1)[0]]
        except KeyError:
            logger.warning('Unknown type %r for hive request', typ)
            return pd.np.dtype(object)
    @coroutine
    def fetch(self, hql, chunk_size=10000):
        """ execute request and fetch answer as DataFrame """
        cur = yield from self.cli.cursor()
        try:
            yield from cur.execute(hql)
            schema = yield from cur.getSchema()
            columns = pd.Index([nfo['columnName'] for nfo in schema])
            dtypes = [self.get_dtype(nfo['type']) for nfo in schema]
            # only the first chunk_size rows are fetched here
            data = (yield from cur.fetch(maxRows=chunk_size)) or None
            # build as object first, then cast column-wise below
            df = pd.DataFrame(data, columns=columns, dtype=object)
            for col, typ in zip(columns, dtypes):
                if typ == pd.np.dtype('datetime64') and df[col].isnull().all():
                    # all-null datetime column: astype would fail, use NaT
                    df[col] = pd.NaT
                else:
                    try:
                        df[col] = df[col].astype(typ)
                    except TypeError as e:
                        logger.warning('Cannot convert %r to %r (%s)',
                            col, typ, e)
            return df
        finally:
            yield from cur.close()
    def iter(self, hql, chunk_size=10000):
        """ execute request and iterate over chunks of resulting DataFrame """
        cur = yield from self.cli.cursor()
        try:
            yield from cur.execute(hql)
            schema = yield from cur.getSchema()
            columns = pd.Index([nfo['columnName'] for nfo in schema])
            dtypes = [self.get_dtype(nfo['type']) for nfo in schema]
            chunks = cur.iter(maxRows=chunk_size)
            # shared mutable state for the to_frame closures below
            class local:
                offset = 0
                empty = None
                warns = set()
            @coroutine
            def to_frame(chunk_co):
                df = pd.DataFrame((yield from chunk_co) or local.empty,
                                  columns=columns, dtype=object)
                # keep a monotonically increasing index across chunks
                df.index += local.offset
                local.offset += len(df)
                if local.empty is None:
                    local.empty = df[:0].copy()
                for col, typ in zip(columns, dtypes):
                    try:
                        df[col] = df[col].astype(typ)
                    except TypeError as e:
                        # warn only once per column
                        if col not in local.warns:
                            logger.warning('Cannot convert %r to %r (%s)',
                                col, typ, e, exc_info=True)
                            local.warns.add(col)
                return df
            def closing():
                try:
                    for chunk in chunks:
                        # here we yield the coroutine that will fetch the data
                        # and put it in a frame
                        yield to_frame(chunk)
                finally:
                    # while ensuring that the cursor is closed ...
                    # NOTE(review): cur.close() is a coroutine; called without
                    # being awaited it likely never runs -- confirm vs aiohs2
                    cur.close()
            return closing()
        finally:
            cur.close()
class SyncedHive:
    def __init__(self, *args, hive=None, **kws):
        """
        synced wrapper around the asyncio hive class
        Parameters
        ==========
        host : str
            host of the hiveserver2 to connect to
        port : int, default 10000
            port of the hiveserver2
        hive : AioHive, optional
            existing async hive client
        """
        self.hive = hive or AioHive(*args, **kws)
        self.loop = asyncio.get_event_loop()
    def run(self, coro):
        """Block on the event loop until *coro* has completed."""
        return self.loop.run_until_complete(coro)
    def synced(name):
        """Class-body factory: build a blocking proxy for AioHive.<name>."""
        func = getattr(AioHive, name)
        @wraps(func)
        def blocking(self, *args, **kws):
            return self.run(func(self.hive, *args, **kws))
        return blocking
    execute = synced('execute')
    fetch = synced('fetch')
    def iter(self, *args, **kws):
        """Drive AioHive.iter synchronously, yielding non-empty chunks."""
        for chunk_coro in self.run(self.hive.iter(*args, **kws)):
            frame = self.run(chunk_coro)
            if frame.empty:
                continue
            yield frame
Hive = SyncedHive
less verbose warnings
import asyncio
import aiohs2
import pandas as pd
import logging
from functools import wraps
coroutine = asyncio.coroutine
logger = logging.getLogger(__name__)
# HiveServer2 column type name -> numpy dtype, consumed by
# AioHive.get_dtype when casting fetched result columns.
# NOTE(review): `pd.np` is a deprecated alias removed in pandas >= 2.0;
# consider `import numpy as np` and using `np.dtype` directly.
hive_type_map = {
    'BOOLEAN': pd.np.dtype(bool),
    'BINARY': pd.np.dtype(bytes),
    'TINYINT': pd.np.dtype(int),
    'SMALLINT': pd.np.dtype(int),
    'INT': pd.np.dtype(int),
    'BIGINT': pd.np.dtype(int),
    'FLOAT': pd.np.dtype(float),
    'DOUBLE': pd.np.dtype(float),
    'DECIMAL': pd.np.dtype(float),
    'TIMESTAMP': pd.np.dtype('datetime64'),
    'DATE': pd.np.dtype('datetime64'),
    'STRING': pd.np.dtype(str),
    'VARCHAR': pd.np.dtype(str),
    'CHAR': pd.np.dtype(str),
    'ARRAY': pd.np.dtype(list),
    'MAP': pd.np.dtype(dict),
    'STRUCT': pd.np.dtype(object),
    'UNIONTYPE': pd.np.dtype(object),
}
class AioHive:
    def __init__(self, host=None, config=None, port=10000):
        """
        coroutine based hive client
        Parameters
        ==========
        host : str
            host of the hiveserver2 to connect to
        config : str, optional
            path to a hive-site.xml; mutually exclusive with `host` --
            the host is read from its 'hive.metastore.uris' property
        port : int, default 10000
            port of the hiveserver2
        """
        # exactly one of host/config must be supplied
        if (host is None and config is None) or (config and host):
            raise TypeError('Either host or config argument has to be supplied')
        if config:
            import xml.etree.ElementTree as ET
            cfg = ET.parse(config)
            for res in cfg.iter('property'):
                if res.findtext('name') == 'hive.metastore.uris':
                    uri = res.findtext('value')
                    # e.g. 'thrift://somehost:9083' -> 'somehost'
                    host = uri.split('://')[-1].split(':')[0]
                    break
            else:
                raise ValueError(
                    "could not find 'hive.metastore.uris' in config")
        self.cli = aiohs2.Client(host=host, port=port)
    @coroutine
    def execute(self, request):
        """ execute request without looking at returns """
        cur = yield from self.cli.cursor()
        try:
            yield from cur.execute(request)
        finally:
            # always release the server-side cursor
            yield from cur.close()
    @staticmethod
    def get_dtype(typ):
        # Map a hive type name to a numpy dtype; strips generic parameters
        # ('ARRAY<...>') and '_'-suffixed variants before the lookup.
        try:
            return hive_type_map[typ.rsplit('<', 1)[0].rsplit('_', 1)[0]]
        except KeyError:
            logger.warning('Unknown type %r for hive request', typ)
            return pd.np.dtype(object)
    @coroutine
    def fetch(self, hql, chunk_size=10000):
        """ execute request and fetch answer as DataFrame """
        cur = yield from self.cli.cursor()
        try:
            yield from cur.execute(hql)
            schema = yield from cur.getSchema()
            columns = pd.Index([nfo['columnName'] for nfo in schema])
            dtypes = [self.get_dtype(nfo['type']) for nfo in schema]
            # only the first chunk_size rows are fetched here
            data = (yield from cur.fetch(maxRows=chunk_size)) or None
            # build as object first, then cast column-wise below
            df = pd.DataFrame(data, columns=columns, dtype=object)
            for col, typ in zip(columns, dtypes):
                if typ == pd.np.dtype('datetime64') and df[col].isnull().all():
                    # all-null datetime column: astype would fail, use NaT
                    df[col] = pd.NaT
                else:
                    try:
                        df[col] = df[col].astype(typ)
                    except TypeError as e:
                        logger.warning('Cannot convert %r to %r (%s)',
                            col, typ, e)
            return df
        finally:
            yield from cur.close()
    def iter(self, hql, chunk_size=10000):
        """ execute request and iterate over chunks of resulting DataFrame """
        cur = yield from self.cli.cursor()
        try:
            yield from cur.execute(hql)
            schema = yield from cur.getSchema()
            columns = pd.Index([nfo['columnName'] for nfo in schema])
            dtypes = [self.get_dtype(nfo['type']) for nfo in schema]
            chunks = cur.iter(maxRows=chunk_size)
            # shared mutable state for the to_frame closures below
            class local:
                offset = 0
                empty = None
                warns = set()
            @coroutine
            def to_frame(chunk_co):
                df = pd.DataFrame((yield from chunk_co) or local.empty,
                                  columns=columns, dtype=object)
                # keep a monotonically increasing index across chunks
                df.index += local.offset
                local.offset += len(df)
                if local.empty is None:
                    local.empty = df[:0].copy()
                for col, typ in zip(columns, dtypes):
                    try:
                        df[col] = df[col].astype(typ)
                    except TypeError as e:
                        # warn only once per column (less verbose variant)
                        if col not in local.warns:
                            logger.warning('Cannot convert %r to %r (%s)',
                                col, typ, e)
                            local.warns.add(col)
                return df
            def closing():
                try:
                    for chunk in chunks:
                        # here we yield the coroutine that will fetch the data
                        # and put it in a frame
                        yield to_frame(chunk)
                finally:
                    # while ensuring that the cursor is closed ...
                    # NOTE(review): cur.close() is a coroutine; called without
                    # being awaited it likely never runs -- confirm vs aiohs2
                    cur.close()
            return closing()
        finally:
            cur.close()
class SyncedHive:
    def __init__(self, *args, hive=None, **kws):
        """
        synced wrapper around the asyncio hive class
        Parameters
        ==========
        host : str
            host of the hiveserver2 to connect to
        port : int, default 10000
            port of the hiveserver2
        hive : AioHive, optional
            existing async hive client
        """
        self.hive = hive or AioHive(*args, **kws)
        self.loop = asyncio.get_event_loop()
    def run(self, coro):
        """Run *coro* to completion on the event loop and return its result."""
        return self.loop.run_until_complete(coro)
    def synced(name):
        """Class-body factory producing a blocking wrapper for AioHive.<name>."""
        func = getattr(AioHive, name)
        @wraps(func)
        def blocking(self, *args, **kws):
            return self.run(func(self.hive, *args, **kws))
        return blocking
    execute = synced('execute')
    fetch = synced('fetch')
    def iter(self, *args, **kws):
        """Drive AioHive.iter synchronously, yielding only non-empty chunks."""
        for chunk_coro in self.run(self.hive.iter(*args, **kws)):
            frame = self.run(chunk_coro)
            if frame.empty:
                continue
            yield frame
Hive = SyncedHive
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals, print_function
from six import iteritems, text_type, string_types
from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json
# public
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template, get_email_from_template
__version__ = '9.1.4'
__title__ = "Frappe Framework"
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
def _(msg, lang=None):
	"""Returns translated string in current lang, if exists.
	:param msg: string to translate
	:param lang: [optional] language; also used to seed `local.lang` if unset"""
	from frappe.translate import get_full_dict
	# first call may initialise the thread-local language
	if not hasattr(local, 'lang'):
		local.lang = lang or 'en'
	if not lang:
		lang = local.lang
	# msg should always be unicode
	msg = as_unicode(msg).strip()
	# return lang_full_dict according to lang passed parameter
	return get_full_dict(lang).get(msg) or msg
def as_unicode(text, encoding='utf-8'):
	'''Convert `text` to unicode, decoding byte strings with `encoding`.
	Returns '' for None so callers can chain string operations safely.'''
	if isinstance(text, text_type):
		return text
	elif text is None:
		# was `text==None`: identity, not equality, is the correct None test
		return ''
	elif isinstance(text, string_types):
		return text_type(text, encoding)
	else:
		# fall back to the object's string representation
		return text_type(text)
def get_lang_dict(fortype, name=None):
	"""Returns the translated language dict for the given type and name.
	:param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
	:param name: name of the document for which assets are to be returned."""
	# thin wrapper; all logic lives in frappe.translate
	from frappe.translate import get_dict
	return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
	"""Guess and set user language for the session. `frappe.local.lang`
	NOTE(review): `user_language` is accepted but never passed on -- confirm intent."""
	from frappe.translate import get_user_lang
	local.lang = get_user_lang(user)
# local-globals
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None, new_site=False):
	"""Initialize frappe for the current site. Reset thread locals `frappe.local`
	:param site: site name (directory under `sites_path`)
	:param sites_path: base directory containing sites; defaults to '.'
	:param new_site: set `flags.new_site` so a missing site_config.json is tolerated"""
	# idempotent: a second call on the same thread is a no-op
	if getattr(local, "initialised", None):
		return
	if not sites_path:
		sites_path = '.'
	# per-request log buffers
	local.error_log = []
	local.message_log = []
	local.debug_log = []
	local.realtime_log = []
	local.flags = _dict({
		"ran_schedulers": [],
		"currently_saving": [],
		"redirect_location": "",
		"in_install_db": False,
		"in_install_app": False,
		"in_import": False,
		"in_test": False,
		"mute_messages": False,
		"ignore_links": False,
		"mute_emails": False,
		"has_dataurl": False,
		"new_site": new_site
	})
	local.rollback_observers = []
	local.test_objects = {}
	local.site = site
	local.sites_path = sites_path
	local.site_path = os.path.join(sites_path, site)
	local.request_ip = None
	local.response = _dict({"docs":[]})
	local.task_id = None
	# site configuration depends on site_path being set above
	local.conf = _dict(get_site_config())
	local.lang = local.conf.lang or "en"
	local.lang_full_dict = None
	local.module_app = None
	local.app_modules = None
	local.system_settings = _dict()
	local.user = None
	local.user_perms = None
	local.session = None
	local.role_permissions = {}
	local.valid_columns = {}
	local.new_doc_templates = {}
	local.link_count = {}
	local.jenv = None
	local.jloader =None
	local.cache = {}
	local.meta_cache = {}
	local.form_dict = _dict()
	# NOTE(review): session is set to None above and to _dict() here
	local.session = _dict()
	setup_module_map()
	local.initialised = True
def connect(site=None, db_name=None):
	"""Connect to site database instance.
	:param site: If site is given, calls `frappe.init`.
	:param db_name: Optional. Will use from `site_config.json`."""
	from frappe.database import Database
	if site:
		init(site)
	local.db = Database(user=db_name or local.conf.db_name)
	# connecting always resets the session user to Administrator
	set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
	"""Returns `site_config.json` combined with `sites/common_site_config.json`.
	`site_config` is a set of site wide settings like database name, password, email etc."""
	config = {}
	sites_path = sites_path or getattr(local, "sites_path", None)
	site_path = site_path or getattr(local, "site_path", None)
	# common config first, so the site-specific file can override it
	if sites_path:
		common_site_config = os.path.join(sites_path, "common_site_config.json")
		if os.path.exists(common_site_config):
			config.update(get_file_json(common_site_config))
	if site_path:
		site_config = os.path.join(site_path, "site_config.json")
		if os.path.exists(site_config):
			config.update(get_file_json(site_config))
		elif local.site and not local.flags.new_site:
			# missing site_config.json for an existing site is fatal
			print("{0} does not exist".format(local.site))
			sys.exit(1)
			#raise IncorrectSitePath, "{0} does not exist".format(site_config)
	return _dict(config)
def get_conf(site=None):
	"""Return the current site config, initializing a temporary site context if needed."""
	if hasattr(local, 'conf'):
		return local.conf
	else:
		# if no site, get from common_site_config.json
		with init_site(site):
			return local.conf
class init_site:
	"""Context manager: `init` a site on entry, `destroy` the locals on exit."""
	def __init__(self, site=None):
		'''If site==None, initialize it for empty site ('') to load common_site_config.json'''
		self.site = site or ''
	def __enter__(self):
		init(self.site)
		return local
	def __exit__(self, type, value, traceback):
		destroy()
def destroy():
	"""Closes connection and releases werkzeug local."""
	if db:
		db.close()
	# drops everything stored on `local` for this thread/greenlet
	release_local(local)
# memcache
redis_server = None
def cache():
	"""Returns memcache connection."""
	global redis_server
	# lazily create one process-wide redis connection
	if not redis_server:
		from frappe.utils.redis_wrapper import RedisWrapper
		# NOTE(review): 11311 is a non-default redis port -- confirm deployment
		redis_server = RedisWrapper.from_url(conf.get('redis_cache')
			or "redis://localhost:11311")
	return redis_server
def get_traceback():
	"""Returns error traceback."""
	# delegates to frappe.utils.get_traceback (same name, imported locally)
	from frappe.utils import get_traceback
	return get_traceback()
def errprint(msg):
	"""Log error. This is sent back as `exc` in response.
	:param msg: Message."""
	msg = as_unicode(msg)
	# echo to stdout only outside of API calls, or in developer mode
	if not request or (not "cmd" in local.form_dict) or conf.developer_mode:
		# NOTE(review): under Python 3 this prints a bytes repr -- confirm
		print(msg.encode('utf-8'))
	error_log.append(msg)
def log(msg):
	"""Add to `debug_log`.
	:param msg: Message."""
	# print only for CLI runs with logging enabled in site config
	if not request:
		if conf.get("logging") or False:
			print(repr(msg))
	debug_log.append(as_unicode(msg))
def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False):
	"""Print a message to the user (via HTTP response).
	Messages are sent in the `__server_messages` property in the
	response JSON and shown in a pop-up / modal.
	:param msg: Message.
	:param title: [optional] Message title.
	:param raise_exception: [optional] Raise given exception and show message.
	:param as_table: [optional] If `msg` is a list of lists, render as HTML table.
	:param indicator: [optional] color of the message indicator ('red', ...).
	:param alert: [optional] show as a passing alert instead of a modal.
	"""
	from frappe.utils import encode
	out = _dict(message=msg)
	def _raise_exception():
		# raise_exception may be truthy (ValidationError) or an Exception class
		if raise_exception:
			if flags.rollback_on_exception:
				db.rollback()
			import inspect
			if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
				raise raise_exception(encode(msg))
			else:
				raise ValidationError(encode(msg))
	# muted: still honour the exception, but queue no message
	if flags.mute_messages:
		_raise_exception()
		return
	if as_table and type(msg) in (list, tuple):
		out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'
	if flags.print_messages and out.msg:
		# NOTE(review): str + bytes -- raises TypeError on Python 3; confirm
		print("Message: " + repr(out.msg).encode("utf-8"))
	if title:
		out.title = title
	if not indicator and raise_exception:
		indicator = 'red'
	if indicator:
		out.indicator = indicator
	if alert:
		out.alert = 1
	message_log.append(json.dumps(out))
	_raise_exception()
def clear_messages():
	"""Reset the per-request `message_log` queued by `msgprint`."""
	local.message_log = []
def throw(msg, exc=ValidationError, title=None):
	"""Throw exception and show message (`msgprint`).
	:param msg: Message.
	:param exc: Exception class. Default `frappe.ValidationError`
	:param title: [optional] Message title."""
	msgprint(msg, raise_exception=exc, title=title, indicator='red')
def emit_js(js, user=False, **kwargs):
	"""Publish a realtime `eval_js` event for the given user (default: session user)."""
	from frappe.async import publish_realtime
	# NOTE(review): `== False` also matches 0 -- `is False` may be intended
	if user == False:
		user = session.user
	publish_realtime('eval_js', js, user=user, **kwargs)
def create_folder(path, with_init=False):
	"""Create a folder in the given path and add an `__init__.py` file (optional).
	:param path: Folder path.
	:param with_init: Create `__init__.py` in the new folder."""
	from frappe.utils import touch_file
	# no-op if the folder already exists (also skips __init__.py then)
	if not os.path.exists(path):
		os.makedirs(path)
		if with_init:
			touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
	"""Set current user.
	:param username: **User** name to set as current user."""
	local.session.user = username
	local.session.sid = username
	# invalidate all user-dependent caches for this request
	local.cache = {}
	local.form_dict = _dict()
	local.jenv = None
	local.session.data = _dict()
	local.role_permissions = {}
	local.new_doc_templates = {}
	local.user_perms = None
def get_user():
	"""Return (and lazily build) `UserPermissions` for the session user."""
	from frappe.utils.user import UserPermissions
	if not local.user_perms:
		local.user_perms = UserPermissions(local.session.user)
	return local.user_perms
def get_roles(username=None):
	"""Returns roles of current user.
	:param username: [optional] get roles for this user instead."""
	# outside a session everyone is a Guest
	if not local.session:
		return ["Guest"]
	if username:
		import frappe.permissions
		return frappe.permissions.get_roles(username)
	else:
		return get_user().get_roles()
def get_request_header(key, default=None):
	"""Return HTTP request header.
	:param key: HTTP header key.
	:param default: Default value."""
	return request.headers.get(key, default)
def sendmail(recipients=None, sender="", subject="No Subject", message="No Message",
		as_markdown=False, delayed=True, reference_doctype=None, reference_name=None,
		unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
		attachments=None, content=None, doctype=None, name=None, reply_to=None,
		cc=None, message_id=None, in_reply_to=None, send_after=None, expose_recipients=None,
		send_priority=1, communication=None, retry=1, now=None, read_receipt=None, is_notification=False,
		inline_images=None, template=None, args=None, header=None):
	"""Send email using user's default **Email Account** or global default **Email Account**.
	:param recipients: List of recipients.
	:param sender: Email sender. Default is current user.
	:param subject: Email Subject.
	:param message: (or `content`) Email Content.
	:param as_markdown: Convert content markdown to HTML.
	:param delayed: Send via scheduled email sender **Email Queue**. Don't send immediately. Default is true
	:param send_priority: Priority for Email Queue, default 1.
	:param reference_doctype: (or `doctype`) Append as communication to this DocType.
	:param reference_name: (or `name`) Append as communication to this document name.
	:param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
	:param unsubscribe_params: Unsubscribe paramaters to be loaded on the unsubscribe_method [optional] (dict).
	:param attachments: List of attachments.
	:param reply_to: Reply-To Email Address.
	:param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
	:param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
	:param send_after: Send after the given datetime.
	:param expose_recipients: Display all recipients in the footer message - "This email was sent to"
	:param communication: Communication link to be set in Email Queue record
	:param inline_images: List of inline images as {"filename", "filecontent"}. All src properties will be replaced with random Content-Id
	:param template: Name of html template from templates/emails folder
	:param args: Arguments for rendering the template
	:param header: Append header in email
	"""
	# Defaults were previously mutable ([]); normalize fresh lists per call
	# so no list object is shared between invocations.
	if recipients is None:
		recipients = []
	if cc is None:
		cc = []
	text_content = None
	if template:
		message, text_content = get_email_from_template(template, args)
	# explicit `content` overrides both `message` and template output
	message = content or message
	if as_markdown:
		from markdown2 import markdown
		message = markdown(message)
	if not delayed:
		now = True
	from frappe.email import queue
	queue.send(recipients=recipients, sender=sender,
		subject=subject, message=message, text_content=text_content,
		reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
		unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
		attachments=attachments, reply_to=reply_to, cc=cc, message_id=message_id, in_reply_to=in_reply_to,
		send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority,
		communication=communication, now=now, read_receipt=read_receipt, is_notification=is_notification,
		inline_images=inline_images, header=header)
whitelisted = []
guest_methods = []
xss_safe_methods = []
def whitelist(allow_guest=False, xss_safe=False):
	"""
	Decorator for whitelisting a function and making it accessible via HTTP.
	Standard request will be `/api/method/[path.to.method]`
	:param allow_guest: Allow non logged-in user to access this method.
	:param xss_safe: Mark the method's output as safe from XSS filtering.
	Use as:
		@frappe.whitelist()
		def myfunc(param1, param2):
			pass
	"""
	def innerfn(fn):
		# registration happens at decoration time, into module-level lists
		global whitelisted, guest_methods, xss_safe_methods
		whitelisted.append(fn)
		if allow_guest:
			guest_methods.append(fn)
			if xss_safe:
				xss_safe_methods.append(fn)
		return fn
	return innerfn
def only_for(roles):
	"""Raise `frappe.PermissionError` if the user does not have any of the given **Roles**.
	:param roles: List of roles (or a single role string) to check."""
	# permission checks are skipped entirely in tests
	if local.flags.in_test:
		return
	if not isinstance(roles, (tuple, list)):
		roles = (roles,)
	roles = set(roles)
	myroles = set(get_roles())
	if not roles.intersection(myroles):
		raise PermissionError
def clear_cache(user=None, doctype=None):
	"""Clear **User**, **DocType** or global cache.
	:param user: If user is given, only user cache is cleared.
	:param doctype: If doctype is given, only DocType cache is cleared."""
	import frappe.sessions
	from frappe.core.doctype.domain_settings.domain_settings import clear_domain_cache
	if doctype:
		import frappe.model.meta
		frappe.model.meta.clear_cache(doctype)
		reset_metadata_version()
	elif user:
		frappe.sessions.clear_cache(user)
	else:	# everything
		from frappe import translate
		frappe.sessions.clear_cache()
		translate.clear_cache()
		reset_metadata_version()
		clear_domain_cache()
		local.cache = {}
		local.new_doc_templates = {}
		# let installed apps clear their own caches too
		for fn in get_hooks("clear_cache"):
			get_attr(fn)()
	# role permissions are always reset, whatever the scope
	local.role_permissions = {}
def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False):
	"""Raises `frappe.PermissionError` if not permitted.
	:param doctype: DocType for which permission is to be check.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param doc: [optional] Checks User permissions for given doc.
	:param user: [optional] Check for given user. Default: current user.
	:param throw: [optional] raise `PermissionError` instead of returning False."""
	# allow calling with just a doc; infer its doctype
	if not doctype and doc:
		doctype = doc.doctype
	import frappe.permissions
	out = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)
	if throw and not out:
		if doc:
			frappe.throw(_("No permission for {0}").format(doc.doctype + " " + doc.name))
		else:
			frappe.throw(_("No permission for {0}").format(doctype))
	return out
def has_website_permission(doc=None, ptype='read', user=None, verbose=False, doctype=None):
	"""Raises `frappe.PermissionError` if not permitted.
	:param doctype: DocType for which permission is to be check.
	:param ptype: Permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default: `read`.
	:param doc: Checks User permissions for given doc.
	:param user: [optional] Check for given user. Default: current user."""
	if not user:
		user = session.user
	if doc:
		# a string doc is a document name; load the actual document
		if isinstance(doc, string_types):
			doc = get_doc(doctype, doc)
		doctype = doc.doctype
		if doc.flags.ignore_permissions:
			return True
		# check permission in controller
		if hasattr(doc, 'has_website_permission'):
			return doc.has_website_permission(ptype, verbose=verbose)
	hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
	if hooks:
		for method in hooks:
			result = call(method, doc=doc, ptype=ptype, user=user, verbose=verbose)
			# if even a single permission check is Falsy
			if not result:
				return False
		# else it is Truthy
		return True
	else:
		# no hooks registered: deny by default
		return False
def is_table(doctype):
	"""Returns True if `istable` property (indicating child Table) is set for given DocType."""
	def get_tables():
		return db.sql_list("select name from tabDocType where istable=1")
	# the full list of child-table doctypes is cached under "is_table"
	tables = cache().get_value("is_table", get_tables)
	return doctype in tables
def get_precision(doctype, fieldname, currency=None, doc=None):
	"""Get precision for a given field
	:param currency: [optional] currency whose precision governs Currency fields"""
	from frappe.model.meta import get_field_precision
	return get_field_precision(get_meta(doctype).get_field(fieldname), doc, currency)
def generate_hash(txt=None, length=None):
	"""Generates random hash for given text + current timestamp + random string.
	:param txt: [optional] seed text mixed into the hash
	:param length: [optional] truncate the hex digest to this many characters"""
	import hashlib, time
	from .utils import random_string
	# sha224 over txt + timestamp + 8 random chars; hex digest is 56 chars
	digest = hashlib.sha224(((txt or "") + repr(time.time()) + repr(random_string(8))).encode()).hexdigest()
	if length:
		digest = digest[:length]
	return digest
def reset_metadata_version():
	"""Reset `metadata_version` (Client (Javascript) build ID) hash.
	Returns the newly generated version hash."""
	v = generate_hash()
	cache().set_value("metadata_version", v)
	return v
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
	"""Returns a new document of the given DocType with defaults set.
	:param doctype: DocType of the new document.
	:param parent_doc: [optional] add to parent document.
	:param parentfield: [optional] add against this `parentfield`.
	:param as_dict: [optional] return a plain dict instead of a Document."""
	from frappe.model.create_new import get_new_doc
	return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value=None):
	"""Set document value. Calls `frappe.client.set_value`"""
	import frappe.client
	return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(*args, **kwargs):
	"""Return a `frappe.model.document.Document` object of the given type and name.
	:param arg1: DocType name as string **or** document JSON.
	:param arg2: [optional] Document name as string.
	Examples:
		# insert a new document
		todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
		todo.insert()
		# open an existing document
		todo = frappe.get_doc("ToDo", "TD0001")
	"""
	import frappe.model.document
	return frappe.model.document.get_doc(*args, **kwargs)
def get_last_doc(doctype):
	"""Get last created document of this type.
	:raises DoesNotExistError: if no document of `doctype` exists."""
	d = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
	if d:
		return get_doc(doctype, d[0].name)
	else:
		raise DoesNotExistError
def get_single(doctype):
	"""Return a `frappe.model.document.Document` object of the given Single doctype."""
	# Singles use their doctype name as the document name
	return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
	"""Get `frappe.model.meta.Meta` instance of given doctype name.
	:param cached: [optional] bypass the meta cache when False."""
	import frappe.model.meta
	return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
	"""Return the Python controller module for the given doctype."""
	import frappe.modules
	return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
	ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True):
	"""Delete a document. Calls `frappe.model.delete_doc.delete_doc`.
	:param doctype: DocType of document to be delete.
	:param name: Name of document to be delete.
	:param force: Allow even if document is linked. Warning: This may lead to data integrity errors.
	:param ignore_doctypes: Ignore if child table is one of these.
	:param for_reload: Call `before_reload` trigger before deleting.
	:param ignore_permissions: Ignore user permissions.
	:param ignore_on_trash: Do not run the `on_trash` controller hook.
	:param ignore_missing: Do not fail if the document does not exist."""
	import frappe.model.delete_doc
	frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload,
		ignore_permissions, flags, ignore_on_trash, ignore_missing)
def delete_doc_if_exists(doctype, name, force=0):
	"""Delete document if exists."""
	if db.exists(doctype, name):
		delete_doc(doctype, name, force=force)
def reload_doctype(doctype, force=False, reset_permissions=False):
	"""Reload DocType from model (`[module]/[doctype]/[name]/[name].json`) files."""
	# module is looked up from the DB, then both names are sluggified
	reload_doc(scrub(db.get_value("DocType", doctype, "module")), "doctype", scrub(doctype),
		force=force, reset_permissions=reset_permissions)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
	"""Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.
	:param module: Module name.
	:param dt: DocType name.
	:param dn: Document name.
	:param force: Reload even if `modified` timestamp matches.
	:param reset_permissions: Reset the DocType's permissions from file.
	"""
	import frappe.modules
	return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions)
def rename_doc(*args, **kwargs):
	"""Rename a document. Calls `frappe.model.rename_doc.rename_doc`"""
	from frappe.model.rename_doc import rename_doc
	return rename_doc(*args, **kwargs)
def get_module(modulename):
	"""Returns a module object for given Python module name using `importlib.import_module`."""
	return importlib.import_module(modulename)
def scrub(txt):
	"""Return a sluggified version of *txt*: `Sales Order` -> `sales_order`."""
	slug = txt
	# spaces and hyphens both become underscores
	for separator in (' ', '-'):
		slug = slug.replace(separator, '_')
	return slug.lower()
def unscrub(txt):
	"""Return a titlified version of *txt*: `sales_order` -> `Sales Order`."""
	words = txt
	# underscores and hyphens both become spaces
	for separator in ('_', '-'):
		words = words.replace(separator, ' ')
	return words.title()
def get_module_path(module, *joins):
	"""Get the path of the given module name.
	:param module: Module name.
	:param *joins: Join additional path elements using `os.path.join`."""
	module = scrub(module)
	# module_app maps a scrubbed module name to its owning app
	return get_pymodule_path(local.module_app[module] + "." + module, *joins)
def get_app_path(app_name, *joins):
	"""Return path of given app.
	:param app_name: App name.
	:param *joins: Join additional path elements using `os.path.join`."""
	return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
	"""Return path of current site.
	:param *joins: Join additional path elements using `os.path.join`."""
	return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
	"""Return path of given Python module name.
	:param modulename: Python module name.
	:param *joins: Join additional path elements using `os.path.join`."""
	# path elements are sluggified, except when pointing into /public
	# (asset paths must keep their original casing/dashes)
	if not "public" in joins:
		joins = [scrub(part) for part in joins]
	return os.path.join(os.path.dirname(get_module(scrub(modulename)).__file__), *joins)
def get_module_list(app_name):
	"""Get list of modules for given all via `app/modules.txt`."""
	return get_file_items(os.path.join(os.path.dirname(get_module(app_name).__file__), "modules.txt"))
def get_all_apps(with_internal_apps=True, sites_path=None):
	"""Get list of all apps via `sites/apps.txt`.
	:param with_internal_apps: also include apps listed in the site's own apps.txt."""
	if not sites_path:
		sites_path = local.sites_path
	apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)
	if with_internal_apps:
		for app in get_file_items(os.path.join(local.site_path, "apps.txt")):
			if app not in apps:
				apps.append(app)
	# frappe itself always comes first
	if "frappe" in apps:
		apps.remove("frappe")
	apps.insert(0, 'frappe')
	return apps
def get_installed_apps(sort=False, frappe_last=False):
	"""Get list of installed apps in current site.
	:param sort: order apps as in apps.txt
	:param frappe_last: move 'frappe' to the end of the list."""
	# NOTE(review): default True means an unset flags object yields [] -- confirm
	if getattr(flags, "in_install_db", True):
		return []
	if not db:
		connect()
	installed = json.loads(db.get_global("installed_apps") or "[]")
	if sort:
		installed = [app for app in get_all_apps(True) if app in installed]
	if frappe_last:
		if 'frappe' in installed:
			installed.remove('frappe')
		installed.append('frappe')
	return installed
def get_doc_hooks():
	'''Returns hooked methods for given doc. It will expand the dict tuple if required.
	The expanded mapping is memoized on `local.doc_events_hooks`.'''
	if not hasattr(local, 'doc_events_hooks'):
		hooks = get_hooks('doc_events', {})
		out = {}
		for key, value in iteritems(hooks):
			# a tuple key registers the same handlers for several doctypes
			if isinstance(key, tuple):
				for doctype in key:
					append_hook(out, doctype, value)
			else:
				append_hook(out, key, value)
		local.doc_events_hooks = out
	return local.doc_events_hooks
def get_hooks(hook=None, default=None, app_name=None):
	"""Get hooks via `app/hooks.py`
	:param hook: Name of the hook. Will gather all hooks for this name and return as a list.
	:param default: Default if no hook found.
	:param app_name: Filter by app."""
	def load_app_hooks(app_name=None):
		hooks = {}
		for app in [app_name] if app_name else get_installed_apps(sort=True):
			app = "frappe" if app=="webnotes" else app
			try:
				app_hooks = get_module(app + ".hooks")
			except ImportError:
				if local.flags.in_install_app:
					# if app is not installed while restoring, ignore it
					# (was a no-op `pass`, which fell through to the
					# print/exit below despite the stated intent)
					continue
				# report the app that actually failed to import;
				# `app_name` is None when iterating installed apps
				print('Could not find app "{0}"'.format(app))
				if not request:
					sys.exit(1)
				raise
			for key in dir(app_hooks):
				if not key.startswith("_"):
					append_hook(hooks, key, getattr(app_hooks, key))
		return hooks
	if app_name:
		hooks = _dict(load_app_hooks(app_name))
	else:
		# merged hooks of all apps are cached under "app_hooks"
		hooks = _dict(cache().get_value("app_hooks", load_app_hooks))
	if hook:
		return hooks.get(hook) or (default if default is not None else [])
	else:
		return hooks
def append_hook(target, key, value):
    '''Merge a single hook `value` into `target` under `key`.

    Dict values (like `doc_events`) are merged recursively, one level per
    nested key; scalar and list values accumulate into a list kept
    against `key`.
    '''
    if isinstance(value, dict):
        # merge each inner key into the nested dict held against `key`
        nested = target.setdefault(key, {})
        for inner_key in value:
            append_hook(nested, inner_key, value[inner_key])
    else:
        # accumulate into a list, wrapping scalars
        bucket = target.setdefault(key, [])
        bucket.extend(value if isinstance(value, list) else [value])
def setup_module_map():
    """Rebuild map of all modules (internal).

    Populates `local.app_modules` (app -> [module names]) and
    `local.module_app` (module name -> app). Uses the redis cache when a
    database (i.e. a real site) is configured."""
    _cache = cache()
    if conf.db_name:
        local.app_modules = _cache.get_value("app_modules")
        local.module_app = _cache.get_value("module_app")
    if not (local.app_modules and local.module_app):
        # cache miss (or no site): rebuild from each app's modules.txt
        local.module_app, local.app_modules = {}, {}
        for app in get_all_apps(True):
            # legacy name of the framework app
            if app=="webnotes": app="frappe"
            local.app_modules.setdefault(app, [])
            for module in get_module_list(app):
                module = scrub(module)
                local.module_app[module] = app
                local.app_modules[app].append(module)
        if conf.db_name:
            _cache.set_value("app_modules", local.app_modules)
            _cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
    """Returns items from text file as a list. Ignores empty lines.

    :param path: File path.
    :param raise_not_found: Raise `IOError` if the file does not exist.
    :param ignore_empty_lines: If True (default), drop blank lines and
        lines starting with "#" (comments); if False, keep both."""
    import frappe.utils
    content = read_file(path, raise_not_found=raise_not_found)
    if content:
        content = frappe.utils.strip(content)
        return [p.strip() for p in content.splitlines() if (not ignore_empty_lines) or (p.strip() and not p.startswith("#"))]
    else:
        return []
def get_file_json(path):
    """Return the parsed JSON content of the file at `path`."""
    with open(path, 'r') as json_file:
        parsed = json.load(json_file)
    return parsed
def read_file(path, raise_not_found=False):
    """Open a file and return its content as Unicode.

    :param path: File path. Unicode paths are encoded to utf-8 bytes first
        (Python 2 compatibility).
    :param raise_not_found: Raise `IOError` if the file does not exist;
        otherwise return None."""
    if isinstance(path, text_type):
        path = path.encode("utf-8")
    if os.path.exists(path):
        with open(path, "r") as f:
            return as_unicode(f.read())
    elif raise_not_found:
        raise IOError("{} Not Found".format(path))
    else:
        return None
def get_attr(method_string):
    """Get python method object from its dotted-path name.

    :param method_string: Full path, e.g. `app.module.method`.
    :raises AppNotInstalledError: If the app of the method is not installed."""
    app_name = method_string.split(".")[0]
    if not local.flags.in_install and app_name not in get_installed_apps():
        throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)
    # everything but the last segment is the module path
    modulename = '.'.join(method_string.split('.')[:-1])
    methodname = method_string.split('.')[-1]
    return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
    """Call a function and match arguments.

    Keyword arguments that `fn` does not accept are silently dropped (unless
    the function takes `**kwargs`); the reserved key "flags" is never passed on.

    :param fn: Function object or dotted-path string (resolved via `get_attr`).
    """
    if isinstance(fn, string_types):
        fn = get_attr(fn)
    # Default for the **kwargs indicator. Previously this was left unbound
    # when `fn` carried a pre-declared `fnargs`, raising NameError below for
    # any keyword argument not listed in fnargs.
    varkw = None
    if hasattr(fn, 'fnargs'):
        # argument list pre-declared on the function (e.g. by a decorator)
        fnargs = fn.fnargs
    else:
        fnargs, varargs, varkw, defaults = inspect.getargspec(fn)
    newargs = {}
    for a in kwargs:
        if (a in fnargs) or varkw:
            newargs[a] = kwargs.get(a)
    if "flags" in newargs:
        del newargs["flags"]
    return fn(*args, **newargs)
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
    """Create a new **Property Setter** (for overriding DocType and DocField properties).

    If doctype is not specified, it will create a property setter for all fields with the
    given fieldname.

    :param args: dict with `doctype_or_field`, `doctype`, `fieldname`,
        `property`, `value` and optionally `property_type`.
    :param ignore_validate: Skip validation on insert.
    :param validate_fields_for_doctype: Validate the fieldname against the doctype."""
    args = _dict(args)
    if not args.doctype_or_field:
        args.doctype_or_field = 'DocField'
        if not args.property_type:
            # infer the property's fieldtype from the DocField doctype itself
            args.property_type = db.get_value('DocField',
                {'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data'
    if not args.doctype:
        # apply to every doctype that has this fieldname
        doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname)
    else:
        doctype_list = [args.doctype]
    for doctype in doctype_list:
        if not args.property_type:
            args.property_type = db.get_value('DocField',
                {'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data'
        ps = get_doc({
            'doctype': "Property Setter",
            'doctype_or_field': args.doctype_or_field,
            'doc_type': doctype,
            'field_name': args.fieldname,
            'property': args.property,
            'value': args.value,
            'property_type': args.property_type or "Data",
            '__islocal': 1
        })
        ps.flags.ignore_validate = ignore_validate
        ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
        ps.validate_fieldtype_change()
        ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
    """Import a file using Data Import Tool.

    :param path: Path of the JSON/CSV file to import.
    :param ignore_links: Don't validate Link field values.
    :param ignore_insert: Skip records that would be inserted.
    :param insert: Insert as new records (don't overwrite)."""
    from frappe.core.page.data_import_tool import data_import_tool
    data_import_tool.import_doc(path, ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
    """Return a new, unsaved copy of `doc` with identity fields cleared.

    :param doc: Document object (or its dict form) to copy.
    :param ignore_no_copy: If True (default), fields marked `no_copy` are
        copied as well; if False they are reset to None."""
    import copy
    def remove_no_copy_fields(d):
        # reset values of fields flagged no_copy in the doctype meta
        for df in d.meta.get("fields", {"no_copy": 1}):
            if hasattr(d, df.fieldname):
                d.set(df.fieldname, None)
    fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by']
    if not local.flags.in_test:
        fields_to_clear.append("docstatus")
    if not isinstance(doc, dict):
        d = doc.as_dict()
    else:
        d = doc
    newdoc = get_doc(copy.deepcopy(d))
    # mark as a new (unsaved) document
    newdoc.set("__islocal", 1)
    for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']):
        newdoc.set(fieldname, None)
    if not ignore_no_copy:
        remove_no_copy_fields(newdoc)
    # child rows become new documents too
    for i, d in enumerate(newdoc.get_all_children()):
        d.set("__islocal", 1)
        for fieldname in fields_to_clear:
            d.set(fieldname, None)
        if not ignore_no_copy:
            remove_no_copy_fields(d)
    return newdoc
def compare(val1, condition, val2):
    """Compare two values using `frappe.utils.compare`

    `condition` could be:
    - "^" (starts with)
    - "in"
    - "not in"
    - "="
    - "!="
    - ">"
    - "<"
    - ">="
    - "<="
    - "not None"
    - "None"
    """
    import frappe.utils
    return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None,
        context=None, indicator_color=None, primary_action='/', primary_label = None, fullpage=False,
        width=None):
    """Send response as a web page with a message rather than JSON. Used to show permission errors etc.

    :param title: Page title and heading.
    :param html: Message body (HTML) to be shown.
    :param success: Alert message.
    :param http_status_code: HTTP status code
    :param context: web template context
    :param indicator_color: color of indicator in title
    :param primary_action: route on primary button (default is `/`)
    :param primary_label: label on primary button (defaut is "Home")
    :param fullpage: hide header / footer
    :param width: Width of message in pixels
    """
    local.message_title = title
    local.message = html
    # render via the "message" web route
    local.response['type'] = 'page'
    local.response['route'] = 'message'
    if http_status_code:
        local.response['http_status_code'] = http_status_code
    if not context:
        context = {}
    if not indicator_color:
        # pick a sensible default color from the outcome
        if success:
            indicator_color = 'green'
        elif http_status_code and http_status_code > 300:
            indicator_color = 'red'
        else:
            indicator_color = 'blue'
    context['indicator_color'] = indicator_color
    context['primary_label'] = primary_label
    context['primary_action'] = primary_action
    context['error_code'] = http_status_code
    context['fullpage'] = fullpage
    if width:
        context['card_width'] = width
    local.response['context'] = context
def redirect_to_message(title, html, http_status_code=None, context=None, indicator_color=None):
    """Redirects to /message?id=random

    Similar to respond_as_web_page, but used to 'redirect' and show message pages like success, failure, etc. with a detailed message

    :param title: Page title and heading.
    :param html: Message body (HTML) to be shown.
    :param http_status_code: HTTP status code.

    Example Usage:
        frappe.redirect_to_message(_('Thank you'), "<div><p>You will receive an email at test@example.com</p></div>")
    """
    message_id = generate_hash(length=8)
    message = {
        'context': context or {},
        'http_status_code': http_status_code or 200
    }
    message['context'].update({
        'header': title,
        'title': title,
        'message': html
    })
    if indicator_color:
        message['context'].update({
            "indicator_color": indicator_color
        })
    # the message page picks the content up from the cache by id
    cache().set_value("message_id:{0}".format(message_id), message, expires_in_sec=60)
    location = '/message?id={0}'.format(message_id)
    if not getattr(local, 'is_ajax', False):
        local.response["type"] = "redirect"
        local.response["location"] = location
    else:
        # ajax callers perform the redirect themselves
        return location
def build_match_conditions(doctype, as_condition=True):
    """Return match (User permissions) for given doctype as list or SQL."""
    import frappe.desk.reportview
    return frappe.desk.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, *args, **kwargs):
    """List database query via `frappe.model.db_query`. Will also check for permissions.

    :param doctype: DocType on which query is to be made.
    :param fields: List of fields or `*`.
    :param filters: List of filters (see example).
    :param order_by: Order By e.g. `modified desc`.
    :param limit_page_start: Start results at record #. Default 0.
    :param limit_page_length: No of records in the page. Default 20.

    Example usage:

        # simple dict filter
        frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

        # filter as a list of lists
        frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])

        # filter as a list of dicts
        frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
    """
    import frappe.model.db_query
    return frappe.model.db_query.DatabaseQuery(doctype).execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
    """List database query via `frappe.model.db_query`. Will **not** check for permissions.

    Parameters are same as `frappe.get_list`.

    :param doctype: DocType on which query is to be made.
    :param fields: List of fields or `*`. Default is: `["name"]`.
    :param filters: List of filters (see example).
    :param order_by: Order By e.g. `modified desc`.
    :param limit_page_start: Start results at record #. Default 0.
    :param limit_page_length: No of records in the page. Default 20.

    Example usage:

        # simple dict filter
        frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

        # filter as a list of lists
        frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])

        # filter as a list of dicts
        frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
    """
    # bypass permission checks and, unless the caller asked otherwise,
    # remove the page-length cap
    kwargs["ignore_permissions"] = True
    kwargs.setdefault("limit_page_length", 0)
    return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
    """Returns a document property or list of properties.

    Alias for `frappe.db.get_value`

    :param doctype: DocType name.
    :param filters: Filters like `{"x":"y"}` or name of the document. `None` if Single DocType.
    :param fieldname: Column name.
    :param ignore: Don't raise exception if table, column is missing.
    :param as_dict: Return values as dict.
    :param debug: Print query in error log.
    """
    return db.get_value(*args, **kwargs)
def as_json(obj, indent=1):
    """Serialize `obj` to a JSON string with frappe's default handler
    (`json_handler`); keys are sorted for stable output."""
    from frappe.utils.response import json_handler
    return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler)
def are_emails_muted():
    """True if outgoing email is disabled via `flags.mute_emails`
    or the `mute_emails` site config."""
    from frappe.utils import cint
    return flags.mute_emails or cint(conf.get("mute_emails") or 0) or False
def get_test_records(doctype):
    """Returns list of objects from `test_records.json` in the given doctype's folder.

    Returns an empty list if the file does not exist."""
    from frappe.modules import get_doctype_module, get_module_path
    path = os.path.join(get_module_path(get_doctype_module(doctype)), "doctype", scrub(doctype), "test_records.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.loads(f.read())
    else:
        return []
def format_value(*args, **kwargs):
    """Format value with given field properties.

    :param value: Value to be formatted.
    :param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
    import frappe.utils.formatters
    return frappe.utils.formatters.format_value(*args, **kwargs)
# NOTE: alias of format_value (shadows the builtin `format` within this module)
def format(*args, **kwargs):
    """Format value with given field properties.

    :param value: Value to be formatted.
    :param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
    import frappe.utils.formatters
    return frappe.utils.formatters.format_value(*args, **kwargs)
def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output = None):
    """Get Print Format for given document.

    :param doctype: DocType of document.
    :param name: Name of document.
    :param print_format: Print Format name. Default 'Standard',
    :param style: Print Format style.
    :param html: (Optional) pre-rendered HTML; skips the printview render.
    :param as_pdf: Return as PDF. Default False.
    :param doc: (Optional) document object to render instead of fetching by name.
    :param output: (Optional) stream passed through to `get_pdf`."""
    from frappe.website.render import build_page
    from frappe.utils.pdf import get_pdf
    # the printview page reads its inputs from form_dict
    local.form_dict.doctype = doctype
    local.form_dict.name = name
    local.form_dict.format = print_format
    local.form_dict.style = style
    local.form_dict.doc = doc
    if not html:
        html = build_page("printview")
    if as_pdf:
        return get_pdf(html, output = output)
    else:
        return html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None):
    """Return a print of the document as an email attachment dict
    (`fname` / `fcontent`), as PDF or HTML per Print Settings."""
    from frappe.utils import scrub_urls
    if not file_name: file_name = name
    # sanitize for use as a filename
    file_name = file_name.replace(' ','').replace('/','-')
    print_settings = db.get_singles_dict("Print Settings")
    # rendering for attachment should not be blocked by print permissions
    local.flags.ignore_print_permissions = True
    if int(print_settings.send_print_as_pdf or 0):
        out = {
            "fname": file_name + ".pdf",
            "fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc)
        }
    else:
        out = {
            "fname": file_name + ".html",
            "fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc)).encode("utf-8")
        }
    local.flags.ignore_print_permissions = False
    return out
def publish_progress(*args, **kwargs):
    """Show the user progress for a long request

    :param percent: Percent progress
    :param title: Title
    :param doctype: Optional, for DocType
    :param name: Optional, for Document name
    """
    # NOTE(review): `async` is a reserved word on Python 3.7+, so this module
    # name only imports on older interpreters -- confirm before upgrading
    import frappe.async
    return frappe.async.publish_progress(*args, **kwargs)
def publish_realtime(*args, **kwargs):
    """Publish real-time updates

    :param event: Event name, like `task_progress` etc.
    :param message: JSON message object. For async must contain `task_id`
    :param room: Room in which to publish update (default entire site)
    :param user: Transmit to user
    :param doctype: Transmit to doctype, docname
    :param docname: Transmit to doctype, docname
    :param after_commit: (default False) will emit after current transaction is committed
    """
    import frappe.async
    return frappe.async.publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
    """A key value store for caching within a request

    :param namespace: frappe.local.cache[namespace]
    :param key: frappe.local.cache[namespace][key] used to retrieve value
    :param generator: method to generate a value if not found in store
    :param regenerate_if_none: re-run `generator` when the cached value is None
    """
    if namespace not in local.cache:
        local.cache[namespace] = {}
    if key not in local.cache[namespace]:
        local.cache[namespace][key] = generator()
    elif local.cache[namespace][key] is None and regenerate_if_none:
        # `is None` (was `== None`): key exists but the previous
        # generator call returned None
        local.cache[namespace][key] = generator()
    return local.cache[namespace][key]
def enqueue(*args, **kwargs):
    '''
    Enqueue method to be executed using a background worker

    :param method: method string or method object
    :param queue: (optional) should be either long, default or short
    :param timeout: (optional) should be set according to the functions
    :param event: this is passed to enable clearing of jobs from queues
    :param async: (optional) if async=False, the method is executed immediately, else via a worker
    :param job_name: (optional) can be used to name an enqueue call, which can be used to prevent duplicate calls
    :param kwargs: keyword arguments to be passed to the method
    '''
    import frappe.utils.background_jobs
    return frappe.utils.background_jobs.enqueue(*args, **kwargs)
def get_doctype_app(doctype):
    """Return the app a doctype belongs to (cached per request)."""
    def _get_doctype_app():
        # module -> app resolution via the module map built in setup_module_map
        doctype_module = local.db.get_value("DocType", doctype, "module")
        return local.module_app[scrub(doctype_module)]
    return local_cache("doctype_app", doctype, generator=_get_doctype_app)
# module-level logger registry and level (used by frappe.utils.logger)
loggers = {}
log_level = None
def logger(module=None, with_more_info=True):
    '''Returns a python logger that uses StreamHandler'''
    from frappe.utils.logger import get_logger
    return get_logger(module or 'default', with_more_info=with_more_info)
def log_error(message=None, title=None):
    '''Log error to Error Log

    :param message: Error text; defaults to the current traceback.
    :param title: Stored in the `method` field of the Error Log.'''
    get_doc(dict(doctype='Error Log', error=as_unicode(message or get_traceback()),
        method=title)).insert(ignore_permissions=True)
def get_desk_link(doctype, name):
    """Return an HTML anchor to the desk form of `doctype`/`name`,
    labelled with the translated doctype followed by the name."""
    return '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'.format(doctype, name, _(doctype))
def bold(text):
    """Return `text` wrapped in an HTML ``<b>`` tag."""
    template = '<b>{0}</b>'
    return template.format(text)
def safe_eval(code, eval_globals=None, eval_locals=None):
    '''A safer `eval`

    NOTE(review): this is a blacklist-style guard, not a sandbox -- it only
    rejects "__" in the source and strips `__builtins__`; do not rely on it
    for full isolation of untrusted input.

    :param code: Expression string to evaluate.
    :param eval_globals: Optional globals dict (builtins are removed from it).
    :param eval_locals: Optional locals dict.
    '''
    whitelisted_globals = {
        "int": int,
        "float": float,
        "long": int,
        "round": round
    }
    if '__' in code:
        throw('Illegal rule {0}. Cannot use "__"'.format(bold(code)))
    if not eval_globals:
        eval_globals = {}
    # strip builtins, then expose only the whitelisted callables
    eval_globals['__builtins__'] = {}
    eval_globals.update(whitelisted_globals)
    return eval(code, eval_globals, eval_locals)
def get_system_settings(key):
    """Return a single value from the System Settings doctype,
    cached on `local.system_settings` for the request."""
    if key not in local.system_settings:
        local.system_settings.update({key: db.get_single_value('System Settings', key)})
    return local.system_settings.get(key)
def get_active_domains():
    """Return list of active domains from Domain Settings."""
    from frappe.core.doctype.domain_settings.domain_settings import get_active_domains
    return get_active_domains()
# bumped to version 9.1.5
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
"""
globals attached to frappe module
+ some utility functions that should probably be moved
"""
from __future__ import unicode_literals, print_function
from six import iteritems, text_type, string_types
from werkzeug.local import Local, release_local
import os, sys, importlib, inspect, json
# public
from .exceptions import *
from .utils.jinja import get_jenv, get_template, render_template, get_email_from_template
__version__ = '9.1.5'
__title__ = "Frappe Framework"
local = Local()
class _dict(dict):
"""dict like object that exposes keys as attributes"""
def __getattr__(self, key):
ret = self.get(key)
if not ret and key.startswith("__"):
raise AttributeError()
return ret
def __setattr__(self, key, value):
self[key] = value
def __getstate__(self):
return self
def __setstate__(self, d):
self.update(d)
def update(self, d):
"""update and return self -- the missing dict feature in python"""
super(_dict, self).update(d)
return self
def copy(self):
return _dict(dict(self).copy())
def _(msg, lang=None):
    """Returns translated string in current lang, if exists.

    :param msg: String to translate.
    :param lang: Optional language code; defaults to `local.lang`."""
    from frappe.translate import get_full_dict
    if not hasattr(local, 'lang'):
        local.lang = lang or 'en'
    if not lang:
        lang = local.lang
    # msg should always be unicode
    msg = as_unicode(msg).strip()
    # return lang_full_dict according to lang passed parameter
    return get_full_dict(lang).get(msg) or msg
def as_unicode(text, encoding='utf-8'):
    '''Convert to unicode if required

    :param text: Value to convert. None becomes an empty string; byte
        strings are decoded with `encoding`; anything else is coerced
        via ``text_type`` (``str``/``unicode``).
    :param encoding: Encoding used to decode byte strings. Default utf-8.
    '''
    if isinstance(text, text_type):
        return text
    elif text is None:
        # `is None` (was `== None`) -- identity check for the singleton
        return ''
    elif isinstance(text, string_types):
        return text_type(text, encoding)
    else:
        return text_type(text)
def get_lang_dict(fortype, name=None):
    """Returns the translated language dict for the given type and name.

    :param fortype: must be one of `doctype`, `page`, `report`, `include`, `jsfile`, `boot`
    :param name: name of the document for which assets are to be returned."""
    from frappe.translate import get_dict
    return get_dict(fortype, name)
def set_user_lang(user, user_language=None):
    """Guess and set user language for the session. `frappe.local.lang`

    NOTE(review): `user_language` is currently unused -- the language is
    always resolved from the user record via `get_user_lang`."""
    from frappe.translate import get_user_lang
    local.lang = get_user_lang(user)
# local-globals: thread-local proxies onto attributes of `local`, so that
# e.g. `frappe.db` always refers to the current site's database connection
db = local("db")
conf = local("conf")
form = form_dict = local("form_dict")
request = local("request")
response = local("response")
session = local("session")
user = local("user")
flags = local("flags")
error_log = local("error_log")
debug_log = local("debug_log")
message_log = local("message_log")
lang = local("lang")
def init(site, sites_path=None, new_site=False):
    """Initialize frappe for the current site. Reset thread locals `frappe.local`

    :param site: Site name (folder under `sites_path`).
    :param sites_path: Path of the sites folder. Default: current directory.
    :param new_site: True while the site is being created (relaxes config checks)."""
    if getattr(local, "initialised", None):
        # already initialised for this thread/request -- no-op
        return
    if not sites_path:
        sites_path = '.'
    local.error_log = []
    local.message_log = []
    local.debug_log = []
    local.realtime_log = []
    local.flags = _dict({
        "ran_schedulers": [],
        "currently_saving": [],
        "redirect_location": "",
        "in_install_db": False,
        "in_install_app": False,
        "in_import": False,
        "in_test": False,
        "mute_messages": False,
        "ignore_links": False,
        "mute_emails": False,
        "has_dataurl": False,
        "new_site": new_site
    })
    local.rollback_observers = []
    local.test_objects = {}
    local.site = site
    local.sites_path = sites_path
    local.site_path = os.path.join(sites_path, site)
    local.request_ip = None
    local.response = _dict({"docs":[]})
    local.task_id = None
    # site config must be loaded before anything that reads `conf`
    local.conf = _dict(get_site_config())
    local.lang = local.conf.lang or "en"
    local.lang_full_dict = None
    local.module_app = None
    local.app_modules = None
    local.system_settings = _dict()
    local.user = None
    local.user_perms = None
    local.session = None
    local.role_permissions = {}
    local.valid_columns = {}
    local.new_doc_templates = {}
    local.link_count = {}
    local.jenv = None
    local.jloader =None
    local.cache = {}
    local.meta_cache = {}
    local.form_dict = _dict()
    local.session = _dict()
    setup_module_map()
    local.initialised = True
def connect(site=None, db_name=None):
    """Connect to site database instance.

    :param site: If site is given, calls `frappe.init`.
    :param db_name: Optional. Will use from `site_config.json`."""
    from frappe.database import Database
    if site:
        init(site)
    local.db = Database(user=db_name or local.conf.db_name)
    # connections start as the Administrator user
    set_user("Administrator")
def get_site_config(sites_path=None, site_path=None):
    """Returns `site_config.json` combined with `sites/common_site_config.json`.
    `site_config` is a set of site wide settings like database name, password, email etc."""
    config = {}
    sites_path = sites_path or getattr(local, "sites_path", None)
    site_path = site_path or getattr(local, "site_path", None)
    if sites_path:
        # common settings first, so site-specific values override them
        common_site_config = os.path.join(sites_path, "common_site_config.json")
        if os.path.exists(common_site_config):
            config.update(get_file_json(common_site_config))
    if site_path:
        site_config = os.path.join(site_path, "site_config.json")
        if os.path.exists(site_config):
            config.update(get_file_json(site_config))
        elif local.site and not local.flags.new_site:
            # an existing site must have a site_config.json
            print("{0} does not exist".format(local.site))
            sys.exit(1)
            #raise IncorrectSitePath, "{0} does not exist".format(site_config)
    return _dict(config)
def get_conf(site=None):
    """Return the site config of `site` (current site's if already initialised)."""
    if hasattr(local, 'conf'):
        return local.conf
    else:
        # if no site, get from common_site_config.json
        with init_site(site):
            return local.conf
class init_site:
    # context manager: init a site on enter, destroy thread locals on exit
    def __init__(self, site=None):
        '''If site==None, initialize it for empty site ('') to load common_site_config.json'''
        self.site = site or ''
    def __enter__(self):
        init(self.site)
        return local
    def __exit__(self, type, value, traceback):
        destroy()
def destroy():
    """Closes connection and releases werkzeug local."""
    if db:
        db.close()
    release_local(local)
# module-level singleton for the cache connection
redis_server = None
def cache():
    """Returns the redis cache connection (lazily created singleton)."""
    global redis_server
    if not redis_server:
        from frappe.utils.redis_wrapper import RedisWrapper
        redis_server = RedisWrapper.from_url(conf.get('redis_cache')
            or "redis://localhost:11311")
    return redis_server
def get_traceback():
    """Returns error traceback."""
    from frappe.utils import get_traceback
    return get_traceback()
def errprint(msg):
    """Log error. This is sent back as `exc` in response.

    :param msg: Message."""
    msg = as_unicode(msg)
    # echo to stdout outside of HTTP request handling (or in developer mode)
    if not request or (not "cmd" in local.form_dict) or conf.developer_mode:
        print(msg.encode('utf-8'))
    error_log.append(msg)
def log(msg):
    """Add to `debug_log`.

    :param msg: Message."""
    if not request:
        if conf.get("logging") or False:
            print(repr(msg))
    debug_log.append(as_unicode(msg))
def msgprint(msg, title=None, raise_exception=0, as_table=False, indicator=None, alert=False):
    """Print a message to the user (via HTTP response).

    Messages are sent in the `__server_messages` property in the
    response JSON and shown in a pop-up / modal.

    :param msg: Message.
    :param title: [optional] Message title.
    :param raise_exception: [optional] Raise given exception and show message.
    :param as_table: [optional] If `msg` is a list of lists, render as HTML table.
    :param indicator: [optional] Indicator color; forced to 'red' when raising.
    :param alert: [optional] Show as a floating alert instead of a modal.
    """
    from frappe.utils import encode
    out = _dict(message=msg)
    def _raise_exception():
        if raise_exception:
            if flags.rollback_on_exception:
                db.rollback()
            import inspect
            if inspect.isclass(raise_exception) and issubclass(raise_exception, Exception):
                raise raise_exception(encode(msg))
            else:
                raise ValidationError(encode(msg))
    if flags.mute_messages:
        # messages suppressed (e.g. during install); still honour the exception
        _raise_exception()
        return
    if as_table and type(msg) in (list, tuple):
        out.msg = '<table border="1px" style="border-collapse: collapse" cellpadding="2px">' + ''.join(['<tr>'+''.join(['<td>%s</td>' % c for c in r])+'</tr>' for r in msg]) + '</table>'
    if flags.print_messages and out.msg:
        # was `repr(out.msg).encode("utf-8")` -- concatenating str + bytes
        # raises TypeError on Python 3; repr() is already printable
        print("Message: " + repr(out.msg))
    if title:
        out.title = title
    if not indicator and raise_exception:
        indicator = 'red'
    if indicator:
        out.indicator = indicator
    if alert:
        out.alert = 1
    message_log.append(json.dumps(out))
    _raise_exception()
def clear_messages():
    """Reset the message log of the current request."""
    local.message_log = []
def throw(msg, exc=ValidationError, title=None):
    """Throw exception and show message (`msgprint`).

    :param msg: Message.
    :param exc: Exception class. Default `frappe.ValidationError`
    :param title: [optional] Message title."""
    msgprint(msg, raise_exception=exc, title=title, indicator='red')
def emit_js(js, user=False, **kwargs):
from frappe.async import publish_realtime
if user == False:
user = session.user
publish_realtime('eval_js', js, user=user, **kwargs)
def create_folder(path, with_init=False):
    """Create a folder in the given path and add an `__init__.py` file (optional).

    :param path: Folder path.
    :param with_init: Create `__init__.py` in the new folder."""
    from frappe.utils import touch_file
    if not os.path.exists(path):
        os.makedirs(path)
        if with_init:
            touch_file(os.path.join(path, "__init__.py"))
def set_user(username):
    """Set current user.

    :param username: **User** name to set as current user."""
    local.session.user = username
    local.session.sid = username
    # clear all per-user caches and render state
    local.cache = {}
    local.form_dict = _dict()
    local.jenv = None
    local.session.data = _dict()
    local.role_permissions = {}
    local.new_doc_templates = {}
    local.user_perms = None
def get_user():
    """Return the `UserPermissions` object of the session user (cached)."""
    from frappe.utils.user import UserPermissions
    if not local.user_perms:
        local.user_perms = UserPermissions(local.session.user)
    return local.user_perms
def get_roles(username=None):
    """Returns roles of current user.

    :param username: [optional] Get roles of this user instead."""
    if not local.session:
        return ["Guest"]
    if username:
        import frappe.permissions
        return frappe.permissions.get_roles(username)
    else:
        return get_user().get_roles()
def get_request_header(key, default=None):
    """Return HTTP request header.

    :param key: HTTP header key.
    :param default: Default value."""
    return request.headers.get(key, default)
def sendmail(recipients=None, sender="", subject="No Subject", message="No Message",
        as_markdown=False, delayed=True, reference_doctype=None, reference_name=None,
        unsubscribe_method=None, unsubscribe_params=None, unsubscribe_message=None,
        attachments=None, content=None, doctype=None, name=None, reply_to=None,
        cc=None, message_id=None, in_reply_to=None, send_after=None, expose_recipients=None,
        send_priority=1, communication=None, retry=1, now=None, read_receipt=None, is_notification=False,
        inline_images=None, template=None, args=None, header=None):
    """Send email using user's default **Email Account** or global default **Email Account**.

    :param recipients: List of recipients.
    :param sender: Email sender. Default is current user.
    :param subject: Email Subject.
    :param message: (or `content`) Email Content.
    :param as_markdown: Convert content markdown to HTML.
    :param delayed: Send via scheduled email sender **Email Queue**. Don't send immediately. Default is true
    :param send_priority: Priority for Email Queue, default 1.
    :param reference_doctype: (or `doctype`) Append as communication to this DocType.
    :param reference_name: (or `name`) Append as communication to this document name.
    :param unsubscribe_method: Unsubscribe url with options email, doctype, name. e.g. `/api/method/unsubscribe`
    :param unsubscribe_params: Unsubscribe paramaters to be loaded on the unsubscribe_method [optional] (dict).
    :param attachments: List of attachments.
    :param reply_to: Reply-To Email Address.
    :param message_id: Used for threading. If a reply is received to this email, Message-Id is sent back as In-Reply-To in received email.
    :param in_reply_to: Used to send the Message-Id of a received email back as In-Reply-To.
    :param send_after: Send after the given datetime.
    :param expose_recipients: Display all recipients in the footer message - "This email was sent to"
    :param communication: Communication link to be set in Email Queue record
    :param inline_images: List of inline images as {"filename", "filecontent"}. All src properties will be replaced with random Content-Id
    :param template: Name of html template from templates/emails folder
    :param args: Arguments for rendering the template
    :param header: Append header in email
    """
    # `recipients`/`cc` previously defaulted to mutable [] (shared across
    # calls); None is the safe equivalent and is normalised here
    recipients = recipients or []
    cc = cc or []
    text_content = None
    if template:
        message, text_content = get_email_from_template(template, args)
    message = content or message
    if as_markdown:
        from markdown2 import markdown
        message = markdown(message)
    if not delayed:
        now = True
    from frappe.email import queue
    queue.send(recipients=recipients, sender=sender,
        subject=subject, message=message, text_content=text_content,
        reference_doctype = doctype or reference_doctype, reference_name = name or reference_name,
        unsubscribe_method=unsubscribe_method, unsubscribe_params=unsubscribe_params, unsubscribe_message=unsubscribe_message,
        attachments=attachments, reply_to=reply_to, cc=cc, message_id=message_id, in_reply_to=in_reply_to,
        send_after=send_after, expose_recipients=expose_recipients, send_priority=send_priority,
        communication=communication, now=now, read_receipt=read_receipt, is_notification=is_notification,
        inline_images=inline_images, header=header)
# registries of HTTP-accessible methods, populated by @whitelist()
whitelisted = []
guest_methods = []
xss_safe_methods = []
def whitelist(allow_guest=False, xss_safe=False):
    """
    Decorator for whitelisting a function and making it accessible via HTTP.
    Standard request will be `/api/method/[path.to.method]`

    :param allow_guest: Allow non logged-in user to access this method.
    :param xss_safe: Also register in the xss-safe list.

    Use as:

        @frappe.whitelist()
        def myfunc(param1, param2):
            pass
    """
    def register(fn):
        # record the function in the relevant registries, return it unchanged
        whitelisted.append(fn)
        if allow_guest:
            guest_methods.append(fn)
        if xss_safe:
            xss_safe_methods.append(fn)
        return fn
    return register
def only_for(roles):
    """Raise `frappe.PermissionError` unless the session user has at least
    one of the given **Roles**.

    :param roles: Role name, or list/tuple of role names."""
    # permission checks are skipped in tests
    if local.flags.in_test:
        return
    if not isinstance(roles, (tuple, list)):
        roles = (roles,)
    # no overlap between required roles and the user's roles -> denied
    if set(roles).isdisjoint(get_roles()):
        raise PermissionError
def clear_cache(user=None, doctype=None):
    """Clear **User**, **DocType** or global cache.

    :param user: If user is given, only user cache is cleared.
    :param doctype: If doctype is given, only DocType cache is cleared."""
    import frappe.sessions
    from frappe.core.doctype.domain_settings.domain_settings import clear_domain_cache
    if doctype:
        import frappe.model.meta
        frappe.model.meta.clear_cache(doctype)
        reset_metadata_version()
    elif user:
        frappe.sessions.clear_cache(user)
    else: # everything
        from frappe import translate
        frappe.sessions.clear_cache()
        translate.clear_cache()
        reset_metadata_version()
        clear_domain_cache()
        local.cache = {}
        local.new_doc_templates = {}
        # apps can hook their own cache invalidation
        for fn in get_hooks("clear_cache"):
            get_attr(fn)()
    # role permissions are always re-resolved
    local.role_permissions = {}
def has_permission(doctype=None, ptype="read", doc=None, user=None, verbose=False, throw=False):
    """Return True if the user has the requested permission.

    :param doctype: DocType for which permission is to be checked.
    :param ptype: permission type (`read`, `write`, `create`, `submit`, `cancel`, `amend`). Default `read`.
    :param doc: [optional] also check user permissions for this document.
    :param user: [optional] user to check for; default is the current user.
    :param throw: raise `frappe.PermissionError` instead of returning False."""
    import frappe.permissions

    if doc and not doctype:
        doctype = doc.doctype

    allowed = frappe.permissions.has_permission(doctype, ptype, doc=doc, verbose=verbose, user=user)

    if throw and not allowed:
        target = (doc.doctype + " " + doc.name) if doc else doctype
        frappe.throw(_("No permission for {0}").format(target))

    return allowed
def has_website_permission(doc=None, ptype='read', user=None, verbose=False, doctype=None):
    """Return True if `user` may access the document on the website.

    :param doc: document object, or its name (together with `doctype`).
    :param ptype: permission type; default `read`.
    :param user: [optional] user to check for; default is the session user."""
    user = user or session.user

    if doc:
        if isinstance(doc, string_types):
            doc = get_doc(doctype, doc)

        doctype = doc.doctype

        if doc.flags.ignore_permissions:
            return True

        # a controller-level check, when defined, is authoritative
        if hasattr(doc, 'has_website_permission'):
            return doc.has_website_permission(ptype, verbose=verbose)

    hooks = (get_hooks("has_website_permission") or {}).get(doctype, [])
    if not hooks:
        return False

    # every hook must pass; stop at the first falsy result
    for method in hooks:
        if not call(method, doc=doc, ptype=ptype, user=user, verbose=verbose):
            return False
    return True
def is_table(doctype):
    """Return True if the given DocType is a child table (`istable` set)."""
    def get_tables():
        return db.sql_list("select name from tabDocType where istable=1")

    # the full table list is cached under "is_table"
    return doctype in cache().get_value("is_table", get_tables)
def get_precision(doctype, fieldname, currency=None, doc=None):
    """Return the rounding precision for the given field.

    :param currency: [optional] currency context for Currency fields.
    :param doc: [optional] document context for the lookup."""
    from frappe.model.meta import get_field_precision
    df = get_meta(doctype).get_field(fieldname)
    return get_field_precision(df, doc, currency)
def generate_hash(txt=None, length=None):
    """Return a random sha224 hex digest derived from `txt`, the current
    timestamp and a random string; optionally truncated to `length` chars."""
    import hashlib, time
    from .utils import random_string

    seed = (txt or "") + repr(time.time()) + repr(random_string(8))
    digest = hashlib.sha224(seed.encode()).hexdigest()
    return digest[:length] if length else digest
def reset_metadata_version():
    """Store a fresh random `metadata_version` (the client build ID hash)
    in cache and return it."""
    version = generate_hash()
    cache().set_value("metadata_version", version)
    return version
def new_doc(doctype, parent_doc=None, parentfield=None, as_dict=False):
    """Return a new document of the given DocType with defaults applied.

    :param doctype: DocType of the new document.
    :param parent_doc: [optional] attach to this parent document.
    :param parentfield: [optional] attach against this `parentfield`.
    :param as_dict: return a plain dict instead of a Document."""
    from frappe.model.create_new import get_new_doc
    return get_new_doc(doctype, parent_doc, parentfield, as_dict=as_dict)
def set_value(doctype, docname, fieldname, value=None):
    """Set a single document value; thin wrapper over `frappe.client.set_value`."""
    import frappe.client
    return frappe.client.set_value(doctype, docname, fieldname, value)
def get_doc(*args, **kwargs):
    """Return a `frappe.model.document.Document` object of the given type and name.

    :param arg1: DocType name as string **or** document JSON.
    :param arg2: [optional] Document name as string.

    Examples:

        # insert a new document
        todo = frappe.get_doc({"doctype":"ToDo", "description": "test"})
        todo.insert()

        # open an existing document
        todo = frappe.get_doc("ToDo", "TD0001")
    """
    import frappe.model.document
    return frappe.model.document.get_doc(*args, **kwargs)
def get_last_doc(doctype):
    """Return the most recently created document of this DocType.

    :raises DoesNotExistError: if no document of this type exists."""
    rows = get_all(doctype, ["name"], order_by="creation desc", limit_page_length=1)
    if not rows:
        raise DoesNotExistError
    return get_doc(doctype, rows[0].name)
def get_single(doctype):
    """Return the `Document` of a Single doctype (its name equals its doctype)."""
    return get_doc(doctype, doctype)
def get_meta(doctype, cached=True):
    """Return the `frappe.model.meta.Meta` instance for the given doctype.

    :param cached: use the cached Meta when available (default True)."""
    import frappe.model.meta
    return frappe.model.meta.get_meta(doctype, cached=cached)
def get_meta_module(doctype):
    """Return the Python module that defines the given DocType's controller."""
    import frappe.modules
    return frappe.modules.load_doctype_module(doctype)
def delete_doc(doctype=None, name=None, force=0, ignore_doctypes=None, for_reload=False,
    ignore_permissions=False, flags=None, ignore_on_trash=False, ignore_missing=True):
    """Delete a document. Calls `frappe.model.delete_doc.delete_doc`.

    :param doctype: DocType of the document to be deleted.
    :param name: Name of the document to be deleted.
    :param force: Allow deletion even if the document is linked. Warning: this may lead to data integrity errors.
    :param ignore_doctypes: Ignore if child table is one of these.
    :param for_reload: Call `before_reload` trigger before deleting.
    :param ignore_permissions: Ignore user permissions.
    :param flags: passed through to the underlying delete call.
    :param ignore_on_trash: presumably skips trash-related hooks — confirm in frappe.model.delete_doc.
    :param ignore_missing: presumably makes a missing document a no-op — confirm in frappe.model.delete_doc."""
    import frappe.model.delete_doc
    frappe.model.delete_doc.delete_doc(doctype, name, force, ignore_doctypes, for_reload,
        ignore_permissions, flags, ignore_on_trash, ignore_missing)
def delete_doc_if_exists(doctype, name, force=0):
    """Delete the document only when it exists; otherwise do nothing."""
    if not db.exists(doctype, name):
        return
    delete_doc(doctype, name, force=force)
def reload_doctype(doctype, force=False, reset_permissions=False):
    """Reload a DocType from its model files
    (`[module]/[doctype]/[name]/[name].json`)."""
    module = db.get_value("DocType", doctype, "module")
    reload_doc(scrub(module), "doctype", scrub(doctype),
        force=force, reset_permissions=reset_permissions)
def reload_doc(module, dt=None, dn=None, force=False, reset_permissions=False):
    """Reload Document from model (`[module]/[doctype]/[name]/[name].json`) files.

    :param module: Module name.
    :param dt: DocType name.
    :param dn: Document name.
    :param force: Reload even if `modified` timestamp matches.
    :param reset_permissions: presumably resets DocType permissions on reload — confirm in frappe.modules.
    """
    import frappe.modules
    return frappe.modules.reload_doc(module, dt, dn, force=force, reset_permissions=reset_permissions)
def rename_doc(*args, **kwargs):
    """Rename a document; thin wrapper over `frappe.model.rename_doc.rename_doc`."""
    from frappe.model.rename_doc import rename_doc as _rename_doc
    return _rename_doc(*args, **kwargs)
def get_module(modulename):
    """Import and return the Python module named `modulename`
    (via `importlib.import_module`)."""
    module = importlib.import_module(modulename)
    return module
def scrub(txt):
    """Return the sluggified form of `txt`: spaces and hyphens become
    underscores and the result is lower-cased, e.g. `Sales Order` -> `sales_order`."""
    return txt.lower().replace(' ', '_').replace('-', '_')
def unscrub(txt):
    """Return the titlified form of `txt`: underscores and hyphens become
    spaces and each word is capitalised, e.g. `sales_order` -> `Sales Order`."""
    return txt.replace('-', ' ').replace('_', ' ').title()
def get_module_path(module, *joins):
    """Return the filesystem path of the given module.

    :param module: Module name.
    :param joins: additional path elements joined via `os.path.join`."""
    module = scrub(module)
    app = local.module_app[module]
    return get_pymodule_path(app + "." + module, *joins)
def get_app_path(app_name, *joins):
    """Return the base path of the given app.

    :param app_name: App name.
    :param joins: additional path elements joined via `os.path.join`."""
    return get_pymodule_path(app_name, *joins)
def get_site_path(*joins):
    """Return the path of the current site, optionally joined with the
    given extra path elements."""
    return os.path.join(local.site_path, *joins)
def get_pymodule_path(modulename, *joins):
    """Return the directory of the given Python module, joined with any
    extra path elements (which are scrubbed unless "public" is among them)."""
    if "public" not in joins:
        joins = [scrub(part) for part in joins]
    base = os.path.dirname(get_module(scrub(modulename)).__file__)
    return os.path.join(base, *joins)
def get_module_list(app_name):
    """Return the modules declared in the app's `modules.txt`."""
    app_dir = os.path.dirname(get_module(app_name).__file__)
    return get_file_items(os.path.join(app_dir, "modules.txt"))
def get_all_apps(with_internal_apps=True, sites_path=None):
    """Return the list of all apps from `sites/apps.txt`.

    :param with_internal_apps: also merge the site-level `apps.txt`.
    :param sites_path: override the sites directory (default `local.sites_path`)."""
    sites_path = sites_path or local.sites_path

    apps = get_file_items(os.path.join(sites_path, "apps.txt"), raise_not_found=True)

    if with_internal_apps:
        for app in get_file_items(os.path.join(local.site_path, "apps.txt")):
            if app not in apps:
                apps.append(app)

    # frappe is always moved to the front of the list
    if "frappe" in apps:
        apps.remove("frappe")
        apps.insert(0, 'frappe')
    return apps
def get_installed_apps(sort=False, frappe_last=False):
    """Return the apps installed in the current site.

    :param sort: order as in `get_all_apps`.
    :param frappe_last: move `frappe` to the end of the list."""
    if getattr(flags, "in_install_db", True):
        return []

    if not db:
        connect()

    installed = json.loads(db.get_global("installed_apps") or "[]")

    if sort:
        installed = [app for app in get_all_apps(True) if app in installed]

    if frappe_last and 'frappe' in installed:
        installed.remove('frappe')
        installed.append('frappe')

    return installed
def get_doc_hooks():
    '''Return `doc_events` hooks keyed by single doctype, expanding tuple
    keys into one entry per doctype. Cached on `local`.'''
    if not hasattr(local, 'doc_events_hooks'):
        expanded = {}
        for key, value in iteritems(get_hooks('doc_events', {})):
            doctypes = key if isinstance(key, tuple) else (key,)
            for doctype in doctypes:
                append_hook(expanded, doctype, value)
        local.doc_events_hooks = expanded
    return local.doc_events_hooks
def get_hooks(hook=None, default=None, app_name=None):
    """Get hooks via `app/hooks.py`.

    :param hook: Name of the hook. Gathers all values for this name across apps and returns them as a list.
    :param default: Default if no hook found.
    :param app_name: Filter by app."""
    def load_app_hooks(app_name=None):
        hooks = {}
        for app in [app_name] if app_name else get_installed_apps(sort=True):
            app = "frappe" if app == "webnotes" else app
            try:
                app_hooks = get_module(app + ".hooks")
            except ImportError:
                if local.flags.in_install_app:
                    # if app is not installed while restoring, skip it
                    # (BUGFIX: was a dead `pass` that fell through to the
                    # raise below, despite the "ignore it" intent)
                    continue
                # BUGFIX: report the app that actually failed, not the
                # `app_name` filter argument (which is None when iterating
                # over all installed apps)
                print('Could not find app "{0}"'.format(app))
                if not request:
                    sys.exit(1)
                raise
            for key in dir(app_hooks):
                if not key.startswith("_"):
                    append_hook(hooks, key, getattr(app_hooks, key))
        return hooks

    if app_name:
        hooks = _dict(load_app_hooks(app_name))
    else:
        # the merged hook map is cached across requests
        hooks = _dict(cache().get_value("app_hooks", load_app_hooks))

    if hook:
        return hooks.get(hook) or (default if default is not None else [])
    else:
        return hooks
def append_hook(target, key, value):
    '''Append hook `value` under `key` in the `target` dict.

    Dict values (e.g. `doc_events`) are merged recursively key by key;
    every other value is accumulated into a list under `key`.
    '''
    if isinstance(value, dict):
        # merge nested hook dicts one inner key at a time
        subtarget = target.setdefault(key, {})
        for inner_key in value:
            append_hook(subtarget, inner_key, value[inner_key])
    else:
        bucket = target.setdefault(key, [])
        bucket.extend(value if isinstance(value, list) else [value])
def setup_module_map():
    """Rebuild the app -> modules and module -> app maps (internal).

    When a database is configured the maps are read from / written to cache."""
    _cache = cache()

    if conf.db_name:
        local.app_modules = _cache.get_value("app_modules")
        local.module_app = _cache.get_value("module_app")

    if not (local.app_modules and local.module_app):
        local.module_app, local.app_modules = {}, {}
        for app in get_all_apps(True):
            if app == "webnotes":
                # legacy alias for frappe
                app = "frappe"
            local.app_modules.setdefault(app, [])
            for module in get_module_list(app):
                module = scrub(module)
                local.module_app[module] = app
                local.app_modules[app].append(module)

        if conf.db_name:
            _cache.set_value("app_modules", local.app_modules)
            _cache.set_value("module_app", local.module_app)
def get_file_items(path, raise_not_found=False, ignore_empty_lines=True):
    """Return the lines of a text file as a stripped list.

    With `ignore_empty_lines` (default), blank lines and lines starting
    with `#` are dropped; otherwise every line is kept."""
    import frappe.utils

    content = read_file(path, raise_not_found=raise_not_found)
    if not content:
        return []

    content = frappe.utils.strip(content)
    items = []
    for line in content.splitlines():
        # note: the comment test is on the raw line start, so an indented
        # "#" is not treated as a comment (matches historical behavior)
        if ignore_empty_lines and (not line.strip() or line.startswith("#")):
            continue
        items.append(line.strip())
    return items
def get_file_json(path):
    """Read the file at `path` and return its parsed JSON content."""
    with open(path, 'r') as json_file:
        return json.load(json_file)
def read_file(path, raise_not_found=False):
    """Return the content of the file at `path` as unicode.

    :param raise_not_found: raise `IOError` when missing instead of
        returning None."""
    if isinstance(path, text_type):
        # py2 compatibility: encode the path for filesystem calls
        path = path.encode("utf-8")

    if os.path.exists(path):
        with open(path, "r") as filehandle:
            return as_unicode(filehandle.read())
    elif raise_not_found:
        raise IOError("{} Not Found".format(path))
    else:
        return None
def get_attr(method_string):
    """Resolve a dotted path like `app.module.fn` to the Python object.

    :raises AppNotInstalledError: when the app is not installed (outside
        of an install run)."""
    app_name = method_string.split(".")[0]
    if not local.flags.in_install and app_name not in get_installed_apps():
        throw(_("App {0} is not installed").format(app_name), AppNotInstalledError)

    modulename, methodname = method_string.rsplit(".", 1)
    return getattr(get_module(modulename), methodname)
def call(fn, *args, **kwargs):
    """Call `fn`, passing through only those keyword arguments that its
    signature accepts (or all of them when it takes `**kwargs`).

    :param fn: callable, or a dotted-path string resolved via `get_attr`.
    The special `flags` keyword is always stripped before the call."""
    if isinstance(fn, string_types):
        fn = get_attr(fn)

    if hasattr(fn, 'fnargs'):
        fnargs = fn.fnargs
        # BUGFIX: varkw was left unbound on this branch, raising NameError
        # in the filtering loop below whenever kwargs were supplied
        varargs, varkw, defaults = None, None, None
    else:
        fnargs, varargs, varkw, defaults = inspect.getargspec(fn)

    newargs = {}
    for a in kwargs:
        if (a in fnargs) or varkw:
            newargs[a] = kwargs.get(a)

    if "flags" in newargs:
        del newargs["flags"]

    return fn(*args, **newargs)
def make_property_setter(args, ignore_validate=False, validate_fields_for_doctype=True):
    """Create a new **Property Setter** (for overriding DocType and DocField properties).

    If doctype is not specified, it will create a property setter for all fields with the
    given fieldname.

    :param args: dict with keys like doctype_or_field, doctype, fieldname,
        property, value and property_type.
    :param ignore_validate: skip validation when inserting the Property Setter.
    :param validate_fields_for_doctype: passed through on the document flags.
    """
    args = _dict(args)
    if not args.doctype_or_field:
        args.doctype_or_field = 'DocField'
        if not args.property_type:
            # infer the property's fieldtype from the DocField definition
            args.property_type = db.get_value('DocField',
                {'parent': 'DocField', 'fieldname': args.property}, 'fieldtype') or 'Data'

    if not args.doctype:
        # no doctype given: apply to every doctype that has this fieldname
        doctype_list = db.sql_list('select distinct parent from tabDocField where fieldname=%s', args.fieldname)
    else:
        doctype_list = [args.doctype]

    for doctype in doctype_list:
        if not args.property_type:
            # NOTE(review): this fallback looks up by args.fieldname while the
            # earlier one uses args.property — confirm which key is intended.
            args.property_type = db.get_value('DocField',
                {'parent': doctype, 'fieldname': args.fieldname}, 'fieldtype') or 'Data'

        ps = get_doc({
            'doctype': "Property Setter",
            'doctype_or_field': args.doctype_or_field,
            'doc_type': doctype,
            'field_name': args.fieldname,
            'property': args.property,
            'value': args.value,
            'property_type': args.property_type or "Data",
            '__islocal': 1
        })
        ps.flags.ignore_validate = ignore_validate
        ps.flags.validate_fields_for_doctype = validate_fields_for_doctype
        ps.validate_fieldtype_change()
        ps.insert()
def import_doc(path, ignore_links=False, ignore_insert=False, insert=False):
    """Import a file using the Data Import Tool."""
    from frappe.core.page.data_import_tool import data_import_tool
    data_import_tool.import_doc(path,
        ignore_links=ignore_links, ignore_insert=ignore_insert, insert=insert)
def copy_doc(doc, ignore_no_copy=True):
    """Return an unsaved deep copy of `doc` with identity fields cleared.

    :param doc: Document object or dict to copy.
    :param ignore_no_copy: when True (default) fields marked `no_copy`
        are retained in the copy; when False they are blanked out.
    """
    import copy

    def remove_no_copy_fields(d):
        # blank every field flagged no_copy on this document
        for df in d.meta.get("fields", {"no_copy": 1}):
            if hasattr(d, df.fieldname):
                d.set(df.fieldname, None)

    fields_to_clear = ['name', 'owner', 'creation', 'modified', 'modified_by']

    if not local.flags.in_test:
        # docstatus is preserved only while running tests
        fields_to_clear.append("docstatus")

    if not isinstance(doc, dict):
        d = doc.as_dict()
    else:
        d = doc

    newdoc = get_doc(copy.deepcopy(d))
    newdoc.set("__islocal", 1)
    for fieldname in (fields_to_clear + ['amended_from', 'amendment_date']):
        newdoc.set(fieldname, None)

    if not ignore_no_copy:
        remove_no_copy_fields(newdoc)

    # child rows are copied as well and likewise marked local
    for i, d in enumerate(newdoc.get_all_children()):
        d.set("__islocal", 1)
        for fieldname in fields_to_clear:
            d.set(fieldname, None)
        if not ignore_no_copy:
            remove_no_copy_fields(d)

    return newdoc
def compare(val1, condition, val2):
    """Compare two values via `frappe.utils.compare`.

    Supported conditions: "^", "in", "not in", "=", "!=", ">", "<",
    ">=", "<=", "not None", "None".
    """
    import frappe.utils
    return frappe.utils.compare(val1, condition, val2)
def respond_as_web_page(title, html, success=None, http_status_code=None,
    context=None, indicator_color=None, primary_action='/', primary_label=None, fullpage=False,
    width=None):
    """Send the response as a rendered message page instead of JSON
    (used for permission errors and similar user-facing messages).

    :param title: Page title and heading.
    :param html: Message body markup.
    :param success: Show as a success alert (green indicator).
    :param http_status_code: HTTP status code.
    :param context: extra web template context.
    :param indicator_color: title indicator color (derived from
        success/status when omitted).
    :param primary_action: route of the primary button (default `/`).
    :param primary_label: label of the primary button (default "Home").
    :param fullpage: hide header/footer.
    :param width: message card width in pixels.
    """
    local.message_title = title
    local.message = html
    local.response['type'] = 'page'
    local.response['route'] = 'message'
    if http_status_code:
        local.response['http_status_code'] = http_status_code

    context = context or {}

    if not indicator_color:
        if success:
            indicator_color = 'green'
        elif http_status_code and http_status_code > 300:
            indicator_color = 'red'
        else:
            indicator_color = 'blue'

    context.update({
        'indicator_color': indicator_color,
        'primary_label': primary_label,
        'primary_action': primary_action,
        'error_code': http_status_code,
        'fullpage': fullpage,
    })
    if width:
        context['card_width'] = width

    local.response['context'] = context
def redirect_to_message(title, html, http_status_code=None, context=None, indicator_color=None):
    """Redirect to `/message?id=<random>` showing a detailed message page.

    Similar to `respond_as_web_page`, but stores the message in cache for
    60 seconds and redirects to it; during AJAX the location is returned
    instead of set on the response.

    :param title: Page title and heading.
    :param html: Message body markup.
    :param http_status_code: HTTP status code (default 200).

    Example:

        frappe.redirect_to_message(_('Thank you'), "<div><p>You will receive an email at test@example.com</p></div>")
    """
    message_id = generate_hash(length=8)
    message = {
        'context': context or {},
        'http_status_code': http_status_code or 200
    }
    message['context'].update({
        'header': title,
        'title': title,
        'message': html
    })

    if indicator_color:
        message['context'].update({
            "indicator_color": indicator_color
        })

    cache().set_value("message_id:{0}".format(message_id), message, expires_in_sec=60)
    location = '/message?id={0}'.format(message_id)

    if not getattr(local, 'is_ajax', False):
        local.response["type"] = "redirect"
        local.response["location"] = location
    else:
        return location
def build_match_conditions(doctype, as_condition=True):
    """Return the User-permission match conditions for `doctype`,
    as SQL (default) or as a list."""
    import frappe.desk.reportview
    return frappe.desk.reportview.build_match_conditions(doctype, as_condition)
def get_list(doctype, *args, **kwargs):
    """Query a DocType via `frappe.model.db_query`, enforcing permissions.

    :param doctype: DocType to query.
    :param fields: list of fields or `*`.
    :param filters: dict or list-of-lists of filters.
    :param order_by: e.g. `modified desc`.
    :param limit_page_start: offset (default 0).
    :param limit_page_length: page size (default 20).

    Examples:

        # simple dict filter
        frappe.get_list("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

        # filter as a list of lists
        frappe.get_list("ToDo", fields="*", filters = [["modified", ">", "2014-01-01"]])

        # filter as a list of dicts
        frappe.get_list("ToDo", fields="*", filters = {"description": ("like", "test%")})
    """
    import frappe.model.db_query
    query = frappe.model.db_query.DatabaseQuery(doctype)
    return query.execute(None, *args, **kwargs)
def get_all(doctype, *args, **kwargs):
    """Like `frappe.get_list`, but without permission checks and with all
    rows returned by default (`limit_page_length=0`).

    :param doctype: DocType to query.
    :param fields: list of fields or `*`; default `["name"]`.
    :param filters: dict or list-of-lists of filters.
    :param order_by: e.g. `modified desc`.

    Examples:

        # simple dict filter
        frappe.get_all("ToDo", fields=["name", "description"], filters = {"owner":"test@example.com"})

        # filter as a list of lists
        frappe.get_all("ToDo", fields=["*"], filters = [["modified", ">", "2014-01-01"]])

        # filter as a list of dicts
        frappe.get_all("ToDo", fields=["*"], filters = {"description": ("like", "test%")})
    """
    kwargs["ignore_permissions"] = True
    kwargs.setdefault("limit_page_length", 0)
    return get_list(doctype, *args, **kwargs)
def get_value(*args, **kwargs):
    """Return a document property or list of properties; alias for
    `frappe.db.get_value`.

    :param doctype: DocType name.
    :param filters: filters dict, or document name; None for a Single DocType.
    :param fieldname: column name.
    :param ignore: don't raise if the table or column is missing.
    :param as_dict: return values as a dict.
    :param debug: print the query in the error log.
    """
    return db.get_value(*args, **kwargs)
def as_json(obj, indent=1):
    """Serialize `obj` to a JSON string with sorted keys, using
    `frappe.utils.response.json_handler` for non-primitive types."""
    from frappe.utils.response import json_handler
    return json.dumps(obj, indent=indent, sort_keys=True, default=json_handler)
def are_emails_muted():
    """Return truthy if outgoing email is disabled, either via
    `flags.mute_emails` or the `mute_emails` site config."""
    from frappe.utils import cint
    return flags.mute_emails or cint(conf.get("mute_emails") or 0) or False
def get_test_records(doctype):
    """Return the fixture objects from the doctype's `test_records.json`,
    or an empty list when the file does not exist."""
    from frappe.modules import get_doctype_module, get_module_path

    path = os.path.join(get_module_path(get_doctype_module(doctype)),
        "doctype", scrub(doctype), "test_records.json")
    if not os.path.exists(path):
        return []
    with open(path, "r") as records_file:
        return json.loads(records_file.read())
def format_value(*args, **kwargs):
    """Format a value with the given field properties.

    :param value: Value to be formatted.
    :param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
    import frappe.utils.formatters
    return frappe.utils.formatters.format_value(*args, **kwargs)
def format(*args, **kwargs):
    """Format value with given field properties (alias of `format_value`).

    NOTE: shadows the builtin `format` within this module's namespace.

    :param value: Value to be formatted.
    :param df: (Optional) DocField object with properties `fieldtype`, `options` etc."""
    import frappe.utils.formatters
    return frappe.utils.formatters.format_value(*args, **kwargs)
def get_print(doctype=None, name=None, print_format=None, style=None, html=None, as_pdf=False, doc=None, output=None):
    """Render the print view of a document.

    :param doctype: DocType of the document.
    :param name: Name of the document.
    :param print_format: Print Format name; default 'Standard'.
    :param style: Print Format style.
    :param html: pre-rendered markup (skips the printview render).
    :param as_pdf: return a PDF instead of HTML (default False).
    :param output: passed through to the PDF writer."""
    from frappe.website.render import build_page
    from frappe.utils.pdf import get_pdf

    # the printview template reads its inputs from form_dict
    local.form_dict.doctype = doctype
    local.form_dict.name = name
    local.form_dict.format = print_format
    local.form_dict.style = style
    local.form_dict.doc = doc

    html = html or build_page("printview")
    return get_pdf(html, output=output) if as_pdf else html
def attach_print(doctype, name, file_name=None, print_format=None, style=None, html=None, doc=None):
    """Render a document's print view as an attachment dict
    `{"fname": ..., "fcontent": ...}` — PDF or UTF-8 HTML depending on the
    "send_print_as_pdf" Print Setting."""
    from frappe.utils import scrub_urls

    if not file_name: file_name = name
    # sanitize the attachment name
    file_name = file_name.replace(' ','').replace('/','-')

    print_settings = db.get_singles_dict("Print Settings")

    # print permissions are bypassed while rendering the attachment
    local.flags.ignore_print_permissions = True

    if int(print_settings.send_print_as_pdf or 0):
        out = {
            "fname": file_name + ".pdf",
            "fcontent": get_print(doctype, name, print_format=print_format, style=style, html=html, as_pdf=True, doc=doc)
        }
    else:
        out = {
            "fname": file_name + ".html",
            "fcontent": scrub_urls(get_print(doctype, name, print_format=print_format, style=style, html=html, doc=doc)).encode("utf-8")
        }

    local.flags.ignore_print_permissions = False

    return out
def publish_progress(*args, **kwargs):
    """Show the user progress for a long request.

    :param percent: Percent progress
    :param title: Title
    :param doctype: Optional, for DocType
    :param name: Optional, for Document name
    """
    # NOTE(review): `async` is a reserved word from Python 3.7 on, so this
    # import is a SyntaxError there — the module would need renaming to run
    # on modern interpreters. Confirm the target Python version.
    import frappe.async
    return frappe.async.publish_progress(*args, **kwargs)
def publish_realtime(*args, **kwargs):
    """Publish real-time updates

    :param event: Event name, like `task_progress` etc.
    :param message: JSON message object. For async must contain `task_id`
    :param room: Room in which to publish update (default entire site)
    :param user: Transmit to user
    :param doctype: Transmit to doctype, docname
    :param docname: Transmit to doctype, docname
    :param after_commit: (default False) will emit after current transaction is committed
    """
    # NOTE(review): `async` is a reserved word from Python 3.7 on, so this
    # import is a SyntaxError there — see publish_progress above.
    import frappe.async
    return frappe.async.publish_realtime(*args, **kwargs)
def local_cache(namespace, key, generator, regenerate_if_none=False):
    """A key-value store for caching within a request.

    :param namespace: first-level key under `frappe.local.cache`.
    :param key: second-level key whose value is returned.
    :param generator: zero-argument callable producing the value on a miss.
    :param regenerate_if_none: re-run `generator` when the cached value is None.
    """
    if namespace not in local.cache:
        local.cache[namespace] = {}

    if key not in local.cache[namespace]:
        local.cache[namespace][key] = generator()
    # BUGFIX: use identity comparison with None (PEP 8); `== None` can
    # misbehave for objects overriding __eq__
    elif local.cache[namespace][key] is None and regenerate_if_none:
        # the previous call cached None; try again
        local.cache[namespace][key] = generator()

    return local.cache[namespace][key]
def enqueue(*args, **kwargs):
    '''Enqueue a method for execution by a background worker.

    :param method: method string or method object
    :param queue: (optional) one of long, default or short
    :param timeout: (optional) set according to the function's runtime
    :param event: passed through to enable clearing jobs from queues
    :param async: (optional) if async=False the method runs immediately
    :param job_name: (optional) names the call to deduplicate enqueues
    :param kwargs: keyword arguments passed on to the method
    '''
    import frappe.utils.background_jobs
    return frappe.utils.background_jobs.enqueue(*args, **kwargs)
def get_doctype_app(doctype):
    """Return the app that provides the given DocType (request-cached)."""
    def _get_doctype_app():
        doctype_module = local.db.get_value("DocType", doctype, "module")
        return local.module_app[scrub(doctype_module)]

    return local_cache("doctype_app", doctype, generator=_get_doctype_app)
loggers = {}
log_level = None

def logger(module=None, with_more_info=True):
    '''Return a python logger (StreamHandler-based) for the given module,
    defaulting to the 'default' logger.'''
    from frappe.utils.logger import get_logger
    return get_logger(module or 'default', with_more_info=with_more_info)
def log_error(message=None, title=None):
    '''Create an Error Log document from `message`
    (defaults to the current traceback).'''
    error_text = as_unicode(message or get_traceback())
    get_doc(dict(doctype='Error Log', error=error_text,
        method=title)).insert(ignore_permissions=True)
def get_desk_link(doctype, name):
    """Return an HTML anchor that opens the document's desk form."""
    template = '<a href="#Form/{0}/{1}" style="font-weight: bold;">{2} {1}</a>'
    return template.format(doctype, name, _(doctype))
def bold(text):
    """Wrap `text` in HTML bold tags."""
    template = '<b>{0}</b>'
    return template.format(text)
def safe_eval(code, eval_globals=None, eval_locals=None):
    '''A safer `eval`.

    SECURITY NOTE(review): this is still `eval` underneath — rejecting
    "__" substrings and blanking `__builtins__` is not a real sandbox.
    Do not run untrusted input through this function.

    :param code: expression string to evaluate.
    :param eval_globals: optional globals dict (builtins are stripped,
        then the whitelist below is merged in).
    :param eval_locals: optional locals dict.
    '''
    # the only builtins visible to the evaluated expression
    whitelisted_globals = {
        "int": int,
        "float": float,
        "long": int,
        "round": round
    }
    if '__' in code:
        throw('Illegal rule {0}. Cannot use "__"'.format(bold(code)))

    if not eval_globals:
        eval_globals = {}
    eval_globals['__builtins__'] = {}
    eval_globals.update(whitelisted_globals)
    return eval(code, eval_globals, eval_locals)
def get_system_settings(key):
    """Return a System Settings value, cached on `local.system_settings`."""
    if key not in local.system_settings:
        value = db.get_single_value('System Settings', key)
        local.system_settings.update({key: value})
    return local.system_settings.get(key)
def get_active_domains():
    """Return the list of active domains from Domain Settings."""
    # BUGFIX: removed a stray trailing " |" (table-extraction artifact)
    # that made the return statement a syntax error.
    from frappe.core.doctype.domain_settings.domain_settings import get_active_domains
    return get_active_domains()
#!/bin/env python
"""
Given a pair of new regions for each hole number that matched vector sequence,
update the hole number's original region entry with the maximum of the two new
regions.
"""
usage = """%prog filename_prefix new_regions.tab"""
from collections import defaultdict
import csv
import h5py
from optparse import OptionParser
import os
import sys
def update_regions(filename_prefix, regions_filename):
    """
    Given a tab-delimited file of regions per hole, find each hole's original
    high quality region and resize that region with the largest of the two new
    regions when applicable.

    :param filename_prefix: prefix of the "<prefix>_masked.bas.h5" file to update.
    :param regions_filename: tab-delimited file of "smrtcell/hole<TAB>start<TAB>end" rows.
    """
    filename = filename_prefix

    # Read in region coordinates and group them by filename and then by hole number.
    regions_by_file = defaultdict(dict)
    with open(regions_filename, "r") as fh:
        reader = csv.reader(fh, delimiter="\t")
        for row in reader:
            # Each row looks like this with the first column containing the
            # filename/hole_number and the other columns the start and end of
            # the non-vector region.
            # m120301_070511_42141_c100304962550000001523012408061245_s1_p0/10	418	934
            pieces = row[0].split("/")
            smrtcell_name, hole_number = pieces[:2]
            hole_number = int(hole_number)
            row_ints = map(int, row[1:])
            regions_by_file[filename].setdefault(hole_number, []).append(row_ints)

    # Open each unique base file and search regions by hole number.
    for filename, regions_to_update in regions_by_file.iteritems():
        sys.stderr.write("Found %i regions to update for %s_masked.bas.h5\n" % (len(regions_to_update), filename))

        # Use the first and last hole number to reduce the search space of the
        # base file.
        first_hole = min(regions_to_update.keys())
        last_hole = max(regions_to_update.keys())

        # Open the base file in append mode to allow in-place updates to the
        # Regions dataset.
        total_bases_removed = 0
        # BUGFIX: the masked file is named "<prefix>_masked.bas.h5" (as in the
        # stderr message above); this previously built "%s.masked.bas.h5" and
        # so never matched the actual file on disk.
        hdf5_filename = "%s_masked.bas.h5" % filename
        if not os.path.exists(hdf5_filename):
            raise Exception("File '%s' doesn't exist." % hdf5_filename)

        with h5py.File(hdf5_filename, "a") as h5f:
            for i in xrange(len(h5f["PulseData/Regions"])):
                region = h5f["PulseData/Regions"][i]
                hole_number = region[0]

                if hole_number < first_hole or region[2] == region[3]:
                    # Skip regions that are either before the first hole we want
                    # or that have no high quality region.
                    continue
                elif hole_number > last_hole:
                    # Stop searching Regions once we've passed the last hole in
                    # our search set.
                    break
                elif hole_number in regions_to_update:
                    # Find all new regions that intersect this hole's high
                    # quality region, keep only those regions where start <= end.
                    new_regions = filter(
                        lambda x: x[0] <= x[1],
                        [intersect_regions(region[2:4], region_to_update)
                         for region_to_update in regions_to_update[hole_number]]
                    )

                    # If there are any regions that passed the above tests, pick
                    # the region with the largest range (based on end -
                    # start). Otherwise, there are no valid new regions within
                    # this high quality region, so the entire region must be set
                    # to a zero length.
                    if len(new_regions) > 0:
                        new_region = max(new_regions, key=lambda r: r[1] - r[0])
                    else:
                        new_region = (0, 0)

                    # Don't try to update regions with the same range.
                    if tuple(region[2:4]) != tuple(new_region):
                        sys.stderr.write("update %i: %s to %s\n" % (hole_number, tuple(region[2:4]), tuple(new_region)))
                        total_bases_removed += (region[3] - region[2]) - (new_region[1] - new_region[0])
                        region[2:4] = new_region
                        h5f["PulseData/Regions"][i] = region

        sys.stderr.write("total bases removed: %i\n" % total_bases_removed)
def intersect_regions(region_a, region_b):
    """
    Return the (start, end) overlap of two coordinate pairs. When the
    regions are disjoint, the returned start exceeds the end.
    """
    start = max(region_a[0], region_b[0])
    end = min(region_a[1], region_b[1])
    return (start, end)
if __name__ == "__main__":
    option_parser = OptionParser(usage=usage)
    options, positional = option_parser.parse_args()

    if len(positional) == 2:
        update_regions(*positional)
    else:
        sys.stderr.write("Specify a filename prefix and a tab-delimited regions file.\n")
        option_parser.print_usage()
        sys.exit(1)
# Update update_regions.py — revision note (extraction artifact separating two versions of this script)
#!/bin/env python
"""
Given a pair of new regions for each hole number that matched vector sequence,
update the hole number's original region entry with the maximum of the two new
regions.
"""
usage = """%prog filename_prefix new_regions.tab"""
from collections import defaultdict
import csv
import h5py
from optparse import OptionParser
import os
import sys
def update_regions(filename_prefix, regions_filename):
    """
    Given a tab-delimited file of regions per hole, find each hole's original
    high quality region and resize that region with the largest of the two new
    regions when applicable.

    Parameters:
      filename_prefix: prefix of the HDF5 base file to modify; the file
        actually opened is "<filename_prefix>_masked.bas.h5".
      regions_filename: tab-delimited file whose rows look like
        "<smrtcell_name>/<hole_number>\\tstart\\tend".

    Side effects: rewrites rows of the PulseData/Regions dataset in place
    and writes progress messages to stderr.

    Raises: Exception if "<filename_prefix>_masked.bas.h5" does not exist.
    """
    filename = filename_prefix
    # Read in region coordinates and group them by filename and then by hole number.
    # NOTE(review): rows are grouped under `filename`, which is always the
    # fixed filename_prefix — the smrtcell_name parsed from each row is never
    # used, so every row collapses into a single group. Confirm this override
    # is intentional.
    regions_by_file = defaultdict(dict)
    with open(regions_filename, "r") as fh:
        reader = csv.reader(fh, delimiter="\t")
        for row in reader:
            # Each row looks like this with the first column containing the
            # filename/hole_number and the other columns the start and end of
            # the non-vector region.
            # m120301_070511_42141_c100304962550000001523012408061245_s1_p0/10 418 934
            pieces = row[0].split("/")
            smrtcell_name, hole_number = pieces[:2]
            hole_number = int(hole_number)
            # Python 2 `map` returns a list of [start, end] ints.
            row_ints = map(int, row[1:])
            regions_by_file[filename].setdefault(hole_number, []).append(row_ints)
    # Open each unique base file and search regions by hole number.
    for filename, regions_to_update in regions_by_file.iteritems():
        sys.stderr.write("Found %i regions to update for %s_masked.bas.h5\n" % (len(regions_to_update), filename))
        # Use the first and last hole number to reduce the search space of the
        # base file.
        first_hole = min(regions_to_update.keys())
        last_hole = max(regions_to_update.keys())
        # Open the base file in append mode to allow in-place updates to the
        # Regions dataset.
        total_bases_removed = 0
        hdf5_filename = "%s_masked.bas.h5" % filename
        if not os.path.exists(hdf5_filename):
            raise Exception("File '%s' doesn't exist." % hdf5_filename)
        with h5py.File(hdf5_filename, "a") as h5f:
            # NOTE(review): this assumes Regions rows are sorted by hole
            # number (the first/last-hole pruning below relies on it) and that
            # region[2:4] is the high-quality start/end span — confirm against
            # the bas.h5 Regions table spec.
            for i in xrange(len(h5f["PulseData/Regions"])):
                region = h5f["PulseData/Regions"][i]
                hole_number = region[0]
                if hole_number < first_hole or region[2] == region[3]:
                    # Skip regions that are either before the first hole we want
                    # or that have no high quality region.
                    continue
                elif hole_number > last_hole:
                    # Stop searching Regions once we've passed the last hole in
                    # our search set.
                    break
                elif hole_number in regions_to_update:
                    # Find all new regions that intersect this hole's high
                    # quality region, keep only those regions where start <= end.
                    new_regions = filter(
                        lambda x: x[0] <= x[1],
                        [intersect_regions(region[2:4], region_to_update)
                         for region_to_update in regions_to_update[hole_number]]
                    )
                    # If there are any regions that passed the above tests, pick
                    # the region with the largest range (based on end -
                    # start). Otherwise, there are no valid new regions within
                    # this high quality region, so the entire region must be set
                    # to a zero length.
                    if len(new_regions) > 0:
                        new_region = max(new_regions, key=lambda r: r[1] - r[0])
                    else:
                        new_region = (0, 0)
                    # Don't try to update regions with the same range.
                    if tuple(region[2:4]) != tuple(new_region):
                        sys.stderr.write("update %i: %s to %s\n" % (hole_number, tuple(region[2:4]), tuple(new_region)))
                        total_bases_removed += (region[3] - region[2]) - (new_region[1] - new_region[0])
                        # Write the shrunken span back into the dataset row;
                        # assignment to h5f[...][i] persists the change.
                        region[2:4] = new_region
                        h5f["PulseData/Regions"][i] = region
        sys.stderr.write("total bases removed: %i\n" % total_bases_removed)
def intersect_regions(region_a, region_b):
    """Compute the intersection of two (start, end) coordinate pairs."""
    overlap_start = region_a[0] if region_a[0] > region_b[0] else region_b[0]
    overlap_end = region_a[1] if region_a[1] < region_b[1] else region_b[1]
    return (overlap_start, overlap_end)
if __name__ == "__main__":
    # Validate the two required positional arguments before doing any work.
    opt_parser = OptionParser(usage=usage)
    _, positional = opt_parser.parse_args()
    if len(positional) == 2:
        update_regions(*positional)
    else:
        sys.stderr.write("Specify a filename prefix and a tab-delimited regions file.\n")
        opt_parser.print_usage()
        sys.exit(1)
|
import string
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from sklearn.utils.extmath import logsumexp
from . import _hmmc
from .utils import normalize
decoder_algorithms = frozenset(("viterbi", "map"))
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
class _BaseHMM(BaseEstimator):
    """Hidden Markov Model base class.

    Representation of a hidden Markov model probability distribution.
    This class allows for easy evaluation of, sampling from, and
    maximum-likelihood estimation of the parameters of a HMM.

    See the instance documentation for details specific to a
    particular object.

    Attributes
    ----------
    n_components : int
        Number of states in the model.
    transmat : array, shape (`n_components`, `n_components`)
        Matrix of transition probabilities between states.
    startprob : array, shape ('n_components`,)
        Initial state occupation distribution.
    transmat_prior : array, shape (`n_components`, `n_components`)
        Matrix of prior transition probabilities between states.
    startprob_prior : array, shape ('n_components`,)
        Initial state occupation prior distribution.
    algorithm : string, one of the decoder_algorithms
        Decoder algorithm.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    n_iter : int, optional
        Number of iterations to perform.
    thresh : float, optional
        Convergence threshold.
    params : string, optional
        Controls which parameters are updated in the training
        process. Can contain any combination of 's' for startprob,
        't' for transmat, and other characters for subclass-specific
        emmission parameters. Defaults to all parameters.
    init_params : string, optional
        Controls which parameters are initialized prior to
        training. Can contain any combination of 's' for
        startprob, 't' for transmat, and other characters for
        subclass-specific emmission parameters. Defaults to all
        parameters.

    See Also
    --------
    GMM : Gaussian mixture model
    """

    # This class implements the public interface to all HMMs that
    # derive from it, including all of the machinery for the
    # forward-backward and Viterbi algorithms. Subclasses need only
    # implement _generate_sample_from_state(), _compute_log_likelihood(),
    # _init(), _initialize_sufficient_statistics(),
    # _accumulate_sufficient_statistics(), and _do_mstep(), all of
    # which depend on the specific emission distribution.
    #
    # Subclasses will probably also want to implement properties for
    # the emission distribution parameters to expose them publicly.

    def __init__(self, n_components=1, startprob=None, transmat=None,
                 startprob_prior=None, transmat_prior=None,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, thresh=1e-2, params=string.ascii_letters,
                 init_params=string.ascii_letters):
        # TODO: move all validation from descriptors to 'fit' and 'predict'.
        self.n_components = n_components
        self.n_iter = n_iter
        self.thresh = thresh
        self.params = params
        self.init_params = init_params
        # These two assignments go through the startprob_/transmat_ property
        # setters below, which normalize and validate the distributions.
        self.startprob_ = startprob
        self.startprob_prior = startprob_prior
        self.transmat_ = transmat
        self.transmat_prior = transmat_prior
        self.algorithm = algorithm
        self.random_state = random_state
        # Bookkeeping populated by fit(): iteration count and the last
        # observed log-likelihood improvement (sentinel until convergence).
        self.num_iterations_performed = 0
        self.threshold_reached = 999999

    def eval(self, X):
        # Alias for score_samples.
        return self.score_samples(X)

    def score_samples(self, obs):
        """Compute the log probability under the model and compute posteriors.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        logprob : float
            Log likelihood of the sequence ``obs``.
        posteriors : array_like, shape (n, n_components)
            Posterior probabilities of each state for each
            observation

        See Also
        --------
        score : Compute the log probability under the model
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        logprob, fwdlattice = self._do_forward_pass(framelogprob)
        bwdlattice = self._do_backward_pass(framelogprob)
        gamma = fwdlattice + bwdlattice
        # gamma is guaranteed to be correctly normalized by logprob at
        # all frames, unless we do approximate inference using pruning.
        # So, we will normalize each frame explicitly in case we
        # pruned too aggressively.
        posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
        posteriors += np.finfo(np.float64).eps
        posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
        return logprob, posteriors

    def score(self, obs):
        """Compute the log probability under the model.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : float
            Log likelihood of the ``obs``.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors
        decode : Find most likely state sequence corresponding to a `obs`
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        logprob, _ = self._do_forward_pass(framelogprob)
        return logprob

    def _decode_viterbi(self, obs):
        """Find most likely state sequence corresponding to ``obs``.

        Uses the Viterbi algorithm.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        viterbi_logprob : float
            Log probability of the maximum likelihood path through the HMM.
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation.

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model
        """
        obs = np.asarray(obs)
        framelogprob = self._compute_log_likelihood(obs)
        viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
        return viterbi_logprob, state_sequence

    def _decode_map(self, obs):
        """Find most likely state sequence corresponding to `obs`.

        Uses the maximum a posteriori estimation.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        map_logprob : float
            Log probability of the maximum likelihood path through the HMM
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model.
        """
        _, posteriors = self.score_samples(obs)
        state_sequence = np.argmax(posteriors, axis=1)
        map_logprob = np.max(posteriors, axis=1).sum()
        return map_logprob, state_sequence

    def decode(self, obs, algorithm="viterbi"):
        """Find most likely state sequence corresponding to ``obs``.

        Uses the selected algorithm for decoding.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.
        algorithm : string, one of the `decoder_algorithms`
            decoder algorithm to be used

        Returns
        -------
        logprob : float
            Log probability of the maximum likelihood path through the HMM
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation

        See Also
        --------
        score_samples : Compute the log probability under the model and
            posteriors.
        score : Compute the log probability under the model.
        """
        # The instance-level algorithm (validated by the property setter)
        # takes precedence over the keyword argument.
        if self.algorithm in decoder_algorithms:
            algorithm = self.algorithm
        elif algorithm in decoder_algorithms:
            algorithm = algorithm
        decoder = {"viterbi": self._decode_viterbi,
                   "map": self._decode_map}
        logprob, state_sequence = decoder[algorithm](obs)
        return logprob, state_sequence

    def predict(self, obs, algorithm="viterbi"):
        """Find most likely state sequence corresponding to `obs`.

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        state_sequence : array_like, shape (n,)
            Index of the most likely states for each observation
        """
        _, state_sequence = self.decode(obs, algorithm)
        return state_sequence

    def predict_proba(self, obs):
        """Compute the posterior probability for each state in the model

        Parameters
        ----------
        obs : array_like, shape (n, n_features)
            Sequence of n_features-dimensional data points. Each row
            corresponds to a single point in the sequence.

        Returns
        -------
        T : array-like, shape (n, n_components)
            Returns the probability of the sample for each state in the model.
        """
        _, posteriors = self.score_samples(obs)
        return posteriors

    def sample(self, n=1, random_state=None):
        """Generate random samples from the model.

        Parameters
        ----------
        n : int
            Number of samples to generate.
        random_state: RandomState or an int seed (0 by default)
            A random number generator instance. If None is given, the
            object's random_state is used

        Returns
        -------
        (obs, hidden_states)
        obs : array_like, length `n` List of samples
        hidden_states : array_like, length `n` List of hidden states
        """
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)

        startprob_pdf = self.startprob_
        startprob_cdf = np.cumsum(startprob_pdf)
        transmat_pdf = self.transmat_
        transmat_cdf = np.cumsum(transmat_pdf, 1)

        # Initial state: invert the start-probability CDF with a uniform draw.
        rand = random_state.rand()
        currstate = (startprob_cdf > rand).argmax()
        hidden_states = [currstate]
        obs = [self._generate_sample_from_state(
            currstate, random_state=random_state)]
        # Subsequent states: invert the current state's transition CDF.
        for _ in range(n - 1):
            rand = random_state.rand()
            currstate = (transmat_cdf[currstate] > rand).argmax()
            hidden_states.append(currstate)
            obs.append(self._generate_sample_from_state(
                currstate, random_state=random_state))

        return np.array(obs), np.array(hidden_states, dtype=int)

    def fit(self, obs):
        """Estimate model parameters.

        An initialization step is performed before entering the EM
        algorithm. If you want to avoid this step, pass proper
        ``init_params`` keyword argument to estimator's constructor.

        Parameters
        ----------
        obs : list
            List of array-like observation sequences, each of which
            has shape (n_i, n_features), where n_i is the length of
            the i_th observation.

        Notes
        -----
        In general, `logprob` should be non-decreasing unless
        aggressive pruning is used. Decreasing `logprob` is generally
        a sign of overfitting (e.g. a covariance parameter getting too
        small). You can fix this by getting more training data,
        or strengthening the appropriate subclass-specific regularization
        parameter.
        """
        self._init(obs, self.init_params)

        logprob = []
        for i in range(self.n_iter):
            # Expectation step
            stats = self._initialize_sufficient_statistics()
            curr_logprob = 0
            for seq in obs:
                framelogprob = self._compute_log_likelihood(seq)
                lpr, fwdlattice = self._do_forward_pass(framelogprob)
                bwdlattice = self._do_backward_pass(framelogprob)
                gamma = fwdlattice + bwdlattice
                posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
                curr_logprob += lpr
                self._accumulate_sufficient_statistics(
                    stats, seq, framelogprob, posteriors, fwdlattice,
                    bwdlattice, self.params)
            logprob.append(curr_logprob)

            # Check for convergence.
            self.num_iterations_performed = i
            # BUG FIX: the log-likelihood delta must only be computed once a
            # previous iteration exists; on the first pass (i == 0) `logprob`
            # has a single element and `logprob[-2]` raises IndexError.
            if i > 0 and logprob[-1] - logprob[-2] < self.thresh:
                self.threshold_reached = logprob[-1] - logprob[-2]
                break

            # Maximization step
            self._do_mstep(stats, self.params)

        return self

    def _get_algorithm(self):
        "decoder algorithm"
        return self._algorithm

    def _set_algorithm(self, algorithm):
        if algorithm not in decoder_algorithms:
            raise ValueError("algorithm must be one of the decoder_algorithms")
        self._algorithm = algorithm

    algorithm = property(_get_algorithm, _set_algorithm)

    def _get_startprob(self):
        """Mixing startprob for each state."""
        return np.exp(self._log_startprob)

    def _set_startprob(self, startprob):
        if startprob is None:
            # Default to a uniform start distribution.
            startprob = np.tile(1.0 / self.n_components, self.n_components)
        else:
            startprob = np.asarray(startprob, dtype=np.float)

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(startprob):
            normalize(startprob)

        if len(startprob) != self.n_components:
            raise ValueError('startprob must have length n_components')
        if not np.allclose(np.sum(startprob), 1.0):
            raise ValueError('startprob must sum to 1.0')

        # Stored in log space; probabilities are recovered in _get_startprob.
        self._log_startprob = np.log(np.asarray(startprob).copy())

    startprob_ = property(_get_startprob, _set_startprob)

    def _get_transmat(self):
        """Matrix of transition probabilities."""
        return np.exp(self._log_transmat)

    def _set_transmat(self, transmat):
        if transmat is None:
            # Default to uniform transition probabilities.
            transmat = np.tile(1.0 / self.n_components,
                               (self.n_components, self.n_components))

        # check if there exists a component whose value is exactly zero
        # if so, add a small number and re-normalize
        if not np.alltrue(transmat):
            normalize(transmat, axis=1)

        if (np.asarray(transmat).shape
                != (self.n_components, self.n_components)):
            raise ValueError('transmat must have shape '
                             '(n_components, n_components)')
        if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
            raise ValueError('Rows of transmat must sum to 1.0')

        self._log_transmat = np.log(np.asarray(transmat).copy())
        # log(0) yields nan under errstate-suppressed warnings; clamp those
        # entries to the module-level NEGINF sentinel.
        underflow_idx = np.isnan(self._log_transmat)
        self._log_transmat[underflow_idx] = NEGINF

    transmat_ = property(_get_transmat, _set_transmat)

    def _do_viterbi_pass(self, framelogprob):
        # Delegate the Viterbi recursion to the C extension.
        n_observations, n_components = framelogprob.shape
        state_sequence, logprob = _hmmc._viterbi(
            n_observations, n_components, self._log_startprob,
            self._log_transmat, framelogprob)
        return logprob, state_sequence

    def _do_forward_pass(self, framelogprob):
        # Forward algorithm in log space; returns the sequence log
        # likelihood and the filled forward lattice.
        n_observations, n_components = framelogprob.shape
        fwdlattice = np.zeros((n_observations, n_components))
        _hmmc._forward(n_observations, n_components, self._log_startprob,
                       self._log_transmat, framelogprob, fwdlattice)
        return logsumexp(fwdlattice[-1]), fwdlattice

    def _do_backward_pass(self, framelogprob):
        # Backward algorithm in log space; returns the filled backward lattice.
        n_observations, n_components = framelogprob.shape
        bwdlattice = np.zeros((n_observations, n_components))
        _hmmc._backward(n_observations, n_components, self._log_startprob,
                        self._log_transmat, framelogprob, bwdlattice)
        return bwdlattice

    def _compute_log_likelihood(self, obs):
        # Emission log likelihoods; implemented by subclasses.
        pass

    def _generate_sample_from_state(self, state, random_state=None):
        # Draw one observation from a state's emission distribution;
        # implemented by subclasses.
        pass

    def _init(self, obs, params):
        # Reset the selected parameters to uniform before EM.
        if 's' in params:
            self.startprob_.fill(1.0 / self.n_components)
        if 't' in params:
            self.transmat_.fill(1.0 / self.n_components)

    # Methods used by self.fit()

    def _initialize_sufficient_statistics(self):
        # Zeroed accumulators for one EM iteration.
        stats = {'nobs': 0,
                 'start': np.zeros(self.n_components),
                 'trans': np.zeros((self.n_components, self.n_components))}
        return stats

    def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                          posteriors, fwdlattice, bwdlattice,
                                          params):
        # Fold one observation sequence into the EM sufficient statistics.
        stats['nobs'] += 1
        if 's' in params:
            stats['start'] += posteriors[0]
        if 't' in params:
            n_observations, n_components = framelogprob.shape
            # when the sample is of length 1, it contains no transitions
            # so there is no reason to update our trans. matrix estimate
            if n_observations <= 1:
                return
            lneta = np.zeros((n_observations - 1, n_components, n_components))
            _hmmc._compute_lneta(n_observations, n_components, fwdlattice,
                                 self._log_transmat, bwdlattice, framelogprob,
                                 lneta)
            stats['trans'] += np.exp(logsumexp(lneta, axis=0))

    def _do_mstep(self, stats, params):
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        if self.startprob_prior is None:
            self.startprob_prior = 1.0
        if self.transmat_prior is None:
            self.transmat_prior = 1.0

        if 's' in params:
            # The 1e-20 floor keeps log() finite for empty states.
            self.startprob_ = normalize(
                np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
        if 't' in params:
            transmat_ = normalize(
                np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
                axis=1)
            self.transmat_ = transmat_
bug fix
import string
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_random_state
from sklearn.utils.extmath import logsumexp
from . import _hmmc
from .utils import normalize
decoder_algorithms = frozenset(("viterbi", "map"))
ZEROLOGPROB = -1e200
EPS = np.finfo(float).eps
NEGINF = -np.inf
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Attributes
----------
n_components : int
Number of states in the model.
transmat : array, shape (`n_components`, `n_components`)
Matrix of transition probabilities between states.
startprob : array, shape ('n_components`,)
Initial state occupation distribution.
transmat_prior : array, shape (`n_components`, `n_components`)
Matrix of prior transition probabilities between states.
startprob_prior : array, shape ('n_components`,)
Initial state occupation prior distribution.
algorithm : string, one of the decoder_algorithms
Decoder algorithm.
random_state: RandomState or an int seed (0 by default)
A random number generator instance
n_iter : int, optional
Number of iterations to perform.
thresh : float, optional
Convergence threshold.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
emmission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emmission parameters. Defaults to all
parameters.
See Also
--------
GMM : Gaussian mixture model
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1, startprob=None, transmat=None,
startprob_prior=None, transmat_prior=None,
algorithm="viterbi", random_state=None,
n_iter=10, thresh=1e-2, params=string.ascii_letters,
init_params=string.ascii_letters):
# TODO: move all validation from descriptors to 'fit' and 'predict'.
self.n_components = n_components
self.n_iter = n_iter
self.thresh = thresh
self.params = params
self.init_params = init_params
self.startprob_ = startprob
self.startprob_prior = startprob_prior
self.transmat_ = transmat
self.transmat_prior = transmat_prior
self.algorithm = algorithm
self.random_state = random_state
self.num_iterations_performed = 0
self.threshold_reached = 999999
def eval(self, X):
return self.score_samples(X)
def score_samples(self, obs):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
logprob : float
Log likelihood of the sequence ``obs``.
posteriors : array_like, shape (n, n_components)
Posterior probabilities of each state for each
observation
See Also
--------
score : Compute the log probability under the model
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
posteriors += np.finfo(np.float64).eps
posteriors /= np.sum(posteriors, axis=1).reshape((-1, 1))
return logprob, posteriors
def score(self, obs):
"""Compute the log probability under the model.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : float
Log likelihood of the ``obs``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors
decode : Find most likely state sequence corresponding to a `obs`
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
logprob, _ = self._do_forward_pass(framelogprob)
return logprob
def _decode_viterbi(self, obs):
"""Find most likely state sequence corresponding to ``obs``.
Uses the Viterbi algorithm.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
viterbi_logprob : float
Log probability of the maximum likelihood path through the HMM.
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model
"""
obs = np.asarray(obs)
framelogprob = self._compute_log_likelihood(obs)
viterbi_logprob, state_sequence = self._do_viterbi_pass(framelogprob)
return viterbi_logprob, state_sequence
def _decode_map(self, obs):
"""Find most likely state sequence corresponding to `obs`.
Uses the maximum a posteriori estimation.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
map_logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
_, posteriors = self.score_samples(obs)
state_sequence = np.argmax(posteriors, axis=1)
map_logprob = np.max(posteriors, axis=1).sum()
return map_logprob, state_sequence
def decode(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to ``obs``.
Uses the selected algorithm for decoding.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
algorithm : string, one of the `decoder_algorithms`
decoder algorithm to be used
Returns
-------
logprob : float
Log probability of the maximum likelihood path through the HMM
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
if self.algorithm in decoder_algorithms:
algorithm = self.algorithm
elif algorithm in decoder_algorithms:
algorithm = algorithm
decoder = {"viterbi": self._decode_viterbi,
"map": self._decode_map}
logprob, state_sequence = decoder[algorithm](obs)
return logprob, state_sequence
def predict(self, obs, algorithm="viterbi"):
"""Find most likely state sequence corresponding to `obs`.
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
state_sequence : array_like, shape (n,)
Index of the most likely states for each observation
"""
_, state_sequence = self.decode(obs, algorithm)
return state_sequence
def predict_proba(self, obs):
"""Compute the posterior probability for each state in the model
Parameters
----------
obs : array_like, shape (n, n_features)
Sequence of n_features-dimensional data points. Each row
corresponds to a single point in the sequence.
Returns
-------
T : array-like, shape (n, n_components)
Returns the probability of the sample for each state in the model.
"""
_, posteriors = self.score_samples(obs)
return posteriors
def sample(self, n=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n : int
Number of samples to generate.
random_state: RandomState or an int seed (0 by default)
A random number generator instance. If None is given, the
object's random_state is used
Returns
-------
(obs, hidden_states)
obs : array_like, length `n` List of samples
hidden_states : array_like, length `n` List of hidden states
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_pdf = self.startprob_
startprob_cdf = np.cumsum(startprob_pdf)
transmat_pdf = self.transmat_
transmat_cdf = np.cumsum(transmat_pdf, 1)
# Initial state.
rand = random_state.rand()
currstate = (startprob_cdf > rand).argmax()
hidden_states = [currstate]
obs = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for _ in range(n - 1):
rand = random_state.rand()
currstate = (transmat_cdf[currstate] > rand).argmax()
hidden_states.append(currstate)
obs.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.array(obs), np.array(hidden_states, dtype=int)
def fit(self, obs):
"""Estimate model parameters.
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, pass proper
``init_params`` keyword argument to estimator's constructor.
Parameters
----------
obs : list
List of array-like observation sequences, each of which
has shape (n_i, n_features), where n_i is the length of
the i_th observation.
Notes
-----
In general, `logprob` should be non-decreasing unless
aggressive pruning is used. Decreasing `logprob` is generally
a sign of overfitting (e.g. a covariance parameter getting too
small). You can fix this by getting more training data,
or strengthening the appropriate subclass-specific regularization
parameter.
"""
self._init(obs, self.init_params)
logprob = []
for i in range(self.n_iter):
# Expectation step
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for seq in obs:
framelogprob = self._compute_log_likelihood(seq)
lpr, fwdlattice = self._do_forward_pass(framelogprob)
bwdlattice = self._do_backward_pass(framelogprob)
gamma = fwdlattice + bwdlattice
posteriors = np.exp(gamma.T - logsumexp(gamma, axis=1)).T
curr_logprob += lpr
self._accumulate_sufficient_statistics(
stats, seq, framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
logprob.append(curr_logprob)
# Check for convergence.
self.num_iterations_performed = i
if i > 0 and logprob[-1] - logprob[-2] < self.thresh:
self.threshold_reached = logprob[-1] - logprob[-2]
break
# Maximization step
self._do_mstep(stats, self.params)
return self
def _get_algorithm(self):
"decoder algorithm"
return self._algorithm
def _set_algorithm(self, algorithm):
if algorithm not in decoder_algorithms:
raise ValueError("algorithm must be one of the decoder_algorithms")
self._algorithm = algorithm
algorithm = property(_get_algorithm, _set_algorithm)
def _get_startprob(self):
"""Mixing startprob for each state."""
return np.exp(self._log_startprob)
def _set_startprob(self, startprob):
if startprob is None:
startprob = np.tile(1.0 / self.n_components, self.n_components)
else:
startprob = np.asarray(startprob, dtype=np.float)
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(startprob):
normalize(startprob)
if len(startprob) != self.n_components:
raise ValueError('startprob must have length n_components')
if not np.allclose(np.sum(startprob), 1.0):
raise ValueError('startprob must sum to 1.0')
self._log_startprob = np.log(np.asarray(startprob).copy())
startprob_ = property(_get_startprob, _set_startprob)
def _get_transmat(self):
"""Matrix of transition probabilities."""
return np.exp(self._log_transmat)
def _set_transmat(self, transmat):
if transmat is None:
transmat = np.tile(1.0 / self.n_components,
(self.n_components, self.n_components))
# check if there exists a component whose value is exactly zero
# if so, add a small number and re-normalize
if not np.alltrue(transmat):
normalize(transmat, axis=1)
if (np.asarray(transmat).shape
!= (self.n_components, self.n_components)):
raise ValueError('transmat must have shape '
'(n_components, n_components)')
if not np.all(np.allclose(np.sum(transmat, axis=1), 1.0)):
raise ValueError('Rows of transmat must sum to 1.0')
self._log_transmat = np.log(np.asarray(transmat).copy())
underflow_idx = np.isnan(self._log_transmat)
self._log_transmat[underflow_idx] = NEGINF
transmat_ = property(_get_transmat, _set_transmat)
def _do_viterbi_pass(self, framelogprob):
    """Viterbi decoding of *framelogprob*; returns (logprob, state_sequence)."""
    n_obs, n_comp = framelogprob.shape
    state_sequence, logprob = _hmmc._viterbi(
        n_obs, n_comp, self._log_startprob, self._log_transmat, framelogprob)
    return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
    """Forward algorithm; returns (total log-likelihood, forward lattice)."""
    n_obs, n_comp = framelogprob.shape
    fwdlattice = np.zeros((n_obs, n_comp))
    _hmmc._forward(n_obs, n_comp, self._log_startprob,
                   self._log_transmat, framelogprob, fwdlattice)
    # total likelihood = log-sum over states at the final frame
    return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
    """Backward algorithm; returns the backward lattice."""
    n_obs, n_comp = framelogprob.shape
    bwdlattice = np.zeros((n_obs, n_comp))
    _hmmc._backward(n_obs, n_comp, self._log_startprob,
                    self._log_transmat, framelogprob, bwdlattice)
    return bwdlattice
def _compute_log_likelihood(self, obs):
    """Per-state log-likelihood of *obs*; no-op here, presumably overridden
    by concrete subclasses."""
    pass

def _generate_sample_from_state(self, state, random_state=None):
    """Draw one observation from *state*'s emission distribution; no-op
    here, presumably overridden by concrete subclasses."""
    pass
def _init(self, obs, params):
if 's' in params:
self.startprob_.fill(1.0 / self.n_components)
if 't' in params:
self.transmat_.fill(1.0 / self.n_components)
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
                                      posteriors, fwdlattice, bwdlattice,
                                      params):
    """Add one observation sequence's contribution to *stats* in place.

    Updates 'nobs', 'start' (posterior of the first frame) and 'trans'
    (expected transition counts), honouring the letters in *params*
    ('s' = startprob, 't' = transmat).  *seq* itself is not read here;
    presumably subclasses use it -- confirm against callers.
    """
    stats['nobs'] += 1
    if 's' in params:
        stats['start'] += posteriors[0]
    if 't' in params:
        n_observations, n_components = framelogprob.shape
        # when the sample is of length 1, it contains no transitions
        # so there is no reason to update our trans. matrix estimate
        if n_observations <= 1:
            return
        # lneta is filled in by the C helper; summed over time and
        # exponentiated it gives the expected transition counts
        lneta = np.zeros((n_observations - 1, n_components, n_components))
        _hmmc._compute_lneta(n_observations, n_components, fwdlattice,
                             self._log_transmat, bwdlattice, framelogprob,
                             lneta)
        stats['trans'] += np.exp(logsumexp(lneta, axis=0))
def _do_mstep(self, stats, params):
    """M-step: re-estimate startprob_/transmat_ from accumulated *stats*.

    The priors act as Dirichlet pseudo-counts; the 1e-20 floor keeps
    every entry strictly positive so the later log() cannot blow up.
    Note the side effect: None priors are replaced with 1.0 on self.
    """
    # Based on Huang, Acero, Hon, "Spoken Language Processing",
    # p. 443 - 445
    if self.startprob_prior is None:
        self.startprob_prior = 1.0
    if self.transmat_prior is None:
        self.transmat_prior = 1.0
    if 's' in params:
        self.startprob_ = normalize(
            np.maximum(self.startprob_prior - 1.0 + stats['start'], 1e-20))
    if 't' in params:
        transmat_ = normalize(
            np.maximum(self.transmat_prior - 1.0 + stats['trans'], 1e-20),
            axis=1)
        self.transmat_ = transmat_
|
# -*- encoding: utf-8
import time
class Retry(object):
    """Retry a callable on IOError/OSError, optionally backing off.

    Usable either as a decorator (``@Retry(...)``) or directly via
    ``Retry(...).call(func, *args, **kwargs)``.  Up to ``max_retries``
    retries are made after the initial attempt, sleeping
    ``retry_interval`` seconds between attempts; when
    ``increase_retry_interval`` is true the interval doubles per retry,
    capped at ``max_retry_interval``.
    """

    def __init__(self, max_retries=2, retry_interval=1, max_retry_interval=5,
                 increase_retry_interval=True):
        self._max_retries = max_retries
        self._retry_interval = retry_interval
        self._max_retry_interval = max_retry_interval
        self._increase_retry_interval = increase_retry_interval

    def __call__(self, func):
        # local import so the module's import block stays untouched
        import functools

        # BUG FIX: the wrapper previously dropped func's metadata
        # (__name__, __doc__); functools.wraps preserves it.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return self.call(func, *args, **kwargs)
        return wrapper

    def call(self, func, *args, **kwargs):
        """Invoke *func* with the retry policy; returns its result.

        BUG FIX: the retry loop now lives here, so call() no longer
        builds a throwaway decorator wrapper on every invocation.
        """
        interval = self._retry_interval
        remaining_retries = self._max_retries
        while True:
            try:
                return func(*args, **kwargs)
            except (IOError, OSError):
                if remaining_retries <= 0:
                    raise
                time.sleep(interval)
                if self._increase_retry_interval:
                    interval = min(interval * 2, self._max_retry_interval)
                remaining_retries -= 1
Update python example
# -*- encoding: utf-8
import time
import functools
class Retry(object):
    """Retry a callable on IOError/OSError with optional exponential backoff.

    Works as a decorator (``@Retry(...)``) or directly through
    ``Retry(...).call(func, ...)``.  After the initial attempt, up to
    ``max_retries`` further attempts are made, sleeping between them.
    """

    def __init__(self, max_retries=2, retry_interval=1, max_retry_interval=5,
                 increase_retry_interval=True):
        self._max_retries = max_retries
        self._retry_interval = retry_interval
        self._max_retry_interval = max_retry_interval
        self._increase_retry_interval = increase_retry_interval

    def __call__(self, func):
        @functools.wraps(func)
        def retrying(*args, **kwargs):
            return self.call(func, *args, **kwargs)
        return retrying

    def call(self, func, *args, **kwargs):
        """Invoke *func*, retrying per the configured policy."""
        delay = self._retry_interval
        # each loop iteration is one attempt followed by a sleep; the
        # final attempt happens after the loop and propagates its error
        for _ in range(self._max_retries):
            try:
                return func(*args, **kwargs)
            except (IOError, OSError):
                time.sleep(delay)
                if self._increase_retry_interval:
                    delay = min(delay * 2, self._max_retry_interval)
        return func(*args, **kwargs)
|
#!/usr/bin/env python3
import datetime
import logging
import os
import random
import re
import signal
import subprocess
import sys
import telnetlib
import time
import IPy
import vrnetlab
def handle_SIGCHLD(signal, frame):
    """Reap any exited child (non-blocking) so qemu children don't zombify."""
    os.waitpid(-1, os.WNOHANG)

def handle_SIGTERM(signal, frame):
    """Exit cleanly when asked to terminate."""
    sys.exit(0)

# SIGINT and SIGTERM both trigger a clean exit; SIGCHLD reaps children
signal.signal(signal.SIGINT, handle_SIGTERM)
signal.signal(signal.SIGTERM, handle_SIGTERM)
signal.signal(signal.SIGCHLD, handle_SIGCHLD)
# Custom TRACE log level just below DEBUG (10) for very chatty
# serial-console output.
TRACE_LEVEL_NUM = 9
logging.addLevelName(TRACE_LEVEL_NUM, "TRACE")

def trace(self, message, *args, **kws):
    """Log *message* at TRACE level on this logger."""
    # Yes, logger takes its '*args' as 'args'.
    if self.isEnabledFor(TRACE_LEVEL_NUM):
        self._log(TRACE_LEVEL_NUM, message, args, **kws)

# monkey-patch so every Logger instance gains .trace()
logging.Logger.trace = trace
class VMX_vcp(vrnetlab.VM):
    """Juniper vMX virtual control plane (routing engine) VM.

    In install_mode the VM is booted once so it can apply its first-boot
    config, then powered off (see VMX_installer).
    """

    def __init__(self, username, password, image, version, install_mode=False, extra_config=None):
        super(VMX_vcp, self).__init__(username, password, disk_image=image, ram=2048, extra_config=extra_config)
        self.install_mode = install_mode
        # the VCP exposes no data-plane NICs; those belong to the vFPC
        self.num_nics = 0
        self.qemu_args.extend(["-drive", "if=ide,file=/vmx/vmxhdd.img"])
        # SMBIOS strings the vMX image checks to identify its platform
        self.smbios = ["type=0,vendor=Juniper",
                       "type=1,manufacturer=Juniper,product=VM-vcp_vmx2-161-re-0,version=0.1.0"]
        # insert juniper config file into metadata image to prevent auto-image-upgrades
        if self.install_mode and version.startswith('18'):
            self.insert_juniper_config()
        # add metadata image if it exists
        if os.path.exists("/vmx/metadata-usb-re.img"):
            self.qemu_args.extend(
                ["-usb", "-drive", "id=my_usb_disk,media=disk,format=raw,file=/vmx/metadata-usb-re.img,if=none",
                 "-device", "usb-storage,drive=my_usb_disk"])

    def start(self):
        """Start the VM and attach its internal NIC to the int_cp bridge."""
        # use parent class start() function
        super(VMX_vcp, self).start()
        # add interface to internal control plane bridge
        if not self.install_mode:
            vrnetlab.run_command(["brctl", "addif", "int_cp", "vcp-int"])
            vrnetlab.run_command(["ip", "link", "set", "vcp-int", "up"])

    def gen_mgmt(self):
        """ Generate mgmt interface(s)

            We override the default function since we want a virtio NIC to the
            vFPC
        """
        # call parent function to generate first mgmt interface (e1000)
        res = super(VMX_vcp, self).gen_mgmt()
        if not self.install_mode:
            # add virtio NIC for internal control plane interface to vFPC
            res.append("-device")
            res.append("virtio-net-pci,netdev=vcp-int,mac=%s" % vrnetlab.gen_mac(1))
            res.append("-netdev")
            res.append("tap,ifname=vcp-int,id=vcp-int,script=no,downscript=no")
        return res

    def bootstrap_spin(self):
        """ This function should be called periodically to do work.

            returns False when it has failed and given up, otherwise True
        """
        if self.spins > 300:
            # too many spins with no result -> restart
            self.logger.warning("no output from serial console, restarting VCP")
            self.stop()
            self.start()
            self.spins = 0
            return

        # watch the serial console for a login prompt or a root shell
        (ridx, match, res) = self.tn.expect([b"login:", b"root@(%|:~ #)"], 1)
        if match:  # got a match!
            if ridx == 0:  # matched login prompt, so should login
                self.logger.info("matched login prompt")
                self.wait_write("root", wait=None)
            if ridx == 1:
                if self.install_mode:
                    # first boot finished its setup; power off for the image build
                    self.logger.info("requesting power-off")
                    self.wait_write("cli", None)
                    self.wait_write("request system power-off", '>')
                    self.wait_write("yes", 'Power Off the system')
                    self.running = True
                    return
                # run main config!
                self.bootstrap_config()
                self.running = True
                self.tn.close()
                # calc startup time
                startup_time = datetime.datetime.now() - self.start_time
                self.logger.info("Startup complete in: %s" % startup_time)
                return
        else:
            # no match, if we saw some output from the router it's probably
            # booting, so let's give it some more time
            if res != b'':
                self.logger.trace("OUTPUT VCP: %s" % res.decode())
                # reset spins if we saw some output
                self.spins = 0
        self.spins += 1

    def bootstrap_config(self):
        """ Do the actual bootstrap config
        """
        self.wait_write("cli", None)
        self.wait_write("configure", '>', 10)
        self.wait_write("set chassis fpc 0 pic 0 number-of-ports 96")
        self.wait_write("set system services ssh")
        self.wait_write("set system services netconf ssh")
        self.wait_write("set system services netconf rfc-compliant")
        self.wait_write("set system login user %s class super-user authentication plain-text-password" % self.username)
        self.wait_write(self.password, 'New password:')
        self.wait_write(self.password, 'Retype new password:')
        self.wait_write("set system root-authentication plain-text-password")
        self.wait_write(self.password, 'New password:')
        self.wait_write(self.password, 'Retype new password:')
        # static mgmt address inside qemu's user-mode network
        self.wait_write("set interfaces fxp0 unit 0 family inet address 10.0.0.15/24")
        self.wait_write("delete interfaces fxp0 unit 0 family inet dhcp")
        self.wait_write("delete system processes dhcp-service")
        # delete auto-image-upgrade so VMX won't restart in VMX 18
        self.wait_write("delete chassis auto-image-upgrade")
        for line in self.extra_config:
            self.wait_write(line)
        self.wait_write("commit")
        self.wait_write("exit")

    def wait_write(self, cmd, wait='#', timeout=None):
        """ Wait for something and then send command

            While waiting, the "Retry connection attempts" question is
            auto-answered with "yes".  NOTE(review): with no timeout and
            no match this loop spins forever -- confirm that is intended.
        """
        if wait:
            self.logger.trace("Waiting for %s" % wait)
            while True:
                (ridx, match, res) = self.tn.expect([wait.encode(), b"Retry connection attempts"], timeout=timeout)
                if match:
                    if ridx == 0:
                        break
                    if ridx == 1:
                        self.tn.write("yes\r".encode())
            self.logger.trace("Read: %s" % res.decode())
        self.logger.debug("writing to serial console: %s" % cmd)
        self.tn.write("{}\r".format(cmd).encode())

    def insert_juniper_config(self):
        """Add an empty juniper.conf to the metadata image so JUNOS 18 does
        not attempt an auto-image-upgrade on first boot."""
        vrnetlab.run_command(["mount", "-o", "loop", "/vmx/metadata-usb-re.img", "/mnt"])
        vrnetlab.run_command(["mkdir", "/tmp/vmm-config"])
        vrnetlab.run_command(["tar", "-xzvf", "/mnt/vmm-config.tgz", "-C", "/tmp/vmm-config"])
        vrnetlab.run_command(["mkdir", "/tmp/vmm-config/config"])
        vrnetlab.run_command(["touch", "/tmp/vmm-config/config/juniper.conf"])
        vrnetlab.run_command(["tar", "zcf", "vmm-config.tgz", "-C", "/tmp/vmm-config", "."])
        vrnetlab.run_command(["cp", "vmm-config.tgz", "/mnt/vmm-config.tgz"])
        vrnetlab.run_command(["umount", "/mnt"])
class VMX_vfpc(vrnetlab.VM):
    """Juniper vMX forwarding plane (vFPC) VM."""

    def __init__(self, version):
        super(VMX_vfpc, self).__init__(None, None, disk_image = "/vmx/vfpc.img", num=1)
        self.version = version
        self.num_nics = 96
        self.nic_type = "virtio-net-pci"
        self.qemu_args.extend(["-cpu", "SandyBridge", "-M", "pc", "-smp", "3"])
        # add metadata image if it exists
        if os.path.exists("/vmx/metadata-usb-fpc0.img"):
            self.qemu_args.extend(
                ["-usb", "-drive", "id=fpc_usb_disk,media=disk,format=raw,file=/vmx/metadata-usb-fpc0.img,if=none",
                 "-device", "usb-storage,drive=fpc_usb_disk"])

    def gen_mgmt(self):
        """Generate qemu args for the mgmt and internal control-plane NICs."""
        res = []
        # mgmt interface
        res.extend(["-device", "virtio-net-pci,netdev=mgmt,mac=%s" % vrnetlab.gen_mac(0)])
        res.extend(["-netdev", "user,id=mgmt,net=10.0.0.0/24"])
        # internal control plane interface to vFPC
        res.extend(["-device", "virtio-net-pci,netdev=vfpc-int,mac=%s" %
                    vrnetlab.gen_mac(0)])
        res.extend(["-netdev",
                    "tap,ifname=vfpc-int,id=vfpc-int,script=no,downscript=no"])
        # BUG FIX: 18.4R1.8 also needs the dummy NIC (issue #9); without it
        # the interface numbering is misaligned
        if self.version in ('15.1F6.9', '16.1R2.11', '17.2R1.13', '18.2R2.6', '18.4R1.8'):
            # dummy interface for some vMX versions - not sure why vFPC wants
            # it but without it we get a misalignment
            res.extend(["-device", "virtio-net-pci,netdev=dummy,mac=%s" %
                        vrnetlab.gen_mac(0)])
            res.extend(["-netdev", "tap,ifname=vfpc-dummy,id=dummy,script=no,downscript=no"])
        return res

    def start(self):
        """Start the VM and attach its internal NIC to the int_cp bridge."""
        # use parent class start() function
        super(VMX_vfpc, self).start()
        # add interface to internal control plane bridge
        vrnetlab.run_command(["brctl", "addif", "int_cp", "vfpc-int"])
        vrnetlab.run_command(["ip", "link", "set", "vfpc-int", "up"])

    def bootstrap_spin(self):
        """Poll the serial console: mark running on a login prompt, restart
        the VM when the known mount failure is seen."""
        (ridx, match, res) = self.tn.expect([b"localhost login", b"qemux86-64 login", b"mounting /dev/sda2 on /mnt failed"], 1)
        if match:
            if ridx in (0, 1):  # got login - vFPC start succeeded!
                self.logger.info("vFPC successfully started")
                self.running = True
            if ridx == 2:  # vFPC start failed - restart it
                self.logger.info("vFPC start failed, restarting")
                self.stop()
                self.start()
        if res != b'':
            pass
            # self.logger.trace("OUTPUT VFPC: %s" % res.decode())
        return
class VMX(vrnetlab.VR):
    """ Juniper vMX router

        Composed of one control-plane VM (VMX_vcp) and one forwarding-plane
        VM (VMX_vfpc), joined by the int_cp Linux bridge.
    """
    def __init__(self, username, password, extra_config=None):
        self.version = None
        self.version_info = []
        # determine the JUNOS version / VCP image before spawning the VMs
        self.read_version()
        super(VMX, self).__init__(username, password)
        self.vms = [ VMX_vcp(username, password, "/vmx/" + self.vcp_image, self.version, extra_config=extra_config), VMX_vfpc(self.version) ]
        # set up bridge for connecting VCP with vFPC
        vrnetlab.run_command(["brctl", "addbr", "int_cp"])
        vrnetlab.run_command(["ip", "link", "set", "int_cp", "up"])

    def read_version(self):
        """Scan /vmx/ for the VCP image and parse the JUNOS version from
        its filename (e.g. "...-18.2R1.9" -> major/minor/type/build/patch)."""
        for e in os.listdir("/vmx/"):
            m = re.search("-(([0-9][0-9])\.([0-9])([A-Z])([0-9]+)(\-[SD][0-9]*)?\.([0-9]+))", e)
            if m:
                self.vcp_image = e
                self.version = m.group(1)
                self.version_info = [int(m.group(2)), int(m.group(3)), m.group(4), int(m.group(5)), int(m.group(7))]
class VMX_installer(VMX):
    """ VMX installer

        Will start the VMX VCP and then shut it down. Booting the VCP for the
        first time requires the VCP itself to load some config and then it will
        restart. Subsequent boots will not require this restart. By running
        this "install" when building the docker image we can decrease the
        normal startup time of the vMX.
    """
    def __init__(self, username, password):
        self.version = None
        self.version_info = []
        self.read_version()
        # NOTE: deliberately super(VMX, ...) -- skips VMX.__init__ so no
        # vFPC is spawned and no bridge is created during install
        super(VMX, self).__init__(username, password)
        self.vms = [ VMX_vcp(username, password, "/vmx/" + self.vcp_image, self.version, install_mode=True) ]

    def install(self):
        """Boot the VCP once, wait up to ~600s for a clean power-off, stop it."""
        self.logger.info("Installing VMX")
        vcp = self.vms[0]
        while not vcp.running:
            vcp.work()
        # wait for system to shut down cleanly
        for i in range(0, 600):
            time.sleep(1)
            try:
                # raises once the qemu process has exited
                vcp.p.communicate(timeout=1)
            except subprocess.TimeoutExpired:
                pass
            except Exception as exc:
                # assume it's dead
                self.logger.info("Can't communicate with qemu process, assuming VM has shut down properly." + str(exc))
                break
            try:
                (ridx, match, res) = vcp.tn.expect([b"Powering system off"], 1)
                if res != b'':
                    self.logger.trace("OUTPUT VCP: %s" % res.decode())
            except Exception as exc:
                # assume it's dead
                self.logger.info("Can't communicate over serial console, assuming VM has shut down properly." + str(exc))
                break
        vcp.stop()
        self.logger.info("Installation complete")
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--trace', action='store_true', help='enable trace level logging')
    parser.add_argument('--username', default='vrnetlab', help='Username')
    parser.add_argument('--password', default='VR-netlab9', help='Password')
    parser.add_argument('--install', action='store_true', help='Install vMX')
    parser.add_argument('--extra-config', action='append', default=[], help='Configure vMX with commands on startup')
    args = parser.parse_args()

    LOG_FORMAT = "%(asctime)s: %(module)-10s %(levelname)-8s %(message)s"
    logging.basicConfig(format=LOG_FORMAT)
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    if args.trace:
        # level 1 sits below TRACE_LEVEL_NUM (9), so trace messages pass
        logger.setLevel(1)

    if args.install:
        # one-shot first boot to bake the VCP config into the docker image
        vr = VMX_installer(args.username, args.password)
        vr.install()
    else:
        vr = VMX(args.username, args.password, args.extra_config)
        vr.start()
Detect vFPC started for vMX 18.4, for #9
#!/usr/bin/env python3
import datetime
import logging
import os
import random
import re
import signal
import subprocess
import sys
import telnetlib
import time
import IPy
import vrnetlab
def handle_SIGCHLD(signal, frame):
os.waitpid(-1, os.WNOHANG)
def handle_SIGTERM(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, handle_SIGTERM)
signal.signal(signal.SIGTERM, handle_SIGTERM)
signal.signal(signal.SIGCHLD, handle_SIGCHLD)
TRACE_LEVEL_NUM = 9
logging.addLevelName(TRACE_LEVEL_NUM, "TRACE")
def trace(self, message, *args, **kws):
# Yes, logger takes its '*args' as 'args'.
if self.isEnabledFor(TRACE_LEVEL_NUM):
self._log(TRACE_LEVEL_NUM, message, args, **kws)
logging.Logger.trace = trace
class VMX_vcp(vrnetlab.VM):
def __init__(self, username, password, image, version, install_mode=False, extra_config=None):
super(VMX_vcp, self).__init__(username, password, disk_image=image, ram=2048, extra_config=extra_config)
self.install_mode = install_mode
self.num_nics = 0
self.qemu_args.extend(["-drive", "if=ide,file=/vmx/vmxhdd.img"])
self.smbios = ["type=0,vendor=Juniper",
"type=1,manufacturer=Juniper,product=VM-vcp_vmx2-161-re-0,version=0.1.0"]
# insert juniper config file into metadata image to prevent auto-image-upgrades
if self.install_mode and version.startswith('18'):
self.insert_juniper_config()
# add metadata image if it exists
if os.path.exists("/vmx/metadata-usb-re.img"):
self.qemu_args.extend(
["-usb", "-drive", "id=my_usb_disk,media=disk,format=raw,file=/vmx/metadata-usb-re.img,if=none",
"-device", "usb-storage,drive=my_usb_disk"])
def start(self):
# use parent class start() function
super(VMX_vcp, self).start()
# add interface to internal control plane bridge
if not self.install_mode:
vrnetlab.run_command(["brctl", "addif", "int_cp", "vcp-int"])
vrnetlab.run_command(["ip", "link", "set", "vcp-int", "up"])
def gen_mgmt(self):
""" Generate mgmt interface(s)
We override the default function since we want a virtio NIC to the
vFPC
"""
# call parent function to generate first mgmt interface (e1000)
res = super(VMX_vcp, self).gen_mgmt()
if not self.install_mode:
# add virtio NIC for internal control plane interface to vFPC
res.append("-device")
res.append("virtio-net-pci,netdev=vcp-int,mac=%s" % vrnetlab.gen_mac(1))
res.append("-netdev")
res.append("tap,ifname=vcp-int,id=vcp-int,script=no,downscript=no")
return res
def bootstrap_spin(self):
""" This function should be called periodically to do work.
returns False when it has failed and given up, otherwise True
"""
if self.spins > 300:
# too many spins with no result -> restart
self.logger.warning("no output from serial console, restarting VCP")
self.stop()
self.start()
self.spins = 0
return
(ridx, match, res) = self.tn.expect([b"login:", b"root@(%|:~ #)"], 1)
if match: # got a match!
if ridx == 0: # matched login prompt, so should login
self.logger.info("matched login prompt")
self.wait_write("root", wait=None)
if ridx == 1:
if self.install_mode:
self.logger.info("requesting power-off")
self.wait_write("cli", None)
self.wait_write("request system power-off", '>')
self.wait_write("yes", 'Power Off the system')
self.running = True
return
# run main config!
self.bootstrap_config()
self.running = True
self.tn.close()
# calc startup time
startup_time = datetime.datetime.now() - self.start_time
self.logger.info("Startup complete in: %s" % startup_time)
return
else:
# no match, if we saw some output from the router it's probably
# booting, so let's give it some more time
if res != b'':
self.logger.trace("OUTPUT VCP: %s" % res.decode())
# reset spins if we saw some output
self.spins = 0
self.spins += 1
def bootstrap_config(self):
""" Do the actual bootstrap config
"""
self.wait_write("cli", None)
self.wait_write("configure", '>', 10)
self.wait_write("set chassis fpc 0 pic 0 number-of-ports 96")
self.wait_write("set system services ssh")
self.wait_write("set system services netconf ssh")
self.wait_write("set system services netconf rfc-compliant")
self.wait_write("set system login user %s class super-user authentication plain-text-password" % self.username)
self.wait_write(self.password, 'New password:')
self.wait_write(self.password, 'Retype new password:')
self.wait_write("set system root-authentication plain-text-password")
self.wait_write(self.password, 'New password:')
self.wait_write(self.password, 'Retype new password:')
self.wait_write("set interfaces fxp0 unit 0 family inet address 10.0.0.15/24")
self.wait_write("delete interfaces fxp0 unit 0 family inet dhcp")
self.wait_write("delete system processes dhcp-service")
# delete auto-image-upgrade so VMX won't restart in VMX 18
self.wait_write("delete chassis auto-image-upgrade")
for line in self.extra_config:
self.wait_write(line)
self.wait_write("commit")
self.wait_write("exit")
def wait_write(self, cmd, wait='#', timeout=None):
""" Wait for something and then send command
"""
if wait:
self.logger.trace("Waiting for %s" % wait)
while True:
(ridx, match, res) = self.tn.expect([wait.encode(), b"Retry connection attempts"], timeout=timeout)
if match:
if ridx == 0:
break
if ridx == 1:
self.tn.write("yes\r".encode())
self.logger.trace("Read: %s" % res.decode())
self.logger.debug("writing to serial console: %s" % cmd)
self.tn.write("{}\r".format(cmd).encode())
def insert_juniper_config(self):
vrnetlab.run_command(["mount", "-o", "loop", "/vmx/metadata-usb-re.img", "/mnt"])
vrnetlab.run_command(["mkdir", "/tmp/vmm-config"])
vrnetlab.run_command(["tar", "-xzvf", "/mnt/vmm-config.tgz", "-C", "/tmp/vmm-config"])
vrnetlab.run_command(["mkdir", "/tmp/vmm-config/config"])
vrnetlab.run_command(["touch", "/tmp/vmm-config/config/juniper.conf"])
vrnetlab.run_command(["tar", "zcf", "vmm-config.tgz", "-C", "/tmp/vmm-config", "."])
vrnetlab.run_command(["cp", "vmm-config.tgz", "/mnt/vmm-config.tgz"])
vrnetlab.run_command(["umount", "/mnt"])
class VMX_vfpc(vrnetlab.VM):
    """Juniper vMX forwarding plane (vFPC) VM."""

    def __init__(self, version):
        super(VMX_vfpc, self).__init__(None, None, disk_image = "/vmx/vfpc.img", num=1)
        self.version = version
        self.num_nics = 96
        self.nic_type = "virtio-net-pci"
        self.qemu_args.extend(["-cpu", "SandyBridge", "-M", "pc", "-smp", "3"])
        # add metadata image if it exists
        if os.path.exists("/vmx/metadata-usb-fpc0.img"):
            self.qemu_args.extend(
                ["-usb", "-drive", "id=fpc_usb_disk,media=disk,format=raw,file=/vmx/metadata-usb-fpc0.img,if=none",
                 "-device", "usb-storage,drive=fpc_usb_disk"])

    def gen_mgmt(self):
        """Generate qemu args for the mgmt and internal control-plane NICs."""
        res = []
        # mgmt interface
        res.extend(["-device", "virtio-net-pci,netdev=mgmt,mac=%s" % vrnetlab.gen_mac(0)])
        res.extend(["-netdev", "user,id=mgmt,net=10.0.0.0/24"])
        # internal control plane interface to vFPC
        res.extend(["-device", "virtio-net-pci,netdev=vfpc-int,mac=%s" %
                    vrnetlab.gen_mac(0)])
        res.extend(["-netdev",
                    "tap,ifname=vfpc-int,id=vfpc-int,script=no,downscript=no"])
        if self.version in ('15.1F6.9', '16.1R2.11', '17.2R1.13', '18.2R2.6', '18.4R1.8'):
            # dummy interface for some vMX versions - not sure why vFPC wants
            # it but without it we get a misalignment
            res.extend(["-device", "virtio-net-pci,netdev=dummy,mac=%s" %
                        vrnetlab.gen_mac(0)])
            res.extend(["-netdev", "tap,ifname=vfpc-dummy,id=dummy,script=no,downscript=no"])
        return res

    def start(self):
        """Start the VM and attach its internal NIC to the int_cp bridge."""
        # use parent class start() function
        super(VMX_vfpc, self).start()
        # add interface to internal control plane bridge
        vrnetlab.run_command(["brctl", "addif", "int_cp", "vfpc-int"])
        vrnetlab.run_command(["ip", "link", "set", "vfpc-int", "up"])

    def bootstrap_spin(self):
        """Poll the serial console: mark running on a login prompt, restart
        the VM when the known mount failure is seen."""
        (ridx, match, res) = self.tn.expect([b"localhost login", b"qemux86-64 login", b"mounting /dev/sda2 on /mnt failed"], 1)
        if match:
            if ridx in (0, 1):  # got login - vFPC start succeeded!
                self.logger.info("vFPC successfully started")
                self.running = True
            if ridx == 2:  # vFPC start failed - restart it
                self.logger.info("vFPC start failed, restarting")
                self.stop()
                self.start()
        if res != b'':
            pass
            # self.logger.trace("OUTPUT VFPC: %s" % res.decode())
        return
class VMX(vrnetlab.VR):
""" Juniper vMX router
"""
def __init__(self, username, password, extra_config=None):
self.version = None
self.version_info = []
self.read_version()
super(VMX, self).__init__(username, password)
self.vms = [ VMX_vcp(username, password, "/vmx/" + self.vcp_image, self.version, extra_config=extra_config), VMX_vfpc(self.version) ]
# set up bridge for connecting VCP with vFPC
vrnetlab.run_command(["brctl", "addbr", "int_cp"])
vrnetlab.run_command(["ip", "link", "set", "int_cp", "up"])
def read_version(self):
for e in os.listdir("/vmx/"):
m = re.search("-(([0-9][0-9])\.([0-9])([A-Z])([0-9]+)(\-[SD][0-9]*)?\.([0-9]+))", e)
if m:
self.vcp_image = e
self.version = m.group(1)
self.version_info = [int(m.group(2)), int(m.group(3)), m.group(4), int(m.group(5)), int(m.group(7))]
class VMX_installer(VMX):
""" VMX installer
Will start the VMX VCP and then shut it down. Booting the VCP for the
first time requires the VCP itself to load some config and then it will
restart. Subsequent boots will not require this restart. By running
this "install" when building the docker image we can decrease the
normal startup time of the vMX.
"""
def __init__(self, username, password):
self.version = None
self.version_info = []
self.read_version()
super(VMX, self).__init__(username, password)
self.vms = [ VMX_vcp(username, password, "/vmx/" + self.vcp_image, self.version, install_mode=True) ]
def install(self):
self.logger.info("Installing VMX")
vcp = self.vms[0]
while not vcp.running:
vcp.work()
# wait for system to shut down cleanly
for i in range(0, 600):
time.sleep(1)
try:
vcp.p.communicate(timeout=1)
except subprocess.TimeoutExpired:
pass
except Exception as exc:
# assume it's dead
self.logger.info("Can't communicate with qemu process, assuming VM has shut down properly." + str(exc))
break
try:
(ridx, match, res) = vcp.tn.expect([b"Powering system off"], 1)
if res != b'':
self.logger.trace("OUTPUT VCP: %s" % res.decode())
except Exception as exc:
# assume it's dead
self.logger.info("Can't communicate over serial console, assuming VM has shut down properly." + str(exc))
break
vcp.stop()
self.logger.info("Installation complete")
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument('--trace', action='store_true', help='enable trace level logging')
parser.add_argument('--username', default='vrnetlab', help='Username')
parser.add_argument('--password', default='VR-netlab9', help='Password')
parser.add_argument('--install', action='store_true', help='Install vMX')
parser.add_argument('--extra-config', action='append', default=[], help='Configure vMX with commands on startup')
args = parser.parse_args()
LOG_FORMAT = "%(asctime)s: %(module)-10s %(levelname)-8s %(message)s"
logging.basicConfig(format=LOG_FORMAT)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if args.trace:
logger.setLevel(1)
if args.install:
vr = VMX_installer(args.username, args.password)
vr.install()
else:
vr = VMX(args.username, args.password, args.extra_config)
vr.start()
|
from django.http import Http404
from django.shortcuts import render, redirect
from django.views.generic.edit import CreateView, UpdateView, FormView
from django.views.generic import DetailView, View, ListView
from django.contrib import messages
from django.shortcuts import get_object_or_404
from .models import User, Skill
from tag.models import Tag
from base.utils import send_template_mail
from django.contrib.auth import views as auth_views
import uuid
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login
from django.utils.decorators import method_decorator
from django.utils import timezone
from .models import User, UserActivation, UserPasswordResetting, UserReviewList
from .form import UserReviewListForm
from django.urls import reverse
from django.contrib.auth.mixins import UserPassesTestMixin
from django.utils import timezone
from django.forms import formset_factory
from event.models import Event
from django.utils import translation
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
class UserCreateView(CreateView):
    """Register a new user and send an account-activation email."""
    model = User
    fields = ['email', 'password', 'username']
    template_name = 'user/register.html'

    def form_valid(self, form):
        """Save the user (inactive), store an activation key and email it."""
        user = form.save(commit=False)
        user.set_password(form.cleaned_data['password'])
        # the account stays disabled until the emailed link is visited
        user.is_active = False
        user.save()
        activation_key = self.create_activation_key()
        activation = UserActivation(user=user, key=activation_key)
        activation.save()
        # [:3] keeps scheme + host, e.g. "https://example.com"
        base_url = "/".join(self.request.build_absolute_uri().split("/")[:3])
        activation_url = "{0}/user/activation/{1}".format(base_url,
                                                          activation_key)
        send_template_mail(
            "email/activation.txt",
            {"activation_url": activation_url},
            "Sovol Info <info@sovol.earth>",
            [user.email],
        )
        # NOTE(review): the translatable string has no %(email)s placeholder,
        # so the mapping below is effectively ignored -- confirm the intended
        # message before changing the translation key.
        info_msg = _("Confirmation email has been sent to your email address.") % {
            'email': user.email,
        }
        messages.info(self.request, info_msg)
        return redirect("top")

    def create_activation_key(self):
        """Return a random 32-char hex token for the activation URL.

        BUG FIX: this "def" line was missing, which left the two lines
        below stranded inside form_valid (after its return) and made the
        self.create_activation_key() call above raise AttributeError.
        """
        key = uuid.uuid4().hex
        return key

    def get_form(self):
        from django import forms
        form = super().get_form()
        # render the password field masked and tighten the username label
        form.fields['password'].widget = forms.PasswordInput()
        form.fields['username'].maxlength = 15
        form.fields['username'].label = _("Username (Up to 15 characters)")
        return form
class UserActivationView(View):
    """Activate an account from an emailed key, then log the user in."""
    def get(self, request, *args, **kwargs):
        # unknown keys 404
        activation = get_object_or_404(UserActivation, key=kwargs['key'])
        user = activation.user
        user.is_active = True
        user.save()
        login(request, user, "django.contrib.auth.backends.ModelBackend")
        messages.info(request, _("You have successfully registered!"))
        return redirect("top")
class RequestPasswordReset(View):
    """Email a password-reset URL to the given address (if registered).

    The same flash message is shown whether or not the address exists, so
    the endpoint does not leak which emails are registered.
    """

    def get(self, request, *args, **kwargs):
        return render(request, 'user/request_password_reset.html')

    def post(self, request, *args, **kwargs):
        email = request.POST.get('email')
        user = User.objects.filter(email=email)
        if user.exists():
            user = user.first()
            reset_key = self.create_reset_key()
            # re-use the existing resetting row when there is one
            if hasattr(user, "userpasswordresetting"):
                user.userpasswordresetting.key = reset_key
                user.userpasswordresetting.save()
            else:
                UserPasswordResetting(user=user, key=reset_key).save()
            # XXX: What does 3 mean?
            # XXX: os.path?
            absolute_uri = self.request.build_absolute_uri()
            base_url = "/".join(absolute_uri.split("/")[:3])
            reset_url = "{0}/user/reset_password/{1}".format(base_url,
                                                             reset_key)
            send_template_mail(
                "email/reset_password.txt",
                {"reset_url": reset_url},
                "Sovol Info<info@sovol.earth>",
                [user.email]
            )
        # BUG FIX: the three _() calls were juxtaposed with no operator
        # (a SyntaxError), and lazy translation proxies cannot simply be
        # '+'-concatenated; force str() inside the request (translation is
        # active here) and join the sentences.
        info_msg = " ".join([
            str(_("A password reset was requested.")),
            str(_("If the email address is registered,")),
            str(_("URL for resetting your password will be sent.")),
        ])
        messages.info(request, info_msg)
        return redirect("top")

    def create_reset_key(self):
        """Return a random 32-char hex token for the reset URL."""
        key = uuid.uuid4().hex
        return key
class ResetPassword(View):
    """Set a new password via the key emailed by RequestPasswordReset."""

    def get(self, request, *args, **kwargs):
        resetting = UserPasswordResetting.objects.filter(key=kwargs['key'])
        if not resetting.exists():
            messages.error(request, _("Invalid URL"))
            return redirect("top")
        else:
            return render(request, 'user/reset_password.html')

    def post(self, request, *args, **kwargs):
        password = request.POST.get('password')
        resetting = UserPasswordResetting.objects.filter(key=kwargs['key'])
        if resetting.exists():
            resetting = resetting.first()
        else:
            # BUG FIX: this line was missing its closing parenthesis,
            # which made the whole module a SyntaxError.
            messages.error(request, _("Failed to reset your password."))
            return redirect("top")
        user = resetting.user
        user.set_password(password)
        user.save()
        # log the user straight in with the new credentials
        login(request, user, "django.contrib.auth.backends.ModelBackend")
        messages.info(request, _("Your new password has been set."))
        return redirect("top")
class UserDetailView(DetailView):
    """Profile page for a single user (template: user/detail.html)."""
    template_name = 'user/detail.html'
    model = User
    context_object_name = 'user'

    def get(self, request, *args, **kwargs):
        # redirect to top with a flash message instead of a bare 404 page
        try:
            self.object = self.get_object()
        except Http404:
            messages.error(request, _("No user found"))
            return redirect('top')
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        context = super(UserDetailView, self).get_context_data(**kwargs)
        return context
class UserEditView(UpdateView):
    """Let the requesting user edit their own profile and followed tags."""
    model = User
    fields = [
        'username',
        'email',
        'image',
        'region',
        'sex',
        'birthday',
        'language',
    ]
    template_name = 'user/edit.html'

    def get_object(self, queryset=None):
        # always edit the requesting user; no pk is taken from the URL
        return self.request.user

    def get_context_data(self, **kwargs):
        context = super(UserEditView, self).get_context_data(**kwargs)
        # NOTE(review): Tag.objects.all is passed uncalled -- the template
        # invokes the callable; confirm this is intentional
        context['all_tags'] = Tag.objects.all
        context['languages'] = settings.LANGUAGES
        return context

    def form_valid(self, form):
        user = form.save(commit=False)
        # diff the submitted tag ids against the currently-followed set
        new_tags = set([int(t) for t in self.request.POST.getlist('tags')])
        old_tags = set([t.id for t in user.follow_tag.all()])
        for tag_id in new_tags - old_tags:
            user.follow_tag.add(tag_id)
        for tag_id in old_tags - new_tags:
            user.follow_tag.remove(tag_id)
        # switch the session language to the newly chosen one
        self.request.session[translation.LANGUAGE_SESSION_KEY] = user.language
        messages.info(self.request, _("User profile was successfully edited."))
        return super(UserEditView, self).form_valid(form)
class AcquireEmail(View):
    """Ask for an email address mid social-auth pipeline."""
    def get(self, request, *args, **kwargs):
        """
        Request email for the create user flow for logins that don't specify
        their email address.
        """
        backend = request.session['partial_pipeline']['backend']
        return render(request, 'user/acquire_email.html', {"backend": backend})
def logout(request):
    """Log the current user out, flash a confirmation, and go to '/'."""
    farewell = _("You have been successfully logged out.")
    messages.info(request, farewell)
    return auth_views.logout(request, next_page="/")
# Review
class UserReviewView(DetailView):
    """Show the reviews received by a user."""
    model = User
    template_name = 'user/user_review.html'
class UserPostReviewView(FormView):
    """Post a review between an event host and a participant.

    The event comes from ``?event_id=``. If ``?to_user_id=`` is present the
    request is host -> participant; otherwise participant -> host.
    """
    template_name = 'user/user_post_review.html'
    form_class = UserReviewListForm
    model = User

    def get_context_data(self, **kwargs):
        context = super(UserPostReviewView, self).get_context_data(**kwargs)
        if 'event_id' in self.request.GET:
            joined_event = Event.objects.get(pk=self.request.GET['event_id'])
            context['review_event'] = joined_event
        if 'to_user_id' in self.request.GET:
            to_user = User.objects.get(pk=self.request.GET['to_user_id'])
            context['to_user'] = to_user
        return context

    def form_valid(self, form):
        # An event id is mandatory; the review must be attached to an event.
        if 'event_id' in self.request.GET:
            joined_event = Event.objects.get(pk=self.request.GET['event_id'])
        else:
            messages.error(self.request, "Url Error")
            return self.form_invalid(form)
        # Host User review participant (True)
        if 'to_user_id' in self.request.GET:
            to_user = User.objects.get(pk=self.request.GET['to_user_id'])
            form.instance.event_host = True
        else:
            to_user = User.objects.get(pk=joined_event.host_user.id)  # (translated) the user being rated
        ## Validators
        # Triples (to, from, event) of reviews this user has already written.
        from_reviews = self.request.user.from_rate_user.all()
        to_from_event_list = []
        for review in from_reviews:
            to_from_event_list.append([review.to_rate_user,
                                       review.from_rate_user,
                                       review.joined_event])
        # The event must be in the reviewer's past (participated or hosted).
        if (joined_event not in self.request.user.get_past_participated_events()) and (joined_event not in self.request.user.get_past_hosted_events()):
            messages.error(self.request, "Invalid Review")
            return self.form_invalid(form)
        # Reviewer must be either a participant or the host of the event.
        if (self.request.user not in joined_event.participant.all()) and (self.request.user != joined_event.host_user):
            # form.add_error('rating', 'Incident with this email already exist')
            messages.error(self.request, "Invalid Review")
            return self.form_invalid(form)
        # Reviewee must be either a participant or the host of the event.
        if (to_user not in joined_event.participant.all()) and (to_user != joined_event.host_user):
            # form.add_error('rating', 'Incident with this email already exist')
            messages.error(self.request, "Invalid Review")
            return self.form_invalid(form)
        # A participant may only review the host.
        if (self.request.user in joined_event.participant.all()) and (to_user != joined_event.host_user):
            messages.error(self.request, "Invalid Review")
            return self.form_invalid(form)
        # The host may only review participants.
        if (self.request.user == joined_event.host_user) and (to_user not in joined_event.participant.all()):
            messages.error(self.request, "Invalid Review")
            return self.form_invalid(form)
        # Reject duplicate reviews for the same (to, from, event) triple.
        if [to_user, self.request.user, joined_event] in to_from_event_list:
            messages.error(self.request, "You Already Reviewd")
            return self.form_invalid(form)
        # All checks passed: fill in the FK ids and persist the review.
        form.instance.to_rate_user_id = to_user.id
        form.instance.from_rate_user_id = self.request.user.id  # (translated) the reviewer
        form.instance.joined_event_id = joined_event.id
        form.save()
        return super(UserPostReviewView, self).form_valid(form)

    # (translated) after posting, return to the not-yet-reviewed page
    def get_success_url(self, **kwargs):
        messages.info(self.request, "Your review was successfully sent")
        return reverse('user:unreviewed')
class UserUnReviewedView(ListView):
    """List the events the current user has not yet reviewed."""
    # NOTE (translated): original author was unsure why this also works
    # when both `model` and a `form_class` are set together.
    model = User
    template_name = 'user/user_unreviewed.html'
class UserSkillView(DetailView):
    """Show a user's skills page."""
    model = User
    template_name = "user/user_skill.html"
class UserSkillEditView(UpdateView):
    """Edit a Skill and keep its tag m2m in sync with the posted 'tags' ids."""
    model = Skill
    # BUGFIX: was misspelled 'tamplate_name', so the attribute was silently
    # ignored and Django fell back to the default '<app>/skill_form.html'.
    template_name = 'user/user_form.html'
    fields = ['skilltodo']

    def form_valid(self, form):
        form_redirect = super(UserSkillEditView, self).form_valid(form)
        skill = form.save(commit=False)
        # Diff the posted tag ids against the current ones and apply the delta.
        new_tags = set(int(t) for t in self.request.POST.getlist('tags'))
        old_tags = set(t.id for t in skill.tag.all())
        for tag_id in new_tags - old_tags:
            skill.tag.add(tag_id)
        for tag_id in old_tags - new_tags:
            skill.tag.remove(tag_id)
        return form_redirect

    def get_context_data(self, **kwargs):
        context = super(UserSkillEditView, self).get_context_data(**kwargs)
        # Callable; the template iterates it lazily.
        context['all_tags'] = Tag.objects.all
        return context

    def get_success_url(self, **kwargs):
        # User-facing Japanese text kept byte-identical.
        messages.info(self.request, "スキル内容を変更しました")
        userskill_id = self.request.user.id
        return reverse('user:skill', kwargs={'pk': userskill_id})
@method_decorator(login_required, name='dispatch')
class UserSkillAddView(CreateView):
    """Create a new Skill owned by the logged-in user."""
    model = Skill
    fields = ['skilltodo']
    success_url = "../../"
    template_name = "user/skill_add.html"

    def get_context_data(self, **kwargs):
        context = super(UserSkillAddView, self).get_context_data(**kwargs)
        # Callable; the template iterates it lazily.
        context['all_tags'] = Tag.objects.all
        return context

    def form_valid(self, form):
        # FIX: attach the owner BEFORE the initial save so the row is never
        # persisted without its user. Previously the instance was saved,
        # then userskill_id was set and the form saved a second time.
        form.instance.userskill_id = self.request.user.id
        form_redirect = super(UserSkillAddView, self).form_valid(form)
        skill = self.object  # the instance persisted by CreateView.form_valid
        # Tags are exactly the posted selection.
        skill.tag.clear()
        for tag_id in self.request.POST.getlist('tags'):
            skill.tag.add(int(tag_id))
        return form_redirect

    def get_success_url(self, **kwargs):
        messages.info(self.request, _("Your new skill has been added successfully."))
        userskill_id = self.request.user.id
        return reverse('user:skill', kwargs={'pk': userskill_id})
[BUGFIX] Add missing closing paren
from django.http import Http404
from django.shortcuts import render, redirect
from django.views.generic.edit import CreateView, UpdateView, FormView
from django.views.generic import DetailView, View, ListView
from django.contrib import messages
from django.shortcuts import get_object_or_404
from .models import User, Skill
from tag.models import Tag
from base.utils import send_template_mail
from django.contrib.auth import views as auth_views
import uuid
from django.contrib.auth.decorators import login_required
from django.contrib.auth import login
from django.utils.decorators import method_decorator
from django.utils import timezone
from .models import User, UserActivation, UserPasswordResetting, UserReviewList
from .form import UserReviewListForm
from django.urls import reverse
from django.contrib.auth.mixins import UserPassesTestMixin
from django.utils import timezone
from django.forms import formset_factory
from event.models import Event
from django.utils import translation
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
class UserCreateView(CreateView):
    """Register a new, initially inactive user and email an activation link."""
    model = User
    fields = ['email', 'password', 'username']
    template_name = 'user/register.html'

    def form_valid(self, form):
        user = form.save(commit=False)
        # The form field holds the raw password; it must be hashed.
        user.set_password(form.cleaned_data['password'])
        user.is_active = False  # inactive until the emailed link is clicked
        user.save()
        activation_key = self.create_activation_key()
        activation = UserActivation(user=user, key=activation_key)
        activation.save()
        # First three path segments = scheme + '' + host, e.g. "https://example.com".
        base_url = "/".join(self.request.build_absolute_uri().split("/")[:3])
        activation_url = "{0}/user/activation/{1}".format(base_url,
                                                          activation_key)
        send_template_mail(
            "email/activation.txt",
            {"activation_url": activation_url},
            "Sovol Info <info@sovol.earth>",
            [user.email],
        )
        # NOTE(review): the message has no %(email)s placeholder, so the
        # mapping is a no-op; kept for behavior compatibility — confirm
        # whether the translation string should include the address.
        info_msg = _("Confirmation email has been sent to your email address.") % {
            'email': user.email,
        }
        messages.info(self.request, info_msg)
        return redirect("top")

    def create_activation_key(self):
        # BUGFIX: this 'def' line was missing, which left the two lines
        # below stranded (unreachable) at the end of form_valid while
        # form_valid still called self.create_activation_key().
        key = uuid.uuid4().hex
        return key

    def get_form(self):
        from django import forms
        form = super().get_form()
        form.fields['password'].widget = forms.PasswordInput()
        form.fields['username'].maxlength = 15
        form.fields['username'].label = _("Username (Up to 15 characters)")
        return form
class UserActivationView(View):
    """Activate an account from the emailed key and log the user in."""

    def get(self, request, *args, **kwargs):
        activation = get_object_or_404(UserActivation, key=kwargs['key'])
        activated_user = activation.user
        activated_user.is_active = True
        activated_user.save()
        login(request, activated_user, "django.contrib.auth.backends.ModelBackend")
        messages.info(request, _("You have successfully registered!"))
        return redirect("top")
class RequestPasswordReset(View):
    """Ask for an email address and, if it matches a user, email a reset link."""

    def get(self, request, *args, **kwargs):
        return render(request, 'user/request_password_reset.html')

    def post(self, request, *args, **kwargs):
        email = request.POST.get('email')
        user = User.objects.filter(email=email)
        if user.exists():
            user = user.first()
            reset_key = self.create_reset_key()
            # Reuse the user's existing resetting row or create a fresh one.
            if hasattr(user, "userpasswordresetting"):
                user.userpasswordresetting.key = reset_key
                user.userpasswordresetting.save()
            else:
                UserPasswordResetting(user=user, key=reset_key).save()
            # First three path segments = scheme + '' + host.
            absolute_uri = self.request.build_absolute_uri()
            base_url = "/".join(absolute_uri.split("/")[:3])
            reset_url = "{0}/user/reset_password/{1}".format(base_url,
                                                             reset_key)
            send_template_mail(
                "email/reset_password.txt",
                {"reset_url": reset_url},
                "Sovol Info<info@sovol.earth>",
                [user.email]
            )
        # Same message whether or not the address matched, so the endpoint
        # does not reveal which emails are registered.
        # BUGFIX: the three _() calls were juxtaposed with no operator between
        # them, which is a SyntaxError (implicit concatenation only works for
        # string literals, not call expressions); join them explicitly.
        info_msg = "{0} {1} {2}".format(
            _("A password reset was requested."),
            _("If the email address is registered,"),
            _("URL for resetting your password will be sent."))
        messages.info(request, info_msg)
        return redirect("top")

    def create_reset_key(self):
        key = uuid.uuid4().hex
        return key
class ResetPassword(View):
    """Set a new password for the user that owns the reset key in the URL."""

    def get(self, request, *args, **kwargs):
        resetting_qs = UserPasswordResetting.objects.filter(key=kwargs['key'])
        if not resetting_qs.exists():
            messages.error(request, _("Invalid URL"))
            return redirect("top")
        return render(request, 'user/reset_password.html')

    def post(self, request, *args, **kwargs):
        password = request.POST.get('password')
        resetting_qs = UserPasswordResetting.objects.filter(key=kwargs['key'])
        if not resetting_qs.exists():
            messages.error(request, _("Failed to reset your password."))
            return redirect("top")
        user = resetting_qs.first().user
        user.set_password(password)
        user.save()
        # Log the user straight in with their new credentials.
        login(request, user, "django.contrib.auth.backends.ModelBackend")
        messages.info(request, _("Your new password has been set."))
        return redirect("top")
class UserDetailView(DetailView):
    """Render a user's public detail page; missing users get a flash + redirect."""

    template_name = 'user/detail.html'
    model = User
    context_object_name = 'user'

    def get(self, request, *args, **kwargs):
        try:
            self.object = self.get_object()
        except Http404:
            # Soft-fail: message and redirect instead of a 404 response.
            messages.error(request, _("No user found"))
            return redirect('top')
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        # Defer entirely to DetailView; kept as an explicit extension point.
        return super(UserDetailView, self).get_context_data(**kwargs)
class UserEditView(UpdateView):
    """Let the logged-in user edit their own profile.

    The edited object is always ``request.user`` (no pk taken from the URL).
    """
    model = User
    fields = [
        'username',
        'email',
        'image',
        'region',
        'sex',
        'birthday',
        'language',
    ]
    template_name = 'user/edit.html'

    def get_object(self, queryset=None):
        # Always edit the requesting user; any pk in the URL is ignored.
        return self.request.user

    def get_context_data(self, **kwargs):
        context = super(UserEditView, self).get_context_data(**kwargs)
        # Callable (not evaluated here); the template iterates it lazily.
        context['all_tags'] = Tag.objects.all
        context['languages'] = settings.LANGUAGES
        return context

    def form_valid(self, form):
        user = form.save(commit=False)
        # Sync the followed-tags m2m with the checkbox ids posted as 'tags':
        # add newly checked ids, remove unchecked ones.
        new_tags = set([int(t) for t in self.request.POST.getlist('tags')])
        old_tags = set([t.id for t in user.follow_tag.all()])
        for tag_id in new_tags - old_tags:
            user.follow_tag.add(tag_id)
        for tag_id in old_tags - new_tags:
            user.follow_tag.remove(tag_id)
        # Apply the new language preference to the current session immediately.
        self.request.session[translation.LANGUAGE_SESSION_KEY] = user.language
        messages.info(self.request, _("User profile was successfully edited."))
        return super(UserEditView, self).form_valid(form)
class AcquireEmail(View):
    """Collect an email address for social logins that did not supply one."""

    def get(self, request, *args, **kwargs):
        # The interrupted social-auth pipeline lives in the session.
        backend_name = request.session['partial_pipeline']['backend']
        return render(request,
                      'user/acquire_email.html',
                      {"backend": backend_name})
def logout(request):
    """Log the user out and send them to '/' with a confirmation notice."""
    notice = _("You have been successfully logged out.")
    messages.info(request, notice)
    return auth_views.logout(request, next_page="/")
# Review
class UserReviewView(DetailView):
    """Show the reviews received by a user."""
    model = User
    template_name = 'user/user_review.html'
class UserPostReviewView(FormView):
    """Post a review between an event host and a participant.

    The event comes from ``?event_id=``. If ``?to_user_id=`` is present the
    request is host -> participant; otherwise participant -> host.
    """
    template_name = 'user/user_post_review.html'
    form_class = UserReviewListForm
    model = User

    def get_context_data(self, **kwargs):
        context = super(UserPostReviewView, self).get_context_data(**kwargs)
        if 'event_id' in self.request.GET:
            joined_event = Event.objects.get(pk=self.request.GET['event_id'])
            context['review_event'] = joined_event
        if 'to_user_id' in self.request.GET:
            to_user = User.objects.get(pk=self.request.GET['to_user_id'])
            context['to_user'] = to_user
        return context

    def form_valid(self, form):
        # An event id is mandatory; the review must be attached to an event.
        if 'event_id' in self.request.GET:
            joined_event = Event.objects.get(pk=self.request.GET['event_id'])
        else:
            messages.error(self.request, "Url Error")
            return self.form_invalid(form)
        # Host User review participant (True)
        if 'to_user_id' in self.request.GET:
            to_user = User.objects.get(pk=self.request.GET['to_user_id'])
            form.instance.event_host = True
        else:
            to_user = User.objects.get(pk=joined_event.host_user.id)  # (translated) the user being rated
        ## Validators
        # Triples (to, from, event) of reviews this user has already written.
        from_reviews = self.request.user.from_rate_user.all()
        to_from_event_list = []
        for review in from_reviews:
            to_from_event_list.append([review.to_rate_user,
                                       review.from_rate_user,
                                       review.joined_event])
        # The event must be in the reviewer's past (participated or hosted).
        if (joined_event not in self.request.user.get_past_participated_events()) and (joined_event not in self.request.user.get_past_hosted_events()):
            messages.error(self.request, "Invalid Review")
            return self.form_invalid(form)
        # Reviewer must be either a participant or the host of the event.
        if (self.request.user not in joined_event.participant.all()) and (self.request.user != joined_event.host_user):
            # form.add_error('rating', 'Incident with this email already exist')
            messages.error(self.request, "Invalid Review")
            return self.form_invalid(form)
        # Reviewee must be either a participant or the host of the event.
        if (to_user not in joined_event.participant.all()) and (to_user != joined_event.host_user):
            # form.add_error('rating', 'Incident with this email already exist')
            messages.error(self.request, "Invalid Review")
            return self.form_invalid(form)
        # A participant may only review the host.
        if (self.request.user in joined_event.participant.all()) and (to_user != joined_event.host_user):
            messages.error(self.request, "Invalid Review")
            return self.form_invalid(form)
        # The host may only review participants.
        if (self.request.user == joined_event.host_user) and (to_user not in joined_event.participant.all()):
            messages.error(self.request, "Invalid Review")
            return self.form_invalid(form)
        # Reject duplicate reviews for the same (to, from, event) triple.
        if [to_user, self.request.user, joined_event] in to_from_event_list:
            messages.error(self.request, "You Already Reviewd")
            return self.form_invalid(form)
        # All checks passed: fill in the FK ids and persist the review.
        form.instance.to_rate_user_id = to_user.id
        form.instance.from_rate_user_id = self.request.user.id  # (translated) the reviewer
        form.instance.joined_event_id = joined_event.id
        form.save()
        return super(UserPostReviewView, self).form_valid(form)

    # (translated) after posting, return to the not-yet-reviewed page
    def get_success_url(self, **kwargs):
        messages.info(self.request, "Your review was successfully sent")
        return reverse('user:unreviewed')
class UserUnReviewedView(ListView):
    """List the events the current user has not yet reviewed."""
    # NOTE (translated): original author was unsure why this also works
    # when both `model` and a `form_class` are set together.
    model = User
    template_name = 'user/user_unreviewed.html'
class UserSkillView(DetailView):
    """Show a user's skills page."""
    model = User
    template_name = "user/user_skill.html"
class UserSkillEditView(UpdateView):
    """Edit a Skill and keep its tag m2m in sync with the posted 'tags' ids."""
    model = Skill
    # BUGFIX: was misspelled 'tamplate_name', so the attribute was silently
    # ignored and Django fell back to the default '<app>/skill_form.html'.
    template_name = 'user/user_form.html'
    fields = ['skilltodo']

    def form_valid(self, form):
        form_redirect = super(UserSkillEditView, self).form_valid(form)
        skill = form.save(commit=False)
        # Diff the posted tag ids against the current ones and apply the delta.
        new_tags = set(int(t) for t in self.request.POST.getlist('tags'))
        old_tags = set(t.id for t in skill.tag.all())
        for tag_id in new_tags - old_tags:
            skill.tag.add(tag_id)
        for tag_id in old_tags - new_tags:
            skill.tag.remove(tag_id)
        return form_redirect

    def get_context_data(self, **kwargs):
        context = super(UserSkillEditView, self).get_context_data(**kwargs)
        # Callable; the template iterates it lazily.
        context['all_tags'] = Tag.objects.all
        return context

    def get_success_url(self, **kwargs):
        # User-facing Japanese text kept byte-identical.
        messages.info(self.request, "スキル内容を変更しました")
        userskill_id = self.request.user.id
        return reverse('user:skill', kwargs={'pk': userskill_id})
@method_decorator(login_required, name='dispatch')
class UserSkillAddView(CreateView):
    """Create a new Skill owned by the logged-in user."""
    model = Skill
    fields = ['skilltodo']
    success_url = "../../"
    template_name = "user/skill_add.html"

    def get_context_data(self, **kwargs):
        context = super(UserSkillAddView, self).get_context_data(**kwargs)
        # Callable; the template iterates it lazily.
        context['all_tags'] = Tag.objects.all
        return context

    def form_valid(self, form):
        # FIX: attach the owner BEFORE the initial save so the row is never
        # persisted without its user. Previously the instance was saved,
        # then userskill_id was set and the form saved a second time.
        form.instance.userskill_id = self.request.user.id
        form_redirect = super(UserSkillAddView, self).form_valid(form)
        skill = self.object  # the instance persisted by CreateView.form_valid
        # Tags are exactly the posted selection.
        skill.tag.clear()
        for tag_id in self.request.POST.getlist('tags'):
            skill.tag.add(int(tag_id))
        return form_redirect

    def get_success_url(self, **kwargs):
        messages.info(self.request, _("Your new skill has been added successfully."))
        userskill_id = self.request.user.id
        return reverse('user:skill', kwargs={'pk': userskill_id})
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, an open source suite of business apps
# This module copyright (C) 2015 bloopark systems (<http://bloopark.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo module manifest for the Dribbble social-icon extension.
    'name': "Dribbble Social Media Icon Extension",
    'summary': """Dribbble Extension for the social media icons from the
odoo core""",
    'author': "bloopark systems GmbH & Co. KG, "
              "Odoo Community Association (OCA)",
    'website': "http://www.bloopark.de",
    'license': 'AGPL-3',
    'category': 'Social Media',
    # FIX: OCA convention requires a series-prefixed version
    # (<odoo series>.<major>.<minor>.<patch>); plain '1.0' does not
    # encode the target Odoo series.
    'version': '8.0.1.0.0',
    'depends': [
        'base',
        'website',
        'website_blog',
    ],
    # XML views loaded on install/upgrade.
    'data': [
        'views/website_templates.xml',
        'views/website_views.xml',
        'views/website_blog_template.xml',
        'views/res_config.xml',
    ],
}
[IMP] edited the version number
# -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, an open source suite of business apps
# This module copyright (C) 2015 bloopark systems (<http://bloopark.de>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    # Odoo module manifest for the Dribbble social-icon extension.
    'name': "Dribbble Social Media Icon Extension",
    'summary': """Dribbble Extension for the social media icons from the
odoo core""",
    'author': "bloopark systems GmbH & Co. KG, "
              "Odoo Community Association (OCA)",
    'website': "http://www.bloopark.de",
    'license': 'AGPL-3',
    'category': 'Social Media',
    # OCA scheme: <odoo series>.<major>.<minor>.<patch>
    'version': '8.0.1.0.0',
    'depends': [
        'base',
        'website',
        'website_blog'
    ],
    # XML views loaded on install/upgrade.
    'data': [
        'views/website_templates.xml',
        'views/website_views.xml',
        'views/website_blog_template.xml',
        'views/res_config.xml',
    ],
}
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe, json
from frappe.utils import cint, quoted
from frappe.website.path_resolver import resolve_path
from frappe.model.document import get_controller, Document
from frappe import _
no_cache = 1
def get_context(context, **dict_params):
    """Returns context for a list standard list page.

    Will also update `get_list_context` from the doctype module file"""
    frappe.local.form_dict.update(dict_params)
    doctype = frappe.local.form_dict.doctype
    context.parents = [{"route":"me", "title":_("My Account")}]
    context.meta = frappe.get_meta(doctype)
    # layer doctype/webform overrides on top of the base context
    context.update(get_list_context(context, doctype) or {})
    context.doctype = doctype
    context.txt = frappe.local.form_dict.txt
    # render the first page of rows into the context
    context.update(get(**frappe.local.form_dict))
@frappe.whitelist(allow_guest=True)
def get(doctype, txt=None, limit_start=0, limit=20, pathname=None, **kwargs):
    """Returns processed HTML page for a standard listing."""
    limit_start = cint(limit_start)
    # Fetch one extra row so we can tell whether a "show more" page exists.
    raw_result = get_list_data(doctype, txt, limit_start, limit=limit + 1, **kwargs)
    show_more = len(raw_result) > limit
    if show_more:
        raw_result = raw_result[:-1]
    meta = frappe.get_meta(doctype)
    # get_list_data stashes the assembled list context on frappe.flags.
    list_context = frappe.flags.list_context
    if not raw_result: return {"result": []}
    if txt:
        list_context.default_subtitle = _('Filtered by "{0}"').format(txt)
    result = []
    row_template = list_context.row_template or "templates/includes/list/row_template.html"
    # Only the first four in-list-view fields are rendered per row.
    list_view_fields = [df for df in meta.fields if df.in_list_view][:4]
    for doc in raw_result:
        doc.doctype = doctype
        new_context = frappe._dict(doc=doc, meta=meta,
            list_view_fields=list_view_fields)
        if not list_context.get_list and not isinstance(new_context.doc, Document):
            # default query returns dicts; load the full Document for templates
            new_context.doc = frappe.get_doc(doc.doctype, doc.name)
            new_context.update(new_context.doc.as_dict())
        if not frappe.flags.in_test:
            pathname = pathname or frappe.local.request.path
            new_context["pathname"] = pathname.strip("/ ")
        new_context.update(list_context)
        set_route(new_context)
        rendered_row = frappe.render_template(row_template, new_context, is_path=True)
        result.append(rendered_row)
    from frappe.utils.response import json_handler
    return {
        "raw_result": json.dumps(raw_result, default=json_handler),
        "result": result,
        "show_more": show_more,
        "next_start": limit_start + limit,
    }
@frappe.whitelist(allow_guest=True)
def get_list_data(doctype, txt=None, limit_start=0, fields=None, cmd=None, limit=20, web_form_name=None, **kwargs):
    """Returns processed HTML page for a standard listing."""
    limit_start = cint(limit_start)
    # A plain ?search= parameter doubles as the free-text filter.
    if not txt and frappe.form_dict.search:
        txt = frappe.form_dict.search
        del frappe.form_dict['search']
    controller = get_controller(doctype)
    meta = frappe.get_meta(doctype)
    filters = prepare_filters(doctype, controller, kwargs)
    list_context = get_list_context(frappe._dict(), doctype, web_form_name)
    list_context.title_field = getattr(controller, 'website',
        {}).get('page_title_field', meta.title_field or 'name')
    if list_context.filters:
        filters.update(list_context.filters)
    # The doctype may supply its own get_list; otherwise use the module default.
    _get_list = list_context.get_list or get_list
    kwargs = dict(doctype=doctype, txt=txt, filters=filters,
        limit_start=limit_start, limit_page_length=limit,
        order_by = list_context.order_by or 'modified desc')
    # allow guest if flag is set
    if not list_context.get_list and (list_context.allow_guest or meta.allow_guest_to_view):
        kwargs['ignore_permissions'] = True
    raw_result = _get_list(**kwargs)
    # list context to be used if called as rendered list
    frappe.flags.list_context = list_context
    return raw_result
def set_route(context):
    '''Set link for the list item'''
    doc = context.doc
    if context.web_form_name:
        # web-form rows link back to the same page, selecting by name
        context.route = "{0}?name={1}".format(context.pathname, quoted(doc.name))
    elif doc and getattr(doc, 'route', None):
        # the document carries its own published route
        context.route = doc.route
    else:
        base = context.pathname or quoted(doc.doctype)
        context.route = "{0}/{1}".format(base, quoted(doc.name))
def prepare_filters(doctype, controller, kwargs):
    """Convert query-string kwargs into a filters dict valid for *doctype*."""
    # values may arrive JSON-encoded (lists, numbers); decode where possible
    for key in kwargs.keys():
        try:
            kwargs[key] = json.loads(kwargs[key])
        except ValueError:
            pass
    filters = frappe._dict(kwargs)
    meta = frappe.get_meta(doctype)
    # doctype-level "published only" condition, e.g. filters['published'] = 1
    if hasattr(controller, 'website') and controller.website.get('condition_field'):
        filters[controller.website['condition_field']] = 1
    if filters.pathname:
        # resolve additional filters from path
        resolve_path(filters.pathname)
        for key, val in frappe.local.form_dict.items():
            if key not in filters and key != 'flags':
                filters[key] = val
    # filter the filters to include valid fields only
    for fieldname, val in list(filters.items()):
        if not meta.has_field(fieldname):
            del filters[fieldname]
    return filters
def get_list_context(context, doctype, web_form_name=None):
    """Assemble the list context for *doctype*, layering doctype-module,
    webform-hook and web-form-module overrides on top of *context*."""
    from frappe.modules import load_doctype_module
    list_context = context or frappe._dict()
    meta = frappe.get_meta(doctype)
    def update_context_from_module(module, list_context):
        # call the user defined method `get_list_context`
        # from the python module
        if hasattr(module, "get_list_context"):
            out = frappe._dict(module.get_list_context(list_context) or {})
            if out:
                list_context = out
        return list_context
    # get context from the doctype module
    if not meta.custom:
        # custom doctypes don't have modules
        module = load_doctype_module(doctype)
        list_context = update_context_from_module(module, list_context)
    # get context for custom webform
    if meta.custom and web_form_name:
        webform_list_contexts = frappe.get_hooks('webform_list_context')
        if webform_list_contexts:
            out = frappe._dict(frappe.get_attr(webform_list_contexts[0])(meta.module) or {})
            if out:
                list_context = out
    # get context from web form module
    if web_form_name:
        web_form = frappe.get_doc('Web Form', web_form_name)
        list_context = update_context_from_module(web_form.get_web_form_module(), list_context)
    # get path from '/templates/' folder of the doctype
    if not meta.custom and not list_context.row_template:
        list_context.row_template = meta.get_row_template()
    if not meta.custom and not list_context.list_template:
        # NOTE(review): the guard checks `list_template` but assigns
        # `template` — verify upstream intent before "fixing".
        list_context.template = meta.get_list_template() or "www/list.html"
    return list_context
def get_list(doctype, txt, filters, limit_start, limit_page_length=20, ignore_permissions=False,
    fields=None, order_by=None):
    """Default list query: fetch rows of *doctype*, matching *txt* against the
    doctype's search fields (or `name` when no search fields are defined)."""
    meta = frappe.get_meta(doctype)
    if not filters:
        filters = []
    if not fields:
        fields = "distinct *"
    or_filters = []
    if txt:
        if meta.search_fields:
            for f in meta.get_search_fields():
                # only text-like fields can take a LIKE filter
                if f == 'name' or meta.get_field(f).fieldtype in ('Data', 'Text', 'Small Text', 'Text Editor'):
                    or_filters.append([doctype, f, "like", "%" + txt + "%"])
        else:
            if isinstance(filters, dict):
                filters["name"] = ("like", "%" + txt + "%")
            else:
                filters.append([doctype, "name", "like", "%" + txt + "%"])
    return frappe.get_list(doctype, fields = fields,
        filters=filters, or_filters=or_filters, limit_start=limit_start,
        limit_page_length = limit_page_length, ignore_permissions=ignore_permissions,
        order_by=order_by)
fix: Pass parent_doctype to get_list
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe, json
from frappe.utils import cint, quoted
from frappe.website.path_resolver import resolve_path
from frappe.model.document import get_controller, Document
from frappe import _
no_cache = 1
def get_context(context, **dict_params):
    """Build the page context for the standard list page.

    Also merges in `get_list_context` overrides from the doctype module.
    """
    form_dict = frappe.local.form_dict
    form_dict.update(dict_params)
    doctype = form_dict.doctype
    context.parents = [{"route":"me", "title":_("My Account")}]
    context.meta = frappe.get_meta(doctype)
    context.update(get_list_context(context, doctype) or {})
    context.doctype = doctype
    context.txt = form_dict.txt
    context.update(get(**form_dict))
@frappe.whitelist(allow_guest=True)
def get(doctype, txt=None, limit_start=0, limit=20, pathname=None, **kwargs):
    """Returns processed HTML page for a standard listing."""
    limit_start = cint(limit_start)
    # Fetch one extra row so we can tell whether a "show more" page exists.
    raw_result = get_list_data(doctype, txt, limit_start, limit=limit + 1, **kwargs)
    show_more = len(raw_result) > limit
    if show_more:
        raw_result = raw_result[:-1]
    meta = frappe.get_meta(doctype)
    # get_list_data stashes the assembled list context on frappe.flags.
    list_context = frappe.flags.list_context
    if not raw_result: return {"result": []}
    if txt:
        list_context.default_subtitle = _('Filtered by "{0}"').format(txt)
    result = []
    row_template = list_context.row_template or "templates/includes/list/row_template.html"
    # Only the first four in-list-view fields are rendered per row.
    list_view_fields = [df for df in meta.fields if df.in_list_view][:4]
    for doc in raw_result:
        doc.doctype = doctype
        new_context = frappe._dict(doc=doc, meta=meta,
            list_view_fields=list_view_fields)
        if not list_context.get_list and not isinstance(new_context.doc, Document):
            # default query returns dicts; load the full Document for templates
            new_context.doc = frappe.get_doc(doc.doctype, doc.name)
            new_context.update(new_context.doc.as_dict())
        if not frappe.flags.in_test:
            pathname = pathname or frappe.local.request.path
            new_context["pathname"] = pathname.strip("/ ")
        new_context.update(list_context)
        set_route(new_context)
        rendered_row = frappe.render_template(row_template, new_context, is_path=True)
        result.append(rendered_row)
    from frappe.utils.response import json_handler
    return {
        "raw_result": json.dumps(raw_result, default=json_handler),
        "result": result,
        "show_more": show_more,
        "next_start": limit_start + limit,
    }
@frappe.whitelist(allow_guest=True)
def get_list_data(doctype, txt=None, limit_start=0, fields=None, cmd=None, limit=20, web_form_name=None, **kwargs):
    """Returns processed HTML page for a standard listing."""
    limit_start = cint(limit_start)
    # A plain ?search= parameter doubles as the free-text filter.
    if not txt and frappe.form_dict.search:
        txt = frappe.form_dict.search
        del frappe.form_dict['search']
    controller = get_controller(doctype)
    meta = frappe.get_meta(doctype)
    filters = prepare_filters(doctype, controller, kwargs)
    list_context = get_list_context(frappe._dict(), doctype, web_form_name)
    list_context.title_field = getattr(controller, 'website',
        {}).get('page_title_field', meta.title_field or 'name')
    if list_context.filters:
        filters.update(list_context.filters)
    # The doctype may supply its own get_list; otherwise use the module default.
    _get_list = list_context.get_list or get_list
    # NOTE: the RHS reads the ORIGINAL kwargs before this rebinding takes effect,
    # forwarding parent_doctype for permission checks on child tables.
    kwargs = dict(doctype=doctype, txt=txt, filters=filters,
        limit_start=limit_start, limit_page_length=limit,
        order_by = list_context.order_by or 'modified desc', parent_doctype=kwargs.get("parent_doctype"))
    # allow guest if flag is set
    if not list_context.get_list and (list_context.allow_guest or meta.allow_guest_to_view):
        kwargs['ignore_permissions'] = True
    raw_result = _get_list(**kwargs)
    # list context to be used if called as rendered list
    frappe.flags.list_context = list_context
    return raw_result
def set_route(context):
    '''Set link for the list item'''
    row_doc = context.doc
    if context.web_form_name:
        # web-form rows link back to the same page, selecting by name
        context.route = "{0}?name={1}".format(context.pathname, quoted(row_doc.name))
        return
    if row_doc and getattr(row_doc, 'route', None):
        # the document carries its own published route
        context.route = row_doc.route
        return
    prefix = context.pathname or quoted(row_doc.doctype)
    context.route = "{0}/{1}".format(prefix, quoted(row_doc.name))
def prepare_filters(doctype, controller, kwargs):
    """Convert query-string kwargs into a filters dict valid for *doctype*."""
    # Values may arrive JSON-encoded (lists, numbers); decode where possible.
    for key in kwargs.keys():
        try:
            kwargs[key] = json.loads(kwargs[key])
        except (ValueError, TypeError):
            # FIX: json.loads raises TypeError (not ValueError) when the
            # value is not a str/bytes — e.g. a list or int passed
            # programmatically — which previously crashed here.
            pass
    filters = frappe._dict(kwargs)
    meta = frappe.get_meta(doctype)
    # doctype-level "published only" condition, e.g. filters['published'] = 1
    if hasattr(controller, 'website') and controller.website.get('condition_field'):
        filters[controller.website['condition_field']] = 1
    if filters.pathname:
        # resolve additional filters from path
        resolve_path(filters.pathname)
        for key, val in frappe.local.form_dict.items():
            if key not in filters and key != 'flags':
                filters[key] = val
    # filter the filters to include valid fields only
    for fieldname, val in list(filters.items()):
        if not meta.has_field(fieldname):
            del filters[fieldname]
    return filters
return filters
def get_list_context(context, doctype, web_form_name=None):
    """Assemble the list context for *doctype*, layering doctype-module,
    webform-hook and web-form-module overrides on top of *context*."""
    from frappe.modules import load_doctype_module
    list_context = context or frappe._dict()
    meta = frappe.get_meta(doctype)
    def update_context_from_module(module, list_context):
        # call the user defined method `get_list_context`
        # from the python module
        if hasattr(module, "get_list_context"):
            out = frappe._dict(module.get_list_context(list_context) or {})
            if out:
                list_context = out
        return list_context
    # get context from the doctype module
    if not meta.custom:
        # custom doctypes don't have modules
        module = load_doctype_module(doctype)
        list_context = update_context_from_module(module, list_context)
    # get context for custom webform
    if meta.custom and web_form_name:
        webform_list_contexts = frappe.get_hooks('webform_list_context')
        if webform_list_contexts:
            out = frappe._dict(frappe.get_attr(webform_list_contexts[0])(meta.module) or {})
            if out:
                list_context = out
    # get context from web form module
    if web_form_name:
        web_form = frappe.get_doc('Web Form', web_form_name)
        list_context = update_context_from_module(web_form.get_web_form_module(), list_context)
    # get path from '/templates/' folder of the doctype
    if not meta.custom and not list_context.row_template:
        list_context.row_template = meta.get_row_template()
    if not meta.custom and not list_context.list_template:
        # NOTE(review): the guard checks `list_template` but assigns
        # `template` — verify upstream intent before "fixing".
        list_context.template = meta.get_list_template() or "www/list.html"
    return list_context
def get_list(doctype, txt, filters, limit_start, limit_page_length=20, ignore_permissions=False,
	fields=None, order_by=None, parent_doctype=None):
	"""Return a page of ``doctype`` records, optionally matching ``txt``.

	When ``txt`` is given and the doctype declares search fields, a LIKE
	or-filter is added per searchable field; otherwise the name column is
	searched directly.
	"""
	meta = frappe.get_meta(doctype)
	filters = filters or []
	fields = fields or "distinct *"

	or_filters = []
	if txt:
		if meta.search_fields:
			for fieldname in meta.get_search_fields():
				# only text-like fields (or the name) are LIKE-searchable
				if fieldname == 'name' or meta.get_field(fieldname).fieldtype in ('Data', 'Text', 'Small Text', 'Text Editor'):
					or_filters.append([doctype, fieldname, "like", "%" + txt + "%"])
		elif isinstance(filters, dict):
			filters["name"] = ("like", "%" + txt + "%")
		else:
			filters.append([doctype, "name", "like", "%" + txt + "%"])

	return frappe.get_list(doctype, fields=fields,
		filters=filters, or_filters=or_filters, limit_start=limit_start,
		limit_page_length=limit_page_length, ignore_permissions=ignore_permissions,
		order_by=order_by, parent_doctype=parent_doctype)
|
# -*- python -*-
# stdlib imports
import os
import os.path as osp
import sys
# waf imports ---
import waflib.Options
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
_heptooldir = osp.dirname(osp.abspath(__file__))
# add this directory to sys.path to ease the loading of other hepwaf tools
if not _heptooldir in sys.path: sys.path.append(_heptooldir)
### ---------------------------------------------------------------------------
def options(ctx):
    """Declare hwaf command-line options, load the hwaf tool-suite option
    hooks and recurse into sub-packages found under 'src' (when present)."""
    # the MacPorts/Fink toggles are only meaningful on OS X
    if 'darwin' in sys.platform:
        ctx.add_option(
            '--use-macports',
            default=None,
            action='store_true',
            help="Enable MacPorts")
        ctx.add_option(
            '--use-fink',
            default=None,
            action='store_true',
            help="Enable Fink")
        pass
    ctx.add_option(
        '--relocate-from',
        default=None,
        help='top-level path to relocate against (default=${PREFIX})',
        )
    ctx.add_option(
        '--project-version',
        default=None,
        help='modify the project version used during build',
        )
    ctx.add_option(
        '--local-cfg',
        default=None,
        help="Path to the local config file listing all type of configuration infos")
    # pull in the options of the rest of the hwaf tool-suite
    ctx.load('hwaf-system', tooldir=_heptooldir)
    ctx.load('hwaf-dist', tooldir=_heptooldir)
    ctx.load('hwaf-project-mgr', tooldir=_heptooldir)
    ctx.load('hwaf-runtime', tooldir=_heptooldir)
    # let every sub-package declare its own options too
    pkgdir = 'src'
    if osp.exists(pkgdir):
        pkgs = hwaf_find_suboptions(pkgdir)
        ctx.recurse(pkgs, mandatory=False)
    return
### ---------------------------------------------------------------------------
def configure(ctx):
    """Configure the hwaf project: process the optional local manifest,
    load the hwaf tool-suite, register the runtime environment variables,
    configure the project and print a configuration summary."""
    # optional MANIFEST-like file providing defaults for unset options
    if ctx.options.local_cfg:
        fname = osp.abspath(ctx.options.local_cfg)
        ctx.start_msg("Manifest file")
        ctx.end_msg(fname)
        ok = ctx.read_cfg(fname)
        ctx.start_msg("Manifest file processing")
        ctx.end_msg(ok)
        pass
    if not ctx.env.HWAF_MODULES: ctx.env.HWAF_MODULES = []
    ctx.load('hwaf-system', tooldir=_heptooldir)
    ctx.load('hwaf-dist', tooldir=_heptooldir)
    ctx.load('hwaf-project-mgr', tooldir=_heptooldir)
    ctx.load('hwaf-runtime', tooldir=_heptooldir)
    # register a couple of runtime environment variables
    ctx.declare_runtime_env('PATH')
    ctx.declare_runtime_env('RPATH')
    ctx.declare_runtime_env('LD_LIBRARY_PATH')
    ctx.declare_runtime_env('PYTHONPATH')
    if ctx.is_darwin():
        ctx.declare_runtime_env('DYLD_LIBRARY_PATH')
        pass
    ctx.declare_runtime_env('PKG_CONFIG_PATH')
    ctx.declare_runtime_env('CMTCFG')
    # propagate build variables and the usual interactive-shell variables
    for k in ['CPPFLAGS',
              'CFLAGS',
              'CCFLAGS',
              'CXXFLAGS',
              'FCFLAGS',
              'LINKFLAGS',
              'SHLINKFLAGS',
              'SHLIB_MARKER',
              'AR',
              'ARFLAGS',
              'CC',
              'CXX',
              'LINK_CC',
              'LINK_CXX',
              'LIBPATH',
              'DEFINES',
              'EXTERNAL_AREA',
              'INSTALL_AREA',
              'INSTALL_AREA_BINDIR',
              'INSTALL_AREA_LIBDIR',
              'PREFIX',
              'DESTDIR',
              'BINDIR',
              'LIBDIR',
              'HOME',
              'EDITOR',
              'USER',
              'LANG',
              'LC_ALL',
              'TERM',
              'TERMCAP',
              'HISTORY',
              'HISTSIZE',
              'PS1',
              'SHELL',
              'PWD',
              'OLDPWD',
              'DISPLAY',
              ]:
        ctx.declare_runtime_env(k)
        pass
    # configure project
    ctx._hwaf_configure_project()
    # display project infos...
    msg.info('='*80)
    ctx.msg('project', '%s-%s' % (ctx.env.HWAF_PROJECT_NAME,
                                  ctx.env.HWAF_PROJECT_VERSION))
    ctx.msg('prefix', ctx.env.PREFIX)
    if ctx.env.DESTDIR:
        ctx.msg('destdir', ctx.env.DESTDIR)
        pass
    ctx.msg('pkg dir', ctx.env.CMTPKGS)
    ctx.msg('variant', ctx.env.CMTCFG)
    ctx.msg('arch', ctx.env.CFG_ARCH)
    ctx.msg('OS', ctx.env.CFG_OS)
    ctx.msg('compiler', ctx.env.CFG_COMPILER)
    ctx.msg('build-type', ctx.env.CFG_TYPE)
    deps = ctx.hwaf_project_deps()
    if deps: deps = ','.join(deps)
    else: deps = 'None'
    ctx.msg('projects deps', deps)
    ctx.msg('install-area', ctx.env.INSTALL_AREA)
    ctx.msg('njobs-max', waflib.Options.options.jobs)
    msg.info('='*80)
    return
def build(ctx):
    """Load the hwaf tool-suite and build the project.

    The hwaf-module for the current project is recreated during the build
    phase (not only at configure time), so that 'hwaf clean build' works
    in a single run.
    """
    ctx.load('hwaf-system', tooldir=_heptooldir)
    ctx.load('hwaf-dist', tooldir=_heptooldir)
    ctx.load('hwaf-project-mgr', tooldir=_heptooldir)
    ctx.load('hwaf-runtime', tooldir=_heptooldir)
    # recreate the hwaf-module for the current project during the build
    # phase (see the fixed version of this tool further down this file)
    ctx._hwaf_create_project_hwaf_module()
    ctx._hwaf_load_project_hwaf_module(do_export=False)
    return
### ---------------------------------------------------------------------------
@conf
def hwaf_get_install_path(self, k, destdir=True):
    """
    Return the installation path for ``k``: ``${...}`` variables are
    substituted from the waf environment and, when ``destdir`` is true and
    a DESTDIR is configured, the path is re-rooted under that staging dir.
    """
    path = waflib.Utils.subst_vars(k, self.env)
    path = path.replace('/', os.sep)
    if destdir and self.env.DESTDIR:
        # drop any drive prefix and leading separator before re-rooting
        tail = osp.splitdrive(path)[1].lstrip(os.sep)
        path = os.path.join(self.env.DESTDIR, tail)
    return path
### ---------------------------------------------------------------------------
@conf
def hwaf_find_subpackages(self, directory='.'):
    """Return the sub-directory nodes of ``directory`` holding a wscript."""
    root_node = self.path.find_dir(directory)
    candidates = root_node.ant_glob('**/*', src=False, dir=True)
    # keep only the directories which carry their own wscript file
    return [node for node in candidates if node and node.ant_glob('wscript')]
### ---------------------------------------------------------------------------
def hwaf_find_suboptions(directory='.'):
    """Walk ``directory`` and return every sub-directory (including the
    top one) that directly contains a 'wscript' file."""
    return [root for root, _, files in os.walk(directory) if 'wscript' in files]
### ---------------------------------------------------------------------------
@conf
def find_at(ctx, check, what, where, **kwargs):
    """Run ``check`` for package ``what`` rooted at directory ``where``.

    The waf environment and os.environ (PATH, RPATH, LD_LIBRARY_PATH,
    PKG_CONFIG_PATH, ...) are temporarily extended with the conventional
    bin/lib/include sub-directories of ``where``; on configuration failure
    both are reverted and False is returned.
    """
    if not osp.exists(where):
        return False
    def _subst(v):
        # expand ${...} variables against the waf environment
        v = waflib.Utils.subst_vars(v, ctx.env)
        return v
    # snapshots used to restore the process environment on failure
    os_env = dict(os.environ)
    pkgp = os.getenv("PKG_CONFIG_PATH", "")
    try:
        ctx.env.stash()
        ctx.env[what + "_HOME"] = where
        incdir = osp.join(where, "include")
        bindir = osp.join(where, "bin")
        libdir = osp.join(where, "lib")
        ctx.env.append_value('PATH',  bindir)
        ctx.env.append_value('RPATH', libdir)
        ctx.env.append_value('LD_LIBRARY_PATH', libdir)
        os_keys = ("PATH", "RPATH", "LD_LIBRARY_PATH")
        if ctx.is_darwin():
            os_keys += ("DYLD_LIBRARY_PATH",)
            ctx.env.append_value('DYLD_LIBRARY_PATH', libdir)
            # NOTE(review): joins with os.sep rather than os.pathsep — looks
            # suspicious for a PATH-like variable; confirm before changing
            os.environ['DYLD_LIBRARY_PATH'] = os.sep.join(ctx.env['DYLD_LIBRARY_PATH'])
            pass
        pkgconf_path = osp.join(where, "lib/pkgconfig")
        ctx.env.append_value('PKG_CONFIG_PATH', pkgconf_path)
        ctx.to_log("Pkg config path: %s" % ctx.env.PKG_CONFIG_PATH)
        # mirror the waf environment into the process environment so the
        # spawned configuration tools see the candidate prefix
        for kk in os_keys:
            os.environ[kk] = os.pathsep.join(ctx.env[kk]+[os.getenv(kk,'')])
            pass
        if pkgp:
            os.environ["PKG_CONFIG_PATH"] = os.pathsep.join((pkgconf_path,pkgp))
        else:
            os.environ["PKG_CONFIG_PATH"] = pkgconf_path
        if osp.exists(incdir):
            ctx.parse_flags(_subst("${CPPPATH_ST}") % incdir,
                            uselib_store=kwargs["uselib_store"])
        if osp.exists(libdir):
            ctx.parse_flags(_subst("${LIBPATH_ST}") % libdir,
                            uselib_store=kwargs["uselib_store"])
        this_kwargs = kwargs.copy()
        this_kwargs['check_path'] = where
        if check == ctx.check_cfg:
            # check if the special xyz-config binary exists...
            if not this_kwargs['package'] and not osp.exists(bindir):
                ctx.fatal("no such directory [%s]" % bindir)
                pass
        check(**this_kwargs)
        return True
    except ctx.errors.ConfigurationError:
        # undo both the process environment and the waf environment
        os.environ = os_env
        os.environ["PKG_CONFIG_PATH"] = pkgp
        ctx.end_msg("failed", color="YELLOW")
        ctx.env.revert()
        return False
    return False
### ---------------------------------------------------------------------------
@conf
def check_with(ctx, check, what, *args, **kwargs):
    """
    Perform `check`, also looking at directories specified by the --with-X
    commandline option and X_HOME environment variable (X = what.upper())

    The extra_args
    """
    import os
    from os.path import abspath
    # adds 'extra_paths' and other defaults...
    kwargs = ctx._findbase_setup(kwargs)
    # candidate prefixes, highest priority first:
    # --with-X command line, then X_HOME env. var., then platform defaults
    with_dir = getattr(ctx.options, "with_" + what, None)
    env_dir = os.environ.get(what.upper() + "_HOME", None)
    paths = [with_dir, env_dir] + kwargs.pop("extra_paths", [])
    WHAT = what.upper()
    kwargs["uselib_store"] = kwargs.get("uselib_store", WHAT)
    kwargs["use"] = waflib.Utils.to_list(kwargs.get("use", [])) + \
                    waflib.Utils.to_list(kwargs["uselib_store"])
    for path in [abspath(p) for p in paths if p]:
        ctx.in_msg = 0
        ctx.to_log("Checking for %s in %s" % (what, path))
        if ctx.find_at(check, WHAT, path, **kwargs):
            #print ">> found %s at %s" % (what, path)
            ctx.in_msg = 0
            ctx.msg("Found %s at" % what, path, color="WHITE")
            ctx.declare_runtime_env(WHAT + "_HOME")
            return
        pass
    # not found in any explicit prefix: fall back to the ambient environment
    ctx.in_msg = 0
    check(**kwargs)
    ctx.in_msg = 0
    ctx.msg("Found %s at" % what, "(local environment)", color="WHITE")
    # FIXME: handle windows ?
    ctx.env[WHAT + "_HOME"] = "/usr"
    ctx.declare_runtime_env(WHAT + "_HOME")
    return
### ---------------------------------------------------------------------------
@conf
def _findbase_setup(ctx, kwargs):
    """Fill ``kwargs`` with the platform default search paths and the
    mandatory-flag bookkeeping used by check_with/find_at."""
    defaults = []
    if ctx.is_linux() or ctx.is_freebsd() or ctx.is_darwin():
        defaults.extend([
            #"/usr",
            #"/usr/local",
        ])
    # FIXME: should use use_macports...
    if ctx.is_darwin(): # and ctx.options.use_macports:
        # macports
        defaults.append("/opt/local")
    # FIXME: should use with_fink
    if ctx.is_darwin(): # and ctx.options.with_fink:
        # fink
        defaults.append("/sw")
    kwargs['extra_paths'] = waflib.Utils.to_list(
        kwargs.get('extra_paths', [])) + defaults
    kwargs['_check_mandatory'] = kwargs.get('mandatory', True)
    kwargs['mandatory'] = kwargs.get('mandatory', True)
    return kwargs
### ---------------------------------------------------------------------------
@conf
def read_cfg(ctx, fname):
    """
    read_cfg reads a MANIFEST-like file to extract a configuration.
    That configuration file must be in a format that ConfigParser understands.
    """
    fname = osp.abspath(fname)
    if not osp.exists(fname):
        ctx.fatal("no such file [%s]" % fname)
        return False
    # python2/python3 compatibility
    try: from ConfigParser import SafeConfigParser as CfgParser
    except ImportError: from configparser import ConfigParser as CfgParser
    cfg = CfgParser()
    cfg.read([fname])
    # top-level configuration: only fills options the user did not set
    if cfg.has_section('hwaf-cfg'):
        section = 'hwaf-cfg'
        for key in ('cmtcfg', 'prefix', 'projects', 'cmtpkgs'):
            if not cfg.has_option(section, key):
                continue
            if getattr(ctx.options, key) is None:
                # no command-line value: take the one from the file
                setattr(ctx.options, key, cfg.get(section, key))
    # pkg-level configuration: map sections onto --with-<pkg> options
    for section in cfg.sections():
        opt = 'with_%s' % section
        if not hasattr(ctx.options, opt):
            continue
        if getattr(ctx.options, opt) is not None:
            # user provided a value from command-line: that wins
            continue
        if not cfg.has_option(section, 'path'):
            # no useful info
            continue
        setattr(ctx.options, opt, cfg.get(section, 'path'))
    return True
### ---------------------------------------------------------------------------
@conf
def copy_uselib_defs(ctx, dst, src):
    """Copy the uselib variables of package ``src`` onto package ``dst``
    and define HAVE_<DST>=1."""
    keys = ('LIB', 'LIBPATH',
            'STLIB', 'STLIBPATH',
            'LINKFLAGS', 'RPATH',
            'CFLAGS', 'CXXFLAGS',
            'DFLAGS',
            'INCLUDES',
            'CXXDEPS', 'CCDEPS', 'LINKDEPS',
            'DEFINES',
            'FRAMEWORK', 'FRAMEWORKPATH',
            'ARCH')
    for key in keys:
        ctx.env['%s_%s' % (key, dst)] = ctx.env['%s_%s' % (key, src)]
    ctx.env.append_unique('DEFINES', 'HAVE_%s=1' % dst.upper())
    return
### ---------------------------------------------------------------------------
@conf
def define_uselib(self, name, libpath, libname, incpath, incname):
    """
    define_uselib creates the proper uselib variables based on the ``name``
    with the correct library-path ``libpath``, library name ``libname``,
    include-path ``incpath`` and header file ``incname``
    """
    env = self.env
    for value, key in ((libpath, 'LIBPATH_%s' % name),
                       (libname, 'LIB_%s' % name),
                       (incpath, 'INCLUDES_%s' % name)):
        if value:
            env[key] = waflib.Utils.to_list(value)
    # advertise availability, e.g. HAVE_FOO_BAR=1 for "foo-bar"
    env.append_unique('DEFINES', 'HAVE_%s=1' % name.upper().replace('-', '_'))
    return
### ------------------------------------------------------------------------
@conf
def declare_runtime_env(self, k):
    '''
    declare_runtime_env register a particular key ``k`` as the name of an
    environment variable the project will need at runtime.
    '''
    if not self.env.HWAF_RUNTIME_ENVVARS:
        self.env.HWAF_RUNTIME_ENVVARS = []
    if msg.verbose:
        # sanity check: a runtime env. var should hold a single value
        value = self.env[k]
        if value and isinstance(value, (list, tuple)) and len(value) != 1:
            raise KeyError("env[%s]=%s" % (k, value))
    self.env.append_unique('HWAF_RUNTIME_ENVVARS', k)
### ------------------------------------------------------------------------
@conf
def hwaf_export_module(self, fname="wscript"):
    '''
    hwaf_export_module registers the ``fname`` file for export.
    it will be installed in the ${PREFIX}/share/hwaf directory to be picked
    up by dependent projects.
    '''
    if not self.env.HWAF_MODULES:
        self.env.HWAF_MODULES = []
    if osp.isabs(fname):
        node = self.root.find_or_declare(fname)
    else:
        node = self.path.find_node(fname)
    if not node:
        self.fatal("could not find [%s]" % fname)
    #msg.info("::: exporting [%s]" % node.abspath())
    self.env.append_unique('HWAF_MODULES', node.abspath())
### ------------------------------------------------------------------------
@conf
def _get_env_for_subproc(self, os_env_keys=None):
    """Return an environment dict suitable for subprocess calls:
    ``os.environ`` with the registered runtime variables overridden from
    the waf environment and the usual build variables (CC, CXX, *FLAGS,
    ...) flattened to plain strings.

    :param os_env_keys: extra variable names to keep, merged with
        ``self.env.HWAF_RUNTIME_ENVVARS``
    """
    import os
    #env = dict(os.environ)
    #waf_env = dict(self.env)
    #for k,v in waf_env.items():
    env = dict(os.environ)
    #env = dict(self.env)
    if not os_env_keys:
        os_env_keys = []
    os_env_keys += self.env.HWAF_RUNTIME_ENVVARS
    for k,v in dict(self.env).items():
        if not k in os_env_keys:
            # not white-listed: drop it from the subprocess environment
            try: del env[k]
            except KeyError:pass
            continue
        v = self.env[k]
        #print("-- %s %s %r" % (k, type(k), v))
        if isinstance(v, (list,tuple)):
            v = list(v)
            # stringify waf nodes (and anything else) before joining
            for i,_ in enumerate(v):
                if hasattr(v[i], 'abspath'):
                    v[i] = v[i].abspath()
                else:
                    v[i] = str(v[i])
                    pass
                pass
            # handle xyzPATH variables (LD_LIBRARY_PATH, PYTHONPATH,...)
            if k.lower().endswith('path'):
                #print (">>> %s: %r" % (k,v))
                env[k] = os.pathsep.join(v)
            else:
                env[k] = ' '.join(v)
        else:
            env[k] = str(v)
            pass
        pass
    # prepend the build install-area (when defined) to the search paths
    bld_area = self.env['BUILD_INSTALL_AREA']
    if bld_area:
        env['LD_LIBRARY_PATH'] = os.pathsep.join(
            [os.path.join(bld_area,'lib')]
            +waflib.Utils.to_list(self.env['LD_LIBRARY_PATH'])
            +[os.environ.get('LD_LIBRARY_PATH','')])
        env['PATH'] = os.pathsep.join(
            [os.path.join(bld_area,'bin')]
            +waflib.Utils.to_list(self.env['PATH'])
            +[os.environ.get('PATH','')])
        env['PYTHONPATH'] = os.pathsep.join(
            [os.path.join(bld_area,'python')]
            +waflib.Utils.to_list(self.env['PYTHONPATH'])
            +[os.environ.get('PYTHONPATH','')])
        if self.is_darwin():
            env['DYLD_LIBRARY_PATH'] = os.pathsep.join(
                [os.path.join(bld_area,'lib')]
                +waflib.Utils.to_list(self.env['DYLD_LIBRARY_PATH'])
                +[os.environ.get('DYLD_LIBRARY_PATH','')])
            pass
    else:
        env['LD_LIBRARY_PATH'] = os.pathsep.join(
            waflib.Utils.to_list(self.env['LD_LIBRARY_PATH'])
            +[os.environ.get('LD_LIBRARY_PATH','')])
        env['PATH'] = os.pathsep.join(
            waflib.Utils.to_list(self.env['PATH'])
            +[os.environ.get('PATH','')])
        env['PYTHONPATH'] = os.pathsep.join(
            waflib.Utils.to_list(self.env['PYTHONPATH'])
            +[os.environ.get('PYTHONPATH','')])
        if self.is_darwin():
            env['DYLD_LIBRARY_PATH'] = os.pathsep.join(
                waflib.Utils.to_list(self.env['DYLD_LIBRARY_PATH'])
                +[os.environ.get('DYLD_LIBRARY_PATH','')])
            pass
        pass
    # flatten the build variables into single space-joined strings
    for k in ('CPPFLAGS',
              'CFLAGS',
              'CCFLAGS',
              'CXXFLAGS',
              'FCFLAGS',
              'LINKFLAGS',
              'SHLINKFLAGS',
              'AR',
              'ARFLAGS',
              'CC',
              'CXX',
              'LINK_CC',
              'LINK_CXX',
              ):
        v = self.env.get_flat(k)
        env[k] = str(v)
        pass
    env['SHLINKFLAGS'] += ' '+self.env.get_flat('LINKFLAGS_cshlib')
    env['SHEXT'] = self.dso_ext()[1:]
    # sanity: a subprocess environment must map strings to strings
    for k,v in env.items():
        if not isinstance(v, str):
            raise KeyError("env[%s]=%s" % (k,v))
    return env
### ------------------------------------------------------------------------
@conf
def _get_pkg_name(self):
    """Return the package name, derived from the package directory name."""
    # FIXME: should this be more explicit ?
    return self.path.name
### ------------------------------------------------------------------------
@conf
def _get_pkg_version_defines(self):
    """Return the PACKAGE_VERSION / PACKAGE_VERSION_UQ preprocessor defines
    for the current package, read from cmt/version.cmt when available."""
    def _defines(vers):
        # quoted and unquoted flavours of the same version string
        return ['PACKAGE_VERSION="%s"' % vers,
                'PACKAGE_VERSION_UQ=%s' % vers]
    pkg_name = _get_pkg_name(self)
    # fallback version when no cmt/version.cmt file exists
    fallback = "%s-XX-XX-XX" % pkg_name
    cmt_dir_node = self.path.get_src().find_dir('cmt')
    if not cmt_dir_node:
        return _defines(fallback)
    version_cmt = cmt_dir_node.find_resource('version.cmt')
    if not version_cmt:
        return _defines(fallback)
    #msg.debug("*** %s %r" % (pkg_name, pkg_vers))
    return _defines(version_cmt.read().strip())
## EOF ##
hwaf-base: recreate hwaf-module for current-project during build phase (to allow 'hwaf clean build')
# -*- python -*-
# stdlib imports
import os
import os.path as osp
import sys
# waf imports ---
import waflib.Options
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
_heptooldir = osp.dirname(osp.abspath(__file__))
# add this directory to sys.path to ease the loading of other hepwaf tools
if not _heptooldir in sys.path: sys.path.append(_heptooldir)
### ---------------------------------------------------------------------------
def options(ctx):
    """Declare hwaf command-line options, load the hwaf tool-suite option
    hooks and recurse into sub-packages found under 'src' (when present)."""
    # the MacPorts/Fink toggles are only meaningful on OS X
    if 'darwin' in sys.platform:
        ctx.add_option(
            '--use-macports',
            default=None,
            action='store_true',
            help="Enable MacPorts")
        ctx.add_option(
            '--use-fink',
            default=None,
            action='store_true',
            help="Enable Fink")
        pass
    ctx.add_option(
        '--relocate-from',
        default=None,
        help='top-level path to relocate against (default=${PREFIX})',
        )
    ctx.add_option(
        '--project-version',
        default=None,
        help='modify the project version used during build',
        )
    ctx.add_option(
        '--local-cfg',
        default=None,
        help="Path to the local config file listing all type of configuration infos")
    # pull in the options of the rest of the hwaf tool-suite
    ctx.load('hwaf-system', tooldir=_heptooldir)
    ctx.load('hwaf-dist', tooldir=_heptooldir)
    ctx.load('hwaf-project-mgr', tooldir=_heptooldir)
    ctx.load('hwaf-runtime', tooldir=_heptooldir)
    # let every sub-package declare its own options too
    pkgdir = 'src'
    if osp.exists(pkgdir):
        pkgs = hwaf_find_suboptions(pkgdir)
        ctx.recurse(pkgs, mandatory=False)
    return
### ---------------------------------------------------------------------------
def configure(ctx):
    """Configure the hwaf project: process the optional local manifest,
    load the hwaf tool-suite, register the runtime environment variables,
    configure the project and print a configuration summary."""
    # optional MANIFEST-like file providing defaults for unset options
    if ctx.options.local_cfg:
        fname = osp.abspath(ctx.options.local_cfg)
        ctx.start_msg("Manifest file")
        ctx.end_msg(fname)
        ok = ctx.read_cfg(fname)
        ctx.start_msg("Manifest file processing")
        ctx.end_msg(ok)
        pass
    if not ctx.env.HWAF_MODULES: ctx.env.HWAF_MODULES = []
    ctx.load('hwaf-system', tooldir=_heptooldir)
    ctx.load('hwaf-dist', tooldir=_heptooldir)
    ctx.load('hwaf-project-mgr', tooldir=_heptooldir)
    ctx.load('hwaf-runtime', tooldir=_heptooldir)
    # register a couple of runtime environment variables
    ctx.declare_runtime_env('PATH')
    ctx.declare_runtime_env('RPATH')
    ctx.declare_runtime_env('LD_LIBRARY_PATH')
    ctx.declare_runtime_env('PYTHONPATH')
    if ctx.is_darwin():
        ctx.declare_runtime_env('DYLD_LIBRARY_PATH')
        pass
    ctx.declare_runtime_env('PKG_CONFIG_PATH')
    ctx.declare_runtime_env('CMTCFG')
    # propagate build variables and the usual interactive-shell variables
    for k in ['CPPFLAGS',
              'CFLAGS',
              'CCFLAGS',
              'CXXFLAGS',
              'FCFLAGS',
              'LINKFLAGS',
              'SHLINKFLAGS',
              'SHLIB_MARKER',
              'AR',
              'ARFLAGS',
              'CC',
              'CXX',
              'LINK_CC',
              'LINK_CXX',
              'LIBPATH',
              'DEFINES',
              'EXTERNAL_AREA',
              'INSTALL_AREA',
              'INSTALL_AREA_BINDIR',
              'INSTALL_AREA_LIBDIR',
              'PREFIX',
              'DESTDIR',
              'BINDIR',
              'LIBDIR',
              'HOME',
              'EDITOR',
              'USER',
              'LANG',
              'LC_ALL',
              'TERM',
              'TERMCAP',
              'HISTORY',
              'HISTSIZE',
              'PS1',
              'SHELL',
              'PWD',
              'OLDPWD',
              'DISPLAY',
              ]:
        ctx.declare_runtime_env(k)
        pass
    # configure project
    ctx._hwaf_configure_project()
    # display project infos...
    msg.info('='*80)
    ctx.msg('project', '%s-%s' % (ctx.env.HWAF_PROJECT_NAME,
                                  ctx.env.HWAF_PROJECT_VERSION))
    ctx.msg('prefix', ctx.env.PREFIX)
    if ctx.env.DESTDIR:
        ctx.msg('destdir', ctx.env.DESTDIR)
        pass
    ctx.msg('pkg dir', ctx.env.CMTPKGS)
    ctx.msg('variant', ctx.env.CMTCFG)
    ctx.msg('arch', ctx.env.CFG_ARCH)
    ctx.msg('OS', ctx.env.CFG_OS)
    ctx.msg('compiler', ctx.env.CFG_COMPILER)
    ctx.msg('build-type', ctx.env.CFG_TYPE)
    deps = ctx.hwaf_project_deps()
    if deps: deps = ','.join(deps)
    else: deps = 'None'
    ctx.msg('projects deps', deps)
    ctx.msg('install-area', ctx.env.INSTALL_AREA)
    ctx.msg('njobs-max', waflib.Options.options.jobs)
    msg.info('='*80)
    return
def build(ctx):
    """Load the hwaf tool-suite and build the project."""
    ctx.load('hwaf-system', tooldir=_heptooldir)
    ctx.load('hwaf-dist', tooldir=_heptooldir)
    ctx.load('hwaf-project-mgr', tooldir=_heptooldir)
    ctx.load('hwaf-runtime', tooldir=_heptooldir)
    # recreate the hwaf-module for the current project during the build
    # phase as well, so that 'hwaf clean build' works in a single run
    ctx._hwaf_create_project_hwaf_module()
    ctx._hwaf_load_project_hwaf_module(do_export=False)
    return
### ---------------------------------------------------------------------------
@conf
def hwaf_get_install_path(self, k, destdir=True):
    """
    Return the installation path for ``k``: ``${...}`` variables are
    substituted from the waf environment and, when ``destdir`` is true and
    a DESTDIR is configured, the path is re-rooted under that staging dir.
    """
    path = waflib.Utils.subst_vars(k, self.env)
    path = path.replace('/', os.sep)
    if destdir and self.env.DESTDIR:
        # drop any drive prefix and leading separator before re-rooting
        tail = osp.splitdrive(path)[1].lstrip(os.sep)
        path = os.path.join(self.env.DESTDIR, tail)
    return path
### ---------------------------------------------------------------------------
@conf
def hwaf_find_subpackages(self, directory='.'):
    """Return the sub-directory nodes of ``directory`` holding a wscript."""
    root_node = self.path.find_dir(directory)
    candidates = root_node.ant_glob('**/*', src=False, dir=True)
    # keep only the directories which carry their own wscript file
    return [node for node in candidates if node and node.ant_glob('wscript')]
### ---------------------------------------------------------------------------
def hwaf_find_suboptions(directory='.'):
    """Walk ``directory`` and return every sub-directory (including the
    top one) that directly contains a 'wscript' file."""
    return [root for root, _, files in os.walk(directory) if 'wscript' in files]
### ---------------------------------------------------------------------------
@conf
def find_at(ctx, check, what, where, **kwargs):
    """Run ``check`` for package ``what`` rooted at directory ``where``.

    The waf environment and os.environ (PATH, RPATH, LD_LIBRARY_PATH,
    PKG_CONFIG_PATH, ...) are temporarily extended with the conventional
    bin/lib/include sub-directories of ``where``; on configuration failure
    both are reverted and False is returned.
    """
    if not osp.exists(where):
        return False
    def _subst(v):
        # expand ${...} variables against the waf environment
        v = waflib.Utils.subst_vars(v, ctx.env)
        return v
    # snapshots used to restore the process environment on failure
    os_env = dict(os.environ)
    pkgp = os.getenv("PKG_CONFIG_PATH", "")
    try:
        ctx.env.stash()
        ctx.env[what + "_HOME"] = where
        incdir = osp.join(where, "include")
        bindir = osp.join(where, "bin")
        libdir = osp.join(where, "lib")
        ctx.env.append_value('PATH',  bindir)
        ctx.env.append_value('RPATH', libdir)
        ctx.env.append_value('LD_LIBRARY_PATH', libdir)
        os_keys = ("PATH", "RPATH", "LD_LIBRARY_PATH")
        if ctx.is_darwin():
            os_keys += ("DYLD_LIBRARY_PATH",)
            ctx.env.append_value('DYLD_LIBRARY_PATH', libdir)
            # NOTE(review): joins with os.sep rather than os.pathsep — looks
            # suspicious for a PATH-like variable; confirm before changing
            os.environ['DYLD_LIBRARY_PATH'] = os.sep.join(ctx.env['DYLD_LIBRARY_PATH'])
            pass
        pkgconf_path = osp.join(where, "lib/pkgconfig")
        ctx.env.append_value('PKG_CONFIG_PATH', pkgconf_path)
        ctx.to_log("Pkg config path: %s" % ctx.env.PKG_CONFIG_PATH)
        # mirror the waf environment into the process environment so the
        # spawned configuration tools see the candidate prefix
        for kk in os_keys:
            os.environ[kk] = os.pathsep.join(ctx.env[kk]+[os.getenv(kk,'')])
            pass
        if pkgp:
            os.environ["PKG_CONFIG_PATH"] = os.pathsep.join((pkgconf_path,pkgp))
        else:
            os.environ["PKG_CONFIG_PATH"] = pkgconf_path
        if osp.exists(incdir):
            ctx.parse_flags(_subst("${CPPPATH_ST}") % incdir,
                            uselib_store=kwargs["uselib_store"])
        if osp.exists(libdir):
            ctx.parse_flags(_subst("${LIBPATH_ST}") % libdir,
                            uselib_store=kwargs["uselib_store"])
        this_kwargs = kwargs.copy()
        this_kwargs['check_path'] = where
        if check == ctx.check_cfg:
            # check if the special xyz-config binary exists...
            if not this_kwargs['package'] and not osp.exists(bindir):
                ctx.fatal("no such directory [%s]" % bindir)
                pass
        check(**this_kwargs)
        return True
    except ctx.errors.ConfigurationError:
        # undo both the process environment and the waf environment
        os.environ = os_env
        os.environ["PKG_CONFIG_PATH"] = pkgp
        ctx.end_msg("failed", color="YELLOW")
        ctx.env.revert()
        return False
    return False
### ---------------------------------------------------------------------------
@conf
def check_with(ctx, check, what, *args, **kwargs):
    """
    Perform `check`, also looking at directories specified by the --with-X
    commandline option and X_HOME environment variable (X = what.upper())

    The extra_args
    """
    import os
    from os.path import abspath
    # adds 'extra_paths' and other defaults...
    kwargs = ctx._findbase_setup(kwargs)
    # candidate prefixes, highest priority first:
    # --with-X command line, then X_HOME env. var., then platform defaults
    with_dir = getattr(ctx.options, "with_" + what, None)
    env_dir = os.environ.get(what.upper() + "_HOME", None)
    paths = [with_dir, env_dir] + kwargs.pop("extra_paths", [])
    WHAT = what.upper()
    kwargs["uselib_store"] = kwargs.get("uselib_store", WHAT)
    kwargs["use"] = waflib.Utils.to_list(kwargs.get("use", [])) + \
                    waflib.Utils.to_list(kwargs["uselib_store"])
    for path in [abspath(p) for p in paths if p]:
        ctx.in_msg = 0
        ctx.to_log("Checking for %s in %s" % (what, path))
        if ctx.find_at(check, WHAT, path, **kwargs):
            #print ">> found %s at %s" % (what, path)
            ctx.in_msg = 0
            ctx.msg("Found %s at" % what, path, color="WHITE")
            ctx.declare_runtime_env(WHAT + "_HOME")
            return
        pass
    # not found in any explicit prefix: fall back to the ambient environment
    ctx.in_msg = 0
    check(**kwargs)
    ctx.in_msg = 0
    ctx.msg("Found %s at" % what, "(local environment)", color="WHITE")
    # FIXME: handle windows ?
    ctx.env[WHAT + "_HOME"] = "/usr"
    ctx.declare_runtime_env(WHAT + "_HOME")
    return
### ---------------------------------------------------------------------------
@conf
def _findbase_setup(ctx, kwargs):
    """Fill ``kwargs`` with the platform default search paths and the
    mandatory-flag bookkeeping used by check_with/find_at."""
    defaults = []
    if ctx.is_linux() or ctx.is_freebsd() or ctx.is_darwin():
        defaults.extend([
            #"/usr",
            #"/usr/local",
        ])
    # FIXME: should use use_macports...
    if ctx.is_darwin(): # and ctx.options.use_macports:
        # macports
        defaults.append("/opt/local")
    # FIXME: should use with_fink
    if ctx.is_darwin(): # and ctx.options.with_fink:
        # fink
        defaults.append("/sw")
    kwargs['extra_paths'] = waflib.Utils.to_list(
        kwargs.get('extra_paths', [])) + defaults
    kwargs['_check_mandatory'] = kwargs.get('mandatory', True)
    kwargs['mandatory'] = kwargs.get('mandatory', True)
    return kwargs
### ---------------------------------------------------------------------------
@conf
def read_cfg(ctx, fname):
    """
    read_cfg reads a MANIFEST-like file to extract a configuration.
    That configuration file must be in a format that ConfigParser understands.
    """
    fname = osp.abspath(fname)
    if not osp.exists(fname):
        ctx.fatal("no such file [%s]" % fname)
        return False
    # python2/python3 compatibility
    try: from ConfigParser import SafeConfigParser as CfgParser
    except ImportError: from configparser import ConfigParser as CfgParser
    cfg = CfgParser()
    cfg.read([fname])
    # top-level configuration: only fills options the user did not set
    if cfg.has_section('hwaf-cfg'):
        section = 'hwaf-cfg'
        for key in ('cmtcfg', 'prefix', 'projects', 'cmtpkgs'):
            if not cfg.has_option(section, key):
                continue
            if getattr(ctx.options, key) is None:
                # no command-line value: take the one from the file
                setattr(ctx.options, key, cfg.get(section, key))
    # pkg-level configuration: map sections onto --with-<pkg> options
    for section in cfg.sections():
        opt = 'with_%s' % section
        if not hasattr(ctx.options, opt):
            continue
        if getattr(ctx.options, opt) is not None:
            # user provided a value from command-line: that wins
            continue
        if not cfg.has_option(section, 'path'):
            # no useful info
            continue
        setattr(ctx.options, opt, cfg.get(section, 'path'))
    return True
### ---------------------------------------------------------------------------
@conf
def copy_uselib_defs(ctx, dst, src):
    """Copy the uselib variables of package ``src`` onto package ``dst``
    and define HAVE_<DST>=1."""
    keys = ('LIB', 'LIBPATH',
            'STLIB', 'STLIBPATH',
            'LINKFLAGS', 'RPATH',
            'CFLAGS', 'CXXFLAGS',
            'DFLAGS',
            'INCLUDES',
            'CXXDEPS', 'CCDEPS', 'LINKDEPS',
            'DEFINES',
            'FRAMEWORK', 'FRAMEWORKPATH',
            'ARCH')
    for key in keys:
        ctx.env['%s_%s' % (key, dst)] = ctx.env['%s_%s' % (key, src)]
    ctx.env.append_unique('DEFINES', 'HAVE_%s=1' % dst.upper())
    return
### ---------------------------------------------------------------------------
@conf
def define_uselib(self, name, libpath, libname, incpath, incname):
    """
    define_uselib creates the proper uselib variables based on the ``name``
    with the correct library-path ``libpath``, library name ``libname``,
    include-path ``incpath`` and header file ``incname``
    """
    env = self.env
    for value, key in ((libpath, 'LIBPATH_%s' % name),
                       (libname, 'LIB_%s' % name),
                       (incpath, 'INCLUDES_%s' % name)):
        if value:
            env[key] = waflib.Utils.to_list(value)
    # advertise availability, e.g. HAVE_FOO_BAR=1 for "foo-bar"
    env.append_unique('DEFINES', 'HAVE_%s=1' % name.upper().replace('-', '_'))
    return
### ------------------------------------------------------------------------
@conf
def declare_runtime_env(self, k):
    '''
    declare_runtime_env register a particular key ``k`` as the name of an
    environment variable the project will need at runtime.
    '''
    if not self.env.HWAF_RUNTIME_ENVVARS:
        self.env.HWAF_RUNTIME_ENVVARS = []
    if msg.verbose:
        # sanity check: a runtime env. var should hold a single value
        value = self.env[k]
        if value and isinstance(value, (list, tuple)) and len(value) != 1:
            raise KeyError("env[%s]=%s" % (k, value))
    self.env.append_unique('HWAF_RUNTIME_ENVVARS', k)
### ------------------------------------------------------------------------
@conf
def hwaf_export_module(self, fname="wscript"):
    '''
    hwaf_export_module registers the ``fname`` file for export.
    it will be installed in the ${PREFIX}/share/hwaf directory to be picked
    up by dependent projects.
    '''
    if not self.env.HWAF_MODULES:
        self.env.HWAF_MODULES = []
    if osp.isabs(fname):
        node = self.root.find_or_declare(fname)
    else:
        node = self.path.find_node(fname)
    if not node:
        self.fatal("could not find [%s]" % fname)
    #msg.info("::: exporting [%s]" % node.abspath())
    self.env.append_unique('HWAF_MODULES', node.abspath())
### ------------------------------------------------------------------------
@conf
def _get_env_for_subproc(self, os_env_keys=None):
    """Return an environment dict suitable for subprocess calls:
    ``os.environ`` with the registered runtime variables overridden from
    the waf environment and the usual build variables (CC, CXX, *FLAGS,
    ...) flattened to plain strings.

    :param os_env_keys: extra variable names to keep, merged with
        ``self.env.HWAF_RUNTIME_ENVVARS``
    """
    import os
    #env = dict(os.environ)
    #waf_env = dict(self.env)
    #for k,v in waf_env.items():
    env = dict(os.environ)
    #env = dict(self.env)
    if not os_env_keys:
        os_env_keys = []
    os_env_keys += self.env.HWAF_RUNTIME_ENVVARS
    for k,v in dict(self.env).items():
        if not k in os_env_keys:
            # not white-listed: drop it from the subprocess environment
            try: del env[k]
            except KeyError:pass
            continue
        v = self.env[k]
        #print("-- %s %s %r" % (k, type(k), v))
        if isinstance(v, (list,tuple)):
            v = list(v)
            # stringify waf nodes (and anything else) before joining
            for i,_ in enumerate(v):
                if hasattr(v[i], 'abspath'):
                    v[i] = v[i].abspath()
                else:
                    v[i] = str(v[i])
                    pass
                pass
            # handle xyzPATH variables (LD_LIBRARY_PATH, PYTHONPATH,...)
            if k.lower().endswith('path'):
                #print (">>> %s: %r" % (k,v))
                env[k] = os.pathsep.join(v)
            else:
                env[k] = ' '.join(v)
        else:
            env[k] = str(v)
            pass
        pass
    # prepend the build install-area (when defined) to the search paths
    bld_area = self.env['BUILD_INSTALL_AREA']
    if bld_area:
        env['LD_LIBRARY_PATH'] = os.pathsep.join(
            [os.path.join(bld_area,'lib')]
            +waflib.Utils.to_list(self.env['LD_LIBRARY_PATH'])
            +[os.environ.get('LD_LIBRARY_PATH','')])
        env['PATH'] = os.pathsep.join(
            [os.path.join(bld_area,'bin')]
            +waflib.Utils.to_list(self.env['PATH'])
            +[os.environ.get('PATH','')])
        env['PYTHONPATH'] = os.pathsep.join(
            [os.path.join(bld_area,'python')]
            +waflib.Utils.to_list(self.env['PYTHONPATH'])
            +[os.environ.get('PYTHONPATH','')])
        if self.is_darwin():
            env['DYLD_LIBRARY_PATH'] = os.pathsep.join(
                [os.path.join(bld_area,'lib')]
                +waflib.Utils.to_list(self.env['DYLD_LIBRARY_PATH'])
                +[os.environ.get('DYLD_LIBRARY_PATH','')])
            pass
    else:
        env['LD_LIBRARY_PATH'] = os.pathsep.join(
            waflib.Utils.to_list(self.env['LD_LIBRARY_PATH'])
            +[os.environ.get('LD_LIBRARY_PATH','')])
        env['PATH'] = os.pathsep.join(
            waflib.Utils.to_list(self.env['PATH'])
            +[os.environ.get('PATH','')])
        env['PYTHONPATH'] = os.pathsep.join(
            waflib.Utils.to_list(self.env['PYTHONPATH'])
            +[os.environ.get('PYTHONPATH','')])
        if self.is_darwin():
            env['DYLD_LIBRARY_PATH'] = os.pathsep.join(
                waflib.Utils.to_list(self.env['DYLD_LIBRARY_PATH'])
                +[os.environ.get('DYLD_LIBRARY_PATH','')])
            pass
        pass
    # flatten the build variables into single space-joined strings
    for k in ('CPPFLAGS',
              'CFLAGS',
              'CCFLAGS',
              'CXXFLAGS',
              'FCFLAGS',
              'LINKFLAGS',
              'SHLINKFLAGS',
              'AR',
              'ARFLAGS',
              'CC',
              'CXX',
              'LINK_CC',
              'LINK_CXX',
              ):
        v = self.env.get_flat(k)
        env[k] = str(v)
        pass
    env['SHLINKFLAGS'] += ' '+self.env.get_flat('LINKFLAGS_cshlib')
    env['SHEXT'] = self.dso_ext()[1:]
    # sanity: a subprocess environment must map strings to strings
    for k,v in env.items():
        if not isinstance(v, str):
            raise KeyError("env[%s]=%s" % (k,v))
    return env
### ------------------------------------------------------------------------
@conf
def _get_pkg_name(self):
    """Return the package name, derived from the package directory name."""
    # FIXME: should this be more explicit ?
    return self.path.name
### ------------------------------------------------------------------------
@conf
def _get_pkg_version_defines(self):
    """Return the PACKAGE_VERSION / PACKAGE_VERSION_UQ preprocessor defines
    for the current package, read from cmt/version.cmt when available."""
    def _defines(vers):
        # quoted and unquoted flavours of the same version string
        return ['PACKAGE_VERSION="%s"' % vers,
                'PACKAGE_VERSION_UQ=%s' % vers]
    pkg_name = _get_pkg_name(self)
    # fallback version when no cmt/version.cmt file exists
    fallback = "%s-XX-XX-XX" % pkg_name
    cmt_dir_node = self.path.get_src().find_dir('cmt')
    if not cmt_dir_node:
        return _defines(fallback)
    version_cmt = cmt_dir_node.find_resource('version.cmt')
    if not version_cmt:
        return _defines(fallback)
    #msg.debug("*** %s %r" % (pkg_name, pkg_vers))
    return _defines(version_cmt.read().strip())
## EOF ##
|
#!/usr/bin/env python3
import os
import sys
import re
import glob
import json
import argparse
from datetime import datetime
from collections import OrderedDict as odict
from multiprocessing import cpu_count as cpu_count
from .conf import *
from .util import *
from .cmake_sysinfo import *
from .vsinfo import *
# -----------------------------------------------------------------------------
class BuildItem:
    """A base class for build items; an item is identified by its name,
    which is also its string representation."""
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return self.name
    # repr and str are intentionally the same
    __repr__ = __str__
# -----------------------------------------------------------------------------
class BuildType(BuildItem):
    """Specifies a build type, ie, one of Release, Debug, etc"""
    @staticmethod
    def default():
        """The build type used when none is requested: Release."""
        return BuildType("Release")
# -----------------------------------------------------------------------------
class System(BuildItem):
    """Specifies an operating system"""
    @staticmethod
    def default():
        """return the current operating system"""
        # __class__ resolves to System via the implicit closure cell
        return System(__class__.default_str())
    @staticmethod
    def default_str():
        # name as reported by cmake; normalize both spellings of macOS
        s = CMakeSysInfo.system_name()
        if s == "mac os x" or s == "Darwin":
            s = "mac"
        return s
# -----------------------------------------------------------------------------
class Architecture(BuildItem):
    """Specifies a processor architecture"""
    @staticmethod
    def default():
        """return the architecture of the current machine"""
        return Architecture(__class__.default_str())
    @staticmethod
    def default_str():
        # cmake reports "amd64" on some platforms; normalize to x86_64
        s = CMakeSysInfo.architecture()
        if s == "amd64":
            s = "x86_64"
        return s
    # # http://stackoverflow.com/a/12578715/5875572
    # import platform
    # machine = platform.machine()
    # if machine.endswith('64'):
    #     return Architecture('x86_64')
    # elif machine.endswith('86'):
    #     return Architecture('x32')
    # raise Exception("unknown architecture")
    @property
    def is64(self):
        # True when the name contains "64"; result cached in self._is64
        def fn():
            s = re.search('64', self.name)
            return s is not None
        return cacheattr(self, "_is64", fn)
    @property
    def is32(self):
        # defined as the complement of is64
        return not self.is64
    @property
    def is_arm(self):
        return "arm" in self.name.lower()
# -----------------------------------------------------------------------------
class CompileOptions:
    """A named bundle of cmake flags, compiler flags, linker flags and macros."""
    def __init__(self, name=""):
        self.name = name
        self.cmake_flags = []
        self.cflags = []
        self.lflags = []
        self.macros = []
    def merge(self, other):
        """other will take precedence, ie, their options will come last"""
        combined = CompileOptions(self.name + "+" + other.name)
        for attr in ('cmake_flags', 'cflags', 'lflags', 'macros'):
            setattr(combined, attr, getattr(self, attr) + getattr(other, attr))
        return combined
# -----------------------------------------------------------------------------
class Compiler(BuildItem):
    """Specifies a compiler"""
    @staticmethod
    def default():
        # default to the compiler cmake would pick on this system
        return Compiler(__class__.default_str())
    @staticmethod
    def default_str():
        # on non-windows systems use cmake's default C++ compiler; on
        # windows prefer an installed Visual Studio, falling back to cmake
        if str(System.default()) != "windows":
            cpp = CMakeSysInfo.cxx_compiler()
        else:
            vs = VisualStudioInfo.find_any()
            cpp = vs.name if vs is not None else CMakeSysInfo.cxx_compiler()
        return cpp
    def __init__(self, path):
        # accept either a Visual Studio spec (eg "vs2015_64") or the
        # name/path of a compiler executable
        if path.startswith("vs") or path.startswith("Visual Studio"):
            vs = VisualStudioInfo(path)
            self.vs = vs
            path = vs.cxx_compiler
        else:
            p = which(path)
            if p is None:
                raise Exception("compiler not found: " + path)
            if p != path:
                print("compiler: selected {} for {}".format(p, path))
            path = os.path.abspath(p)
        name, version, version_full = self.get_version(path)
        self.shortname = name
        self.gcclike = self.shortname in ('gcc', 'clang', 'icc')
        self.is_msvc = self.shortname.startswith('vs')
        if not self.is_msvc:
            # eg "gcc5.4"; Visual Studio names already carry the year
            name += version
        self.path = path
        self.version = version
        self.version_full = version_full
        super().__init__(name)
        self.c_compiler = __class__.get_c_compiler(self.shortname, self.path)
    @staticmethod
    def get_c_compiler(shortname, cxx_compiler):
        # derive the C compiler executable name from the C++ one
        # if cxx_compiler.endswith("c++") or cxx_compiler.endswith('c++.exe'):
        #     cc = re.sub(r'c\+\+', r'cc', cxx_compiler)
        if shortname == "icc":
            cc = re.sub(r'icpc', r'icc', cxx_compiler)
        elif shortname == "gcc":
            cc = re.sub(r'g\+\+', r'gcc', cxx_compiler)
        elif shortname == "clang":
            cc = re.sub(r'clang\+\+', r'clang', cxx_compiler)
        else:
            cc = cxx_compiler
        return cc
    def get_version(self, path):
        """Return (shortname, major.minor version, full version line) for the
        compiler at path, by running it with --version / -dumpversion."""
        # is this visual studio?
        if hasattr(self, "vs"):
            return self.vs.name, str(self.vs.year), self.vs.name
        # # other compilers
        # print("cmp: found compiler:", name, path)
        out = runsyscmd([path, '--version'], echo_cmd=False, echo_output=False, capture_output=True).strip("\n")
        version_full = out.split("\n")[0]
        splits = version_full.split(" ")
        name = splits[0].lower()
        # print("cmp: version:", name, "---", firstline, "---")
        vregex = r'(\d+\.\d+)\.\d+'  # keep major.minor, drop the patch level
        if name.startswith("g++") or name.startswith("gcc"):
            name = "gcc"
            version = runsyscmd([path, '-dumpversion'], echo_cmd=False, echo_output=False, capture_output=True).strip("\n")
            version = re.sub(vregex, r'\1', version)
            # print("gcc version:", version, "---")
        elif name.startswith("clang"):
            name = "clang"
            version = re.sub(r'clang version ' + vregex + '.*', r'\1', version_full)
            # print("clang version:", version, "---")
        elif name.startswith("icpc"):
            name = "icc"
            version = re.sub(r'icpc \(ICC\) ' + vregex + '.*', r'\1', version_full)
            # print("icc version:", version, "---")
        else:
            # unknown compiler: try the generic --dumpversion flag
            version = runsyscmd([path, '--dumpversion'], echo_cmd=False, echo_output=False, capture_output=True).strip("\n")
            version = re.sub(vregex, r'\1', version)
        #
        return name, version, version_full
# -----------------------------------------------------------------------------
class Variant(BuildItem):
    """for variations in compile options"""
    def __init__(self, name):
        # a variant starts out with an empty set of compile options
        super().__init__(name)
        self.options = CompileOptions()
# -----------------------------------------------------------------------------
class Generator(BuildItem):
    """Visual Studio aliases example:
    vs2013: use the bitness of the default OS
    vs2013_32: use 32bit version
    vs2013_64: use 64bit version
    """
    @staticmethod
    def default():
        return Generator(__class__.default_str(), cpu_count())
    @staticmethod
    def default_str():
        # the generator cmake picks by default on this system
        s = CMakeSysInfo.generator()
        return s
    @staticmethod
    def create_default(system, arch, compiler, num_jobs):
        # NOTE(review): system and arch are currently unused here -- confirm
        if not compiler.is_msvc:
            # a non-msvc compiler on windows cannot use the VS generator
            if System.default_str() == "windows":
                return Generator("Unix Makefiles", num_jobs)
            else:
                return Generator(__class__.default_str(), num_jobs)
        else:
            return Generator(compiler.name, num_jobs)
    @staticmethod
    def resolve_alias(gen):
        # translate a vs-style alias into the cmake generator name
        if gen.startswith('vs') or gen.startswith('Visual Studio'):
            return VisualStudioInfo.to_gen(gen)
        return gen
    def __init__(self, name, num_jobs):
        if name.startswith('vs'):
            n = name  # NOTE(review): n is never used
            name = VisualStudioInfo.to_gen(name)
        # NOTE(review): alias is assigned after resolution, so it holds the
        # resolved generator name, not the original alias -- confirm intent
        self.alias = name
        super().__init__(name)
        self.num_jobs = num_jobs
        self.is_makefile = name.endswith("Makefiles")
        self.is_ninja = name.endswith("Ninja")
        self.is_msvc = name.startswith("Visual Studio")
    def configure_args(self, build):
        # arguments selecting this generator at cmake-configure time
        if self.name != "":
            if self.is_msvc and build.compiler.vs.toolset is not None:
                return ['-G', '"{}"'.format(self.name), '-T', build.compiler.vs.toolset]
            else:
                return ['-G', '"{}"'.format(self.name)]
        else:
            return []
    def cmd(self, targets, build):
        """Return the command line which builds the given targets."""
        if self.is_makefile:
            return ['make', '-j', str(self.num_jobs)] + targets
        elif self.is_msvc:
            # find the solution file once and cache it on the instance
            if not hasattr(self, "sln"):
                sln_files = glob.glob("*.sln")
                if len(sln_files) != 1:
                    # NOTE(review): also raised when there are zero solution
                    # files, despite the message wording
                    raise Exception("there's more than one solution file in the project folder")
                self.sln = sln_files[0]
            return [build.compiler.vs.msbuild, self.sln,
                    '/maxcpucount:'+str(self.num_jobs),
                    '/property:Configuration='+str(build.buildtype),
                    '/target:'+';'.join(targets)]
        else:
            # NOTE(review): '--target t' is emitted as a single list element;
            # confirm the command is joined into a shell string before running
            return ['cmake', '--build', '.', '--config', str(build.buildtype) ] + ['--target '+ t for t in targets ]
    def install(self, build):
        return ['cmake', '--build', '.', '--config', str(build.buildtype), '--target', 'install']
    """
    generators: https://cmake.org/cmake/help/v3.7/manual/cmake-generators.7.html
    Unix Makefiles
    MSYS Makefiles
    MinGW Makefiles
    NMake Makefiles
    Ninja
    Watcom WMake
    CodeBlocks - Ninja
    CodeBlocks - Unix Makefiles
    CodeBlocks - MinGW Makefiles
    CodeBlocks - NMake Makefiles
    CodeLite - Ninja
    CodeLite - Unix Makefiles
    CodeLite - MinGW Makefiles
    CodeLite - NMake Makefiles
    Eclipse CDT4 - Ninja
    Eclipse CDT4 - Unix Makefiles
    Eclipse CDT4 - MinGW Makefiles
    Eclipse CDT4 - NMake Makefiles
    KDevelop3
    KDevelop3 - Unix Makefiles
    Kate - Ninja
    Kate - Unix Makefiles
    Kate - MinGW Makefiles
    Kate - NMake Makefiles
    Sublime Text 2 - Ninja
    Sublime Text 2 - Unix Makefiles
    Sublime Text 2 - MinGW Makefiles
    Sublime Text 2 - NMake Makefiles
    Visual Studio 6
    Visual Studio 7
    Visual Studio 7 .NET 2003
    Visual Studio 8 2005 [Win64|IA64]
    Visual Studio 9 2008 [Win64|IA64]
    Visual Studio 10 2010 [Win64|IA64]
    Visual Studio 11 2012 [Win64|ARM]
    Visual Studio 12 2013 [Win64|ARM]
    Visual Studio 14 2015 [Win64|ARM]
    Visual Studio 15 2017 [Win64|ARM]
    Green Hills MULTI
    Xcode
    """
# -----------------------------------------------------------------------------
class Build:
    """Holds one build's settings (system, architecture, build type,
    compiler, variant) and drives cmake for its build tree."""
    # name of the cmake cache-preload file written into each build dir
    pfile = "cmany_preload.cmake"
    def __init__(self, proj_root, build_root, install_root,
                 system, arch, buildtype, compiler, variant,
                 num_jobs):
        # fix: pass the actual system argument (the `sys` module was
        # mistakenly being passed before)
        self.generator = Generator.create_default(system, arch, compiler, num_jobs)
        self.system = system
        self.architecture = arch
        self.buildtype = buildtype
        self.compiler = compiler
        self.variant = variant
        # self.crosscompile = (system != System.default())
        # self.toolchain = None
        self.projdir = chkf(proj_root)
        self.buildroot = os.path.abspath(build_root)
        self.builddir = os.path.abspath(os.path.join(build_root, self._cat("-", for_build_dir=True)))
        self.preload_file = os.path.join(self.builddir, Build.pfile)
        self.installroot = os.path.abspath(install_root)
        self.installdir = os.path.join(self.installroot, self._cat("-"))
    def __repr__(self):
        return self._cat("-")
    def _cat(self, sep, for_build_dir=False):
        """Join the build's properties into a tag, eg linux-x86_64-gcc-Release.
        for_build_dir: msvc build trees hold every configuration, so the
        build type is omitted from their directory name.
        (fix: for_build_dir had no default, making __repr__ and the
        installdir computation raise TypeError)"""
        if self.compiler.is_msvc and for_build_dir:
            s = "{1}{0}{2}{0}{3}"
            s = s.format(sep, self.system, self.architecture, self.compiler)
        else:
            s = "{1}{0}{2}{0}{3}{0}{4}"
            s = s.format(sep, self.system, self.architecture, self.compiler, self.buildtype)
        if self.variant:
            s += "{0}{1}".format(sep, self.variant)
        return s
    def create_dir(self):
        if not os.path.exists(self.builddir):
            os.makedirs(self.builddir)
    def configure(self):
        """Run cmake configuration in the build dir, preloading the cache."""
        self.create_dir()
        self.create_preload_file()
        with setcwd(self.builddir):
            cmd = (['cmake', '-C', os.path.basename(self.preload_file)]
                   + self.generator.configure_args(self) +
                   [  # '-DCMAKE_TOOLCHAIN_FILE='+toolchain_file,
                    self.projdir])
            runsyscmd(cmd, echo_output=True)
            # marker file: records that (and how) this tree was configured
            with open("cmany_configure.done", "w") as f:
                f.write(" ".join(cmd) + "\n")
    def build(self, targets=None):
        """Build the given targets (everything when none are given),
        configuring first if needed. (fix: was a mutable default argument)"""
        self.create_dir()
        if targets is None:
            targets = []
        with setcwd(self.builddir):
            if not os.path.exists("cmany_configure.done"):
                self.configure()
            # msvc solutions need an explicit target for "build everything"
            if self.compiler.is_msvc and len(targets) == 0:
                targets = ["ALL_BUILD"]
            cmd = self.generator.cmd(targets, self)
            runsyscmd(cmd, echo_output=True)
            with open("cmany_build.done", "w") as f:
                f.write(" ".join(cmd) + "\n")
    def install(self):
        self.create_dir()
        with setcwd(self.builddir):
            if not os.path.exists("cmany_build.done"):
                self.build()
            cmd = self.generator.install(self)
            print(cmd)
            runsyscmd(cmd, echo_output=True)
    def clean(self):
        self.create_dir()
        with setcwd(self.builddir):
            cmd = self.generator.cmd(['clean'], self)
            runsyscmd(cmd, echo_output=True)
            os.remove("cmany_build.done")
    def getvars(self, varlist):
        """Read the values of the given variables from this build's
        CMakeCache.txt, returned as an ordered dict."""
        vlist = [v + ':' for v in varlist]
        values = odict()
        rx = r'(^.*?)=(.*)$'  # cache lines are NAME:TYPE=VALUE
        with setcwd(self.builddir, silent=True):
            with open('CMakeCache.txt') as f:
                for line in f:
                    for v in vlist:
                        if line.startswith(v):
                            ls = line.strip()
                            vt = re.sub(rx, r'\1', ls)
                            values[vt] = re.sub(rx, r'\2', ls)
        return values
    def _gather_flags(self):
        # flags = self.generator.compile_flags()
        # flags += self.compiler.
        # return flags
        return []
    def create_preload_file(self):
        """Write the cmake cache-preload script for this build.
        http://stackoverflow.com/questions/17597673/cmake-preload-script-for-cache"""
        self.create_dir()
        lines = []
        def _s(var, value, type): lines.append('_cmany_set({} "{}" {})'.format(var, value, type))
        def s(var, value): _s(var, value, "STRING")
        def p(var, value): _s(var, re.sub(r'\\', '/', value), "PATH")
        def f(var, value): _s(var, re.sub(r'\\', '/', value), "FILEPATH")
        p("CMAKE_INSTALL_PREFIX", self.installdir)
        if not self.generator.is_msvc:
            f("CMAKE_CXX_COMPILER", self.compiler.path)
            f("CMAKE_C_COMPILER", self.compiler.c_compiler)
        s("CMAKE_BUILD_TYPE", self.buildtype)
        flags = self._gather_flags()
        if flags:
            s('CMAKE_CXX_FLAGS', " ".join(flags))
        # fix: %M is minutes; %m (month) was being used in the time part
        now = datetime.now().strftime("%Y/%m/%d %H:%M")
        txt = __class__.preload_file_tpl.format(date=now, vars="\n".join(lines))
        with open(self.preload_file, "w") as f:
            f.write(txt)
        return self.preload_file
    preload_file_tpl = """# Do not edit. Will be overwritten.
# Generated by cmany on {date}
if(NOT _cmany_def)
    set(_cmany_def ON)
    function(_cmany_set var value type)
        set(${{var}} "${{value}}" CACHE ${{type}} "")
        message(STATUS "cmany: ${{var}}=${{value}}")
    endfunction(_cmany_set)
endif(NOT _cmany_def)
message(STATUS "cmany:preload----------------------")
{vars}
message(STATUS "cmany:preload----------------------")
# Do not edit. Will be overwritten.
# Generated by cmany on {date}
"""
# -----------------------------------------------------------------------------
class ProjectConfig:
    """Expands the requested systems / architectures / compilers / build
    types / variants into the cross product of Build objects and runs
    cmake commands over (a selection of) them."""
    # @staticmethod
    # def default_systems():
    #     return ctor(System, ["linux", "windows", "android", "ios", "ps4", "xboxone"])
    # @staticmethod
    # def default_architectures():
    #     return ctor(Architecture, ["x86", "x86_64", "arm"])
    # @staticmethod
    # def default_buildtypes():
    #     return ctor(BuildType, ["Debug", "Release"])
    # @staticmethod
    # def default_compilers():
    #     return ctor(Compiler, ["clang++", "g++", "icpc"])
    # # no default variants
    def __init__(self, **kwargs):
        """Accepted kwargs: proj_dir, build_dir, install_dir, systems,
        architectures, build_types, compilers, variants, jobs."""
        projdir = kwargs.get('proj_dir', os.getcwd())
        self.rootdir = os.getcwd() if projdir == "." else projdir
        self.cmakelists = chkf(self.rootdir, "CMakeLists.txt")
        self.builddir = kwargs.get('build_dir', os.path.join(os.getcwd(), "build"))
        self.installdir = kwargs.get('install_dir', os.path.join(os.getcwd(), "install"))
        def _get(name, class_):
            # wrap each requested value in class_, falling back to the
            # class default when nothing was requested
            g = kwargs.get(name)
            if g is None or not g:
                return [class_.default()] if class_ is not None else [None]
            if class_ is None:
                # fix: previously fell through to class_(i) and raised
                # TypeError when values were given for a classless option
                # (eg variants)
                return list(g)
            return [class_(i) for i in g]
        self.systems = _get('systems', System)
        self.architectures = _get('architectures', Architecture)
        self.buildtypes = _get('build_types', BuildType)
        self.compilers = _get('compilers', Compiler)
        self.variants = _get('variants', None)
        #self.generator = Generator(kwargs.get('generator'))
        self.num_jobs = kwargs.get('jobs')
        configfile = os.path.join(projdir, "cmany.json")
        self.configfile = None
        if os.path.exists(configfile):
            self.parse_file(configfile)
            self.configfile = configfile
        # expand the cross product of requested properties into builds
        self.builds = []
        for s in self.systems:
            for a in self.architectures:
                for c in self.compilers:
                    for m in self.buildtypes:
                        for v in self.variants:
                            self.add_build_if_valid(s, a, m, c, v)
    def parse_file(self, configfile):
        raise Exception("not implemented")
    def add_build_if_valid(self, system, arch, buildtype, compiler, variant):
        """Create and store a Build for this combination; returns False
        when the combination is rejected by is_valid()."""
        if not self.is_valid(system, arch, buildtype, compiler, variant):
            return False
        b = Build(self.rootdir, self.builddir, self.installdir,
                  system, arch, buildtype, compiler, variant,
                  self.num_jobs)
        self.builds.append(b)
        return True
    def is_valid(self, sys, arch, mode, compiler, variant):
        # TODO
        return True
    def select(self, **kwargs):
        """Return the builds matching the given sys/arch/buildtype/
        compiler/variant values; omitted keys match every build."""
        out = [b for b in self.builds]
        def _h(li, kw, attr):
            g = kwargs.get(kw)
            if g is None:
                return li
            return [b for b in li if str(getattr(b, attr)) == g]
        out = _h(out, "sys", "system")
        out = _h(out, "arch", "architecture")
        out = _h(out, "buildtype", "buildtype")
        out = _h(out, "compiler", "compiler")
        out = _h(out, "variant", "variant")
        return out
    def create_tree(self, **restrict_to):
        # fix: this called self.select_and_show(), which does not exist
        builds = self.select(**restrict_to)
        for b in builds:
            b.create_dir()
            b.create_preload_file()
            # print(b, ":", d)
    def configure(self, **restrict_to):
        if not os.path.exists(self.builddir):
            os.makedirs(self.builddir)
        self._execute(Build.configure, "Configuring", silent=False, **restrict_to)
    def build(self, **restrict_to):
        self._execute(Build.build, "Building", silent=False, **restrict_to)
    def clean(self, **restrict_to):
        self._execute(Build.clean, "Cleaning", silent=False, **restrict_to)
    def install(self, **restrict_to):
        self._execute(Build.install, "Installing", silent=False, **restrict_to)
    def showvars(self, varlist, **restrict_to):
        """Print the values of the given cmake cache variables, per build."""
        varv = odict()
        def getv(build):
            for k, v in Build.getvars(build, varlist).items():
                sk = str(k)
                if not varv.get(sk):
                    varv[sk] = odict()
                varv[sk][str(build)] = v
        self._execute(getv, "", silent=True, **restrict_to)
        for var, sysvalues in varv.items():
            for s, v in sysvalues.items():
                print("{}='{}' ({})".format(var, v, s))
    def _execute(self, fn, msg, silent, **restrict_to):
        """Run fn over the selected builds, printing progress banners
        unless silent."""
        builds = self.select(**restrict_to)
        num = len(builds)
        if not silent:
            if num > 0:
                print("selected builds:")
                for b in builds:
                    print(b)
            else:
                print("no builds selected")
        if num == 0:
            return
        if not silent:
            print("")
            print("===============================================")
            if num > 1:
                print(msg + ": start", num, "builds")
                print("===============================================")
        for i, b in enumerate(builds):
            if not silent:
                print("\n")
                print("-----------------------------------------------")
                if num > 1:
                    print(msg + ": build #{} of {}:".format(i+1, num), b)
                else:
                    print(msg, b)
                print("-----------------------------------------------")
            fn(b)
        if not silent:
            if num > 1:
                print("-----------------------------------------------")
                print(msg + ": finished", num, "builds")
                print("===============================================")
cmany: reformat
#!/usr/bin/env python3
import os
import sys
import re
import glob
from datetime import datetime
from collections import OrderedDict as odict
from multiprocessing import cpu_count as cpu_count
from . import util
from .cmake_sysinfo import CMakeSysInfo, getcachevars
from . import vsinfo
# abort at import time on unsupported interpreters
if sys.version_info < (3, 3):
    # this is because of subprocess. That code is in c4/cmany/util.py.
    # sys.exit() with a string prints it to stderr and exits with status 1
    msg = 'cmany requires at least Python 3.3. Current version is {}. Sorry.'
    sys.exit(msg.format(sys.version_info))
# -----------------------------------------------------------------------------
class BuildItem:
    """A base class for build items. An item is fully identified by its
    name, which also serves as its string representation."""
    def __init__(self, name):
        self.name = name
    def __repr__(self):
        return self.name
    # str and repr are intentionally identical
    __str__ = __repr__
# -----------------------------------------------------------------------------
class BuildType(BuildItem):
    """Specifies a build type, ie, one of Release, Debug, etc"""
    @staticmethod
    def default():
        """Release is the build type used when none is requested."""
        return BuildType("Release")
# -----------------------------------------------------------------------------
class System(BuildItem):
    """Specifies an operating system"""
    @staticmethod
    def default():
        """return the current operating system"""
        # __class__ resolves to System via the implicit closure cell
        return System(__class__.default_str())
    @staticmethod
    def default_str():
        # name as reported by cmake; normalize both spellings of macOS
        s = CMakeSysInfo.system_name()
        if s == "mac os x" or s == "Darwin":
            s = "mac"
        return s
# -----------------------------------------------------------------------------
class Architecture(BuildItem):
    """Specifies a processor architecture"""
    @staticmethod
    def default():
        """return the architecture of the current machine"""
        return Architecture(__class__.default_str())
    @staticmethod
    def default_str():
        # s = CMakeSysInfo.architecture()
        # if s == "amd64":
        #     s = "x86_64"
        # return s
        # NOTE(review): implicitly returns None when the interpreter is
        # neither 64 nor 32 bit -- confirm util.in_64bit/in_32bit cover
        # all cases
        if util.in_64bit():
            return "x86_64"
        elif util.in_32bit():
            return "x86"
    @property
    def is64(self):
        # True when the name contains "64"; result cached in self._is64
        def fn():
            s = re.search('64', self.name)
            return s is not None
        return util.cacheattr(self, "_is64", fn)
    @property
    def is32(self):
        # defined as the complement of is64
        return not self.is64
    @property
    def is_arm(self):
        return "arm" in self.name.lower()
# -----------------------------------------------------------------------------
class CompileOptions:
    """A named bundle of cmake flags, compiler flags, linker flags and macros."""
    def __init__(self, name=""):
        self.name = name
        self.cmake_flags = []
        self.cflags = []
        self.lflags = []
        self.macros = []
    def merge(self, other):
        """other will take precedence, ie, their options will come last"""
        out = CompileOptions()
        out.name = "+".join((self.name, other.name))
        out.cmake_flags = self.cmake_flags + other.cmake_flags
        out.cflags = self.cflags + other.cflags
        out.lflags = self.lflags + other.lflags
        out.macros = self.macros + other.macros
        return out
# -----------------------------------------------------------------------------
class Compiler(BuildItem):
    """Specifies a compiler"""
    @staticmethod
    def default():
        # default to the compiler cmake would pick on this system
        return Compiler(__class__.default_str())
    @staticmethod
    def default_str():
        # on non-windows systems use cmake's default C++ compiler; on
        # windows prefer an installed Visual Studio, falling back to cmake
        if str(System.default()) != "windows":
            cpp = CMakeSysInfo.cxx_compiler()
        else:
            vs = vsinfo.find_any()
            cpp = vs.name if vs is not None else CMakeSysInfo.cxx_compiler()
        return cpp
    def __init__(self, path):
        # accept either a Visual Studio spec (eg "vs2015_64") or the
        # name/path of a compiler executable
        if path.startswith("vs") or path.startswith("Visual Studio"):
            vs = vsinfo.VisualStudioInfo(path)
            self.vs = vs
            path = vs.cxx_compiler
        else:
            p = util.which(path)
            if p is None:
                raise Exception("compiler not found: " + path)
            if p != path:
                print("compiler: selected {} for {}".format(p, path))
            path = os.path.abspath(p)
        name, version, version_full = self.get_version(path)
        self.shortname = name
        self.gcclike = self.shortname in ('gcc', 'clang', 'icc')
        self.is_msvc = self.shortname.startswith('vs')
        if not self.is_msvc:
            # eg "gcc5.4"; Visual Studio names already carry the year
            name += version
        self.path = path
        self.version = version
        self.version_full = version_full
        super().__init__(name)
        self.c_compiler = __class__.get_c_compiler(self.shortname, self.path)
    @staticmethod
    def get_c_compiler(shortname, cxx_compiler):
        # derive the C compiler executable name from the C++ one
        # if cxx_compiler.endswith("c++") or cxx_compiler.endswith('c++.exe'):
        #     cc = re.sub(r'c\+\+', r'cc', cxx_compiler)
        if shortname == "icc":
            cc = re.sub(r'icpc', r'icc', cxx_compiler)
        elif shortname == "gcc":
            cc = re.sub(r'g\+\+', r'gcc', cxx_compiler)
        elif shortname == "clang":
            cc = re.sub(r'clang\+\+', r'clang', cxx_compiler)
        else:
            cc = cxx_compiler
        return cc
    def get_version(self, path):
        """Return (shortname, major.minor version, full version line) for
        the compiler at path, by running it with --version/-dumpversion."""
        def slntout(cmd):
            # run cmd silently and return its output, stripped of newlines
            out = util.runsyscmd(cmd, echo_cmd=False,
                                 echo_output=False, capture_output=True)
            out = out.strip("\n")
            return out
        # is this visual studio?
        if hasattr(self, "vs"):
            return self.vs.name, str(self.vs.year), self.vs.name
        # # other compilers
        # print("cmp: found compiler:", name, path)
        out = slntout([path, '--version'])
        version_full = out.split("\n")[0]
        splits = version_full.split(" ")
        name = splits[0].lower()
        # print("cmp: version:", name, "---", firstline, "---")
        vregex = r'(\d+\.\d+)\.\d+'  # keep major.minor, drop the patch level
        if name.startswith("g++") or name.startswith("gcc"):
            name = "gcc"
            version = slntout([path, '-dumpversion'])
            version = re.sub(vregex, r'\1', version)
            # print("gcc version:", version, "---")
        elif name.startswith("clang"):
            name = "clang"
            version = re.sub(r'clang version ' + vregex + '.*', r'\1', version_full)
            # print("clang version:", version, "---")
        elif name.startswith("icpc"):
            name = "icc"
            version = re.sub(r'icpc \(ICC\) ' + vregex + '.*', r'\1', version_full)
            # print("icc version:", version, "---")
        else:
            # unknown compiler: try the generic --dumpversion flag
            version = slntout([path, '--dumpversion'])
            version = re.sub(vregex, r'\1', version)
        #
        return name, version, version_full
# -----------------------------------------------------------------------------
class Variant(BuildItem):
    """for variations in compile options"""
    def __init__(self, name):
        # every variant starts with an empty set of compile options
        super().__init__(name)
        self.options = CompileOptions()
# -----------------------------------------------------------------------------
class Generator(BuildItem):
    """Visual Studio aliases example:
    vs2013: use the bitness of the current system
    vs2013_32: use 32bit version
    vs2013_64: use 64bit version
    """
    @staticmethod
    def default():
        return Generator(__class__.default_str(), cpu_count())
    @staticmethod
    def default_str():
        # the generator cmake picks by default on this system
        s = CMakeSysInfo.generator()
        return s
    @staticmethod
    def create_default(system, arch, compiler, num_jobs):
        # NOTE(review): system and arch are currently unused here -- confirm
        if not compiler.is_msvc:
            # a non-msvc compiler on windows cannot use the VS generator
            if System.default_str() == "windows":
                return Generator("Unix Makefiles", num_jobs)
            else:
                return Generator(__class__.default_str(), num_jobs)
        else:
            return Generator(compiler.name, num_jobs)
    @staticmethod
    def resolve_alias(gen):
        # translate a vs-style alias into the cmake generator name
        if gen.startswith('vs') or gen.startswith('Visual Studio'):
            return vsinfo.to_gen(gen)
        return gen
    def __init__(self, name, num_jobs):
        if name.startswith('vs'):
            name = vsinfo.to_gen(name)
        # NOTE(review): alias is assigned after resolution, so it holds the
        # resolved generator name, not the original alias -- confirm intent
        self.alias = name
        super().__init__(name)
        self.num_jobs = num_jobs
        self.is_makefile = name.endswith("Makefiles")
        self.is_ninja = name.endswith("Ninja")
        self.is_msvc = name.startswith("Visual Studio")
    def configure_args(self, build):
        # arguments selecting this generator at cmake-configure time
        if self.name != "":
            if self.is_msvc and build.compiler.vs.toolset is not None:
                return ['-G', '"{}"'.format(self.name), '-T', build.compiler.vs.toolset]
            else:
                return ['-G', '"{}"'.format(self.name)]
        else:
            return []
    def cmd(self, targets, build):
        """Return the command line which builds the given targets."""
        if self.is_makefile:
            return ['make', '-j', str(self.num_jobs)] + targets
        elif self.is_msvc:
            # find the solution file once and cache it on the instance
            if not hasattr(self, "sln"):
                sln_files = glob.glob("*.sln")
                if len(sln_files) != 1:
                    # NOTE(review): also raised when there are zero solution
                    # files, despite the message wording
                    raise Exception("there's more than one solution file in the project folder")
                self.sln = sln_files[0]
            return [build.compiler.vs.msbuild, self.sln,
                    '/maxcpucount:'+str(self.num_jobs),
                    '/property:Configuration='+str(build.buildtype),
                    '/target:'+';'.join(targets)]
        else:
            bt = str(build.buildtype)
            # NOTE(review): '--target t' is emitted as a single list element;
            # confirm the command is joined into a shell string before running
            return (['cmake', '--build', '.', '--config', bt] +
                    ['--target ' + t for t in targets])
    def install(self, build):
        bt = str(build.buildtype)
        return ['cmake', '--build', '.', '--config', bt, '--target', 'install']
    """
    generators: https://cmake.org/cmake/help/v3.7/manual/cmake-generators.7.html
    Unix Makefiles
    MSYS Makefiles
    MinGW Makefiles
    NMake Makefiles
    Ninja
    Watcom WMake
    CodeBlocks - Ninja
    CodeBlocks - Unix Makefiles
    CodeBlocks - MinGW Makefiles
    CodeBlocks - NMake Makefiles
    CodeLite - Ninja
    CodeLite - Unix Makefiles
    CodeLite - MinGW Makefiles
    CodeLite - NMake Makefiles
    Eclipse CDT4 - Ninja
    Eclipse CDT4 - Unix Makefiles
    Eclipse CDT4 - MinGW Makefiles
    Eclipse CDT4 - NMake Makefiles
    KDevelop3
    KDevelop3 - Unix Makefiles
    Kate - Ninja
    Kate - Unix Makefiles
    Kate - MinGW Makefiles
    Kate - NMake Makefiles
    Sublime Text 2 - Ninja
    Sublime Text 2 - Unix Makefiles
    Sublime Text 2 - MinGW Makefiles
    Sublime Text 2 - NMake Makefiles
    Visual Studio 6
    Visual Studio 7
    Visual Studio 7 .NET 2003
    Visual Studio 8 2005 [Win64|IA64]
    Visual Studio 9 2008 [Win64|IA64]
    Visual Studio 10 2010 [Win64|IA64]
    Visual Studio 11 2012 [Win64|ARM]
    Visual Studio 12 2013 [Win64|ARM]
    Visual Studio 14 2015 [Win64|ARM]
    Visual Studio 15 2017 [Win64|ARM]
    Green Hills MULTI
    Xcode
    """
# -----------------------------------------------------------------------------
class Build:
    """Holds one build's settings (system, architecture, build type,
    compiler, variant) and drives cmake for its build tree."""
    # name of the cmake cache-preload file written into each build dir
    pfile = "cmany_preload.cmake"
    def __init__(self, proj_root, build_root, install_root,
                 system, arch, buildtype, compiler, variant,
                 num_jobs):
        # fix: pass the actual system argument (the `sys` module was
        # mistakenly being passed before)
        self.generator = Generator.create_default(system, arch, compiler, num_jobs)
        self.system = system
        self.architecture = arch
        self.buildtype = buildtype
        self.compiler = compiler
        self.variant = variant
        # self.crosscompile = (system != System.default())
        # self.toolchain = None
        self.tag = self._cat('-')
        self.projdir = util.chkf(proj_root)
        self.buildroot = os.path.abspath(build_root)
        self.buildtag = self.tag
        self.builddir = os.path.abspath(os.path.join(build_root, self.buildtag))
        self.preload_file = os.path.join(self.builddir, Build.pfile)
        self.installroot = os.path.abspath(install_root)
        self.installtag = self.tag
        self.installdir = os.path.join(self.installroot, self.installtag)
    def __repr__(self):
        return self.tag
    def _cat(self, sep):
        """Join system/arch/compiler/buildtype (and variant, when set)
        with sep, eg linux-x86_64-gcc5.4-Release."""
        s = "{1}{0}{2}{0}{3}{0}{4}"
        s = s.format(sep, self.system, self.architecture, self.compiler, self.buildtype)
        if self.variant:
            s += "{0}{1}".format(sep, self.variant)
        return s
    def create_dir(self):
        if not os.path.exists(self.builddir):
            os.makedirs(self.builddir)
    def configure(self):
        """Run cmake configuration in the build dir, preloading the cache."""
        self.create_dir()
        self.create_preload_file()
        with util.setcwd(self.builddir):
            cmd = (['cmake', '-C', os.path.basename(self.preload_file)]
                   + self.generator.configure_args(self) +
                   [  # '-DCMAKE_TOOLCHAIN_FILE='+toolchain_file,
                    self.projdir])
            util.runsyscmd(cmd, echo_output=True)
            # marker file: records that (and how) this tree was configured
            with open("cmany_configure.done", "w") as f:
                f.write(" ".join(cmd) + "\n")
    def build(self, targets=None):
        """Build the given targets (everything when none are given),
        configuring first if needed. (fix: was a mutable default argument)"""
        self.create_dir()
        if targets is None:
            targets = []
        with util.setcwd(self.builddir):
            if not os.path.exists("cmany_configure.done"):
                self.configure()
            # msvc solutions need an explicit target for "build everything"
            if self.compiler.is_msvc and len(targets) == 0:
                targets = ["ALL_BUILD"]
            cmd = self.generator.cmd(targets, self)
            util.runsyscmd(cmd, echo_output=True)
            with open("cmany_build.done", "w") as f:
                f.write(" ".join(cmd) + "\n")
    def install(self):
        self.create_dir()
        with util.setcwd(self.builddir):
            if not os.path.exists("cmany_build.done"):
                self.build()
            cmd = self.generator.install(self)
            print(cmd)
            util.runsyscmd(cmd, echo_output=True)
    def clean(self):
        self.create_dir()
        with util.setcwd(self.builddir):
            cmd = self.generator.cmd(['clean'], self)
            util.runsyscmd(cmd, echo_output=True)
            os.remove("cmany_build.done")
    def getvars(self, varlist):
        """Read the given variables from this build's CMakeCache.txt.
        fix: was referencing the unbound name cmake_sysinfo (only
        getcachevars is imported from .cmake_sysinfo), raising NameError."""
        return getcachevars(self.builddir, varlist)
    def _gather_flags(self):
        # flags = self.generator.compile_flags()
        # flags += self.compiler.
        # return flags
        return []
    def create_preload_file(self):
        """Write the cmake cache-preload script for this build.
        http://stackoverflow.com/questions/17597673/cmake-preload-script-for-cache"""
        self.create_dir()
        lines = []
        def _s(var, value, type): lines.append('_cmany_set({} "{}" {})'.format(var, value, type))  # nopep8
        def s(var, value): _s(var, value, "STRING")  # nopep8
        def p(var, value): _s(var, re.sub(r'\\', '/', value), "PATH")  # nopep8
        def f(var, value): _s(var, re.sub(r'\\', '/', value), "FILEPATH")  # nopep8
        p("CMAKE_INSTALL_PREFIX", self.installdir)
        if not self.generator.is_msvc:
            f("CMAKE_CXX_COMPILER", self.compiler.path)
            f("CMAKE_C_COMPILER", self.compiler.c_compiler)
        s("CMAKE_BUILD_TYPE", self.buildtype)
        flags = self._gather_flags()
        if flags:
            s('CMAKE_CXX_FLAGS', " ".join(flags))
        # fix: %M is minutes; %m (month) was being used in the time part
        now = datetime.now().strftime("%Y/%m/%d %H:%M")
        txt = __class__.preload_file_tpl.format(date=now, vars="\n".join(lines))
        with open(self.preload_file, "w") as f:
            f.write(txt)
        return self.preload_file
    preload_file_tpl = """# Do not edit. Will be overwritten.
# Generated by cmany on {date}
if(NOT _cmany_def)
    set(_cmany_def ON)
    function(_cmany_set var value type)
        set(${{var}} "${{value}}" CACHE ${{type}} "")
        message(STATUS "cmany: ${{var}}=${{value}}")
    endfunction(_cmany_set)
endif(NOT _cmany_def)
message(STATUS "cmany:preload----------------------")
{vars}
message(STATUS "cmany:preload----------------------")
# Do not edit. Will be overwritten.
# Generated by cmany on {date}
"""
# -----------------------------------------------------------------------------
class ProjectConfig:
    """Expands the requested systems/architectures/compilers/build types/
    variants into the list of Build combinations for a CMake project."""
    def __init__(self, **kwargs):
        """Recognized kwargs: proj_dir, build_dir, install_dir, systems,
        architectures, build_types, compilers, variants, jobs."""
        projdir = kwargs.get('proj_dir', os.getcwd())
        self.rootdir = os.getcwd() if projdir == "." else projdir
        self.cmakelists = util.chkf(self.rootdir, "CMakeLists.txt")
        self.builddir = kwargs.get('build_dir', os.path.join(os.getcwd(), "build"))
        self.installdir = kwargs.get('install_dir', os.path.join(os.getcwd(), "install"))
        def _get(name, class_):
            # Fetch a list-valued kwarg, falling back to the class default
            # (or [None] when there is no class, e.g. variants).
            # NOTE(review): the original had an unreachable loop after this
            # return which converted each entry via class_(i); it was dead
            # code and has been removed. If conversion to class_ instances
            # was intended, the return was premature — confirm.
            g = kwargs.get(name)
            if g is None or not g:
                g = [class_.default()] if class_ is not None else [None]
            return g
        self.systems = _get('systems', System)
        self.architectures = _get('architectures', Architecture)
        self.buildtypes = _get('build_types', BuildType)
        self.compilers = _get('compilers', Compiler)
        self.variants = _get('variants', None)
        self.num_jobs = kwargs.get('jobs')
        # Optional per-project config file; parse_file() is not implemented yet.
        configfile = os.path.join(projdir, "cmany.json")
        self.configfile = None
        if os.path.exists(configfile):
            self.parse_file(configfile)
            self.configfile = configfile
        # Cartesian product of all axes, filtered through is_valid().
        self.builds = []
        for s in self.systems:
            for a in self.architectures:
                for c in self.compilers:
                    for m in self.buildtypes:
                        for v in self.variants:
                            self.add_build_if_valid(s, a, m, c, v)
    def parse_file(self, configfile):
        """Parse a cmany.json project file (not implemented yet)."""
        raise Exception("not implemented")
    def add_build_if_valid(self, system, arch, buildtype, compiler, variant):
        """Create and store a Build for this combination.

        Returns False when the combination is rejected by is_valid()."""
        if not self.is_valid(system, arch, buildtype, compiler, variant):
            return False
        b = Build(self.rootdir, self.builddir, self.installdir,
                  system, arch, buildtype, compiler, variant,
                  self.num_jobs)
        self.builds.append(b)
        return True
    def is_valid(self, sys, arch, mode, compiler, variant):
        # TODO: reject combinations that cannot be built; currently accepts all.
        return True
    def select(self, **kwargs):
        """Filter builds by sys/arch/buildtype/compiler/variant kwargs; each
        filter matches against str() of the corresponding Build attribute."""
        out = [b for b in self.builds]
        def _h(li, kw, attr):
            # Apply one filter; a missing kwarg means "keep everything".
            g = kwargs.get(kw)
            if g is None:
                return li
            return [b for b in li if str(getattr(b, attr)) == g]
        out = _h(out, "sys", "system")
        out = _h(out, "arch", "architecture")
        out = _h(out, "buildtype", "buildtype")
        out = _h(out, "compiler", "compiler")
        out = _h(out, "variant", "variant")
        return out
    def create_tree(self, **restrict_to):
        """Create the build dir and cmake preload file for selected builds.

        NOTE(review): calls self.select_and_show(), which is not defined in
        this class — confirm it is provided elsewhere."""
        builds = self.select_and_show(**restrict_to)
        for b in builds:
            b.create_dir()
            b.create_preload_file()
    def configure(self, **restrict_to):
        """Run the cmake configure step for the selected builds."""
        if not os.path.exists(self.builddir):
            os.makedirs(self.builddir)
        self._execute(Build.configure, "Configuring", silent=False, **restrict_to)
    def build(self, **restrict_to):
        """Run the build step for the selected builds."""
        self._execute(Build.build, "Building", silent=False, **restrict_to)
    def clean(self, **restrict_to):
        """Run the clean step for the selected builds."""
        self._execute(Build.clean, "Cleaning", silent=False, **restrict_to)
    def install(self, **restrict_to):
        """Run the install step for the selected builds."""
        self._execute(Build.install, "Installing", silent=False, **restrict_to)
    def showvars(self, varlist, **restrict_to):
        """Print the cmake cache values of varlist for each selected build."""
        varv = odict()
        def getv(build):
            # Collect var -> {build: value} without printing banners.
            for k, v in Build.getvars(build, varlist).items():
                sk = str(k)
                if not varv.get(sk):
                    varv[sk] = odict()
                varv[sk][str(build)] = v
        self._execute(getv, "", silent=True, **restrict_to)
        for var, sysvalues in varv.items():
            for s, v in sysvalues.items():
                print("{}='{}' ({})".format(var, v, s))
    def _execute(self, fn, msg, silent, **restrict_to):
        """Apply fn to each selected build, with optional progress banners."""
        builds = self.select(**restrict_to)
        num = len(builds)
        if not silent:
            if num > 0:
                print("selected builds:")
                for b in builds:
                    print(b)
            else:
                print("no builds selected")
        if num == 0:
            return
        if not silent:
            print("")
            print("===============================================")
            if num > 1:
                print(msg + ": start", num, "builds")
                print("===============================================")
        for i, b in enumerate(builds):
            if not silent:
                print("\n")
                print("-----------------------------------------------")
                if num > 1:
                    print(msg + ": build #{} of {}:".format(i+1, num), b)
                else:
                    print(msg, b)
                print("-----------------------------------------------")
            fn(b)
        if not silent:
            if num > 1:
                print("-----------------------------------------------")
                print(msg + ": finished", num, "builds")
                print("===============================================")
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
import os
# Absolute path where the generated static site is written; defaults to a
# "freeze" directory alongside MEDIA_ROOT.
FREEZE_ROOT = getattr(
    settings,
    "FREEZE_ROOT",
    os.path.abspath(os.path.join(settings.MEDIA_ROOT, "../freeze/")),
)
if not os.path.isabs(FREEZE_ROOT):
    raise ImproperlyConfigured("settings.FREEZE_ROOT should be an absolute path")
# MEDIA_ROOT / STATIC_ROOT may be unset (None or "") in minimal settings
# modules; guard their truthiness before calling .find() so importing this
# module does not crash with an AttributeError.
media_root = settings.MEDIA_ROOT
static_root = settings.STATIC_ROOT
# NOTE(review): find(FREEZE_ROOT) == 0 tests that MEDIA_ROOT/STATIC_ROOT
# *start with* FREEZE_ROOT, which reads as the inverse of the error message
# below — confirm the intended containment direction.
if (
    media_root
    and media_root.find(FREEZE_ROOT) == 0
    or static_root
    and static_root.find(FREEZE_ROOT) == 0
):
    raise ImproperlyConfigured(
        "settings.FREEZE_ROOT cannot be a subdirectory of MEDIA_ROOT or STATIC_ROOT"
    )
FREEZE_MEDIA_ROOT = settings.MEDIA_ROOT
FREEZE_MEDIA_URL = settings.MEDIA_URL
FREEZE_STATIC_ROOT = settings.STATIC_ROOT
FREEZE_STATIC_URL = settings.STATIC_URL
FREEZE_USE_HTTPS = getattr(settings, "FREEZE_USE_HTTPS", False)
FREEZE_PROTOCOL = "https://" if FREEZE_USE_HTTPS else "http://"
FREEZE_SITE_URL = getattr(settings, "FREEZE_SITE_URL", None)
if FREEZE_SITE_URL is None:
    # handled this way to remove DB dependency unless strictly needed. If FREEZE_SITE_URL is set then collectstatic
    # can be called without needing a db setup, which is useful for build servers
    FREEZE_SITE_URL = "%s%s" % (
        FREEZE_PROTOCOL,
        Site.objects.get_current().domain,
    )
FREEZE_BASE_URL = getattr(settings, "FREEZE_BASE_URL", None)
if FREEZE_BASE_URL:
    # Accept only root-relative or absolute URLs, normalized to a trailing slash.
    if FREEZE_BASE_URL.startswith(("/", "http")):
        if not FREEZE_BASE_URL.endswith("/"):
            FREEZE_BASE_URL += "/"
    else:
        raise ImproperlyConfigured(
            "settings.FREEZE_BASE_URL should start with '/' or 'http' or be an empty string"
        )
FREEZE_RELATIVE_URLS = getattr(settings, "FREEZE_RELATIVE_URLS", False)
if FREEZE_RELATIVE_URLS and FREEZE_BASE_URL is not None:
    raise ImproperlyConfigured(
        "settings.FREEZE_RELATIVE_URLS cannot be set to True if FREEZE_BASE_URL is specified"
    )
FREEZE_LOCAL_URLS = getattr(settings, "FREEZE_LOCAL_URLS", False)
if FREEZE_LOCAL_URLS and not FREEZE_RELATIVE_URLS:
    raise ImproperlyConfigured(
        "settings.FREEZE_LOCAL_URLS cannot be set to True if FREEZE_RELATIVE_URLS is set to False"
    )
FREEZE_FOLLOW_SITEMAP_URLS = getattr(settings, "FREEZE_FOLLOW_SITEMAP_URLS", True)
FREEZE_FOLLOW_HTML_URLS = getattr(settings, "FREEZE_FOLLOW_HTML_URLS", True)
FREEZE_REPORT_INVALID_URLS = getattr(settings, "FREEZE_REPORT_INVALID_URLS", False)
FREEZE_REPORT_INVALID_URLS_SUBJECT = getattr(
    settings, "FREEZE_REPORT_INVALID_URLS_SUBJECT", "[freeze] invalid urls"
)
FREEZE_INCLUDE_MEDIA = getattr(settings, "FREEZE_INCLUDE_MEDIA", True)
FREEZE_INCLUDE_STATIC = getattr(settings, "FREEZE_INCLUDE_STATIC", True)
FREEZE_ZIP_ALL = getattr(settings, "FREEZE_ZIP_ALL", False)
FREEZE_ZIP_NAME = getattr(settings, "FREEZE_ZIP_NAME", "freeze")
# Ensure a .zip extension. (The old len() >= 4 guard skipped names shorter
# than four characters, leaving them without an extension.)
if not FREEZE_ZIP_NAME.lower().endswith(".zip"):
    FREEZE_ZIP_NAME += ".zip"
FREEZE_ZIP_PATH = os.path.abspath(os.path.join(FREEZE_ROOT, FREEZE_ZIP_NAME))
FREEZE_REQUEST_HEADERS = getattr(
    settings, "FREEZE_REQUEST_HEADERS", {"user-agent": "django-freeze"}
)
Fix settings error if some settings are not defined.
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
import os
# Absolute path where the generated static site is written; defaults to a
# "freeze" directory alongside MEDIA_ROOT.
FREEZE_ROOT = getattr(
    settings,
    "FREEZE_ROOT",
    os.path.abspath(os.path.join(settings.MEDIA_ROOT, "../freeze/")),
)
if not os.path.isabs(FREEZE_ROOT):
    raise ImproperlyConfigured("settings.FREEZE_ROOT should be an absolute path")
# MEDIA_ROOT / STATIC_ROOT may be unset (None or "") in minimal settings
# modules; the truthiness guard keeps .find() from raising AttributeError.
media_root = settings.MEDIA_ROOT
static_root = settings.STATIC_ROOT
# NOTE(review): find(FREEZE_ROOT) == 0 tests that MEDIA_ROOT/STATIC_ROOT
# *start with* FREEZE_ROOT, which reads as the inverse of the error message
# below — confirm the intended containment direction.
if (
    media_root
    and media_root.find(FREEZE_ROOT) == 0
    or static_root
    and static_root.find(FREEZE_ROOT) == 0
):
    raise ImproperlyConfigured(
        "settings.FREEZE_ROOT cannot be a subdirectory of MEDIA_ROOT or STATIC_ROOT"
    )
FREEZE_MEDIA_ROOT = settings.MEDIA_ROOT
FREEZE_MEDIA_URL = settings.MEDIA_URL
FREEZE_STATIC_ROOT = settings.STATIC_ROOT
FREEZE_STATIC_URL = settings.STATIC_URL
FREEZE_USE_HTTPS = getattr(settings, "FREEZE_USE_HTTPS", False)
FREEZE_PROTOCOL = "https://" if FREEZE_USE_HTTPS else "http://"
FREEZE_SITE_URL = getattr(settings, "FREEZE_SITE_URL", None)
if FREEZE_SITE_URL is None:
    # handled this way to remove DB dependency unless strictly needed. If FREEZE_SITE_URL is set then collectstatic
    # can be called without needing a db setup, which is useful for build servers
    FREEZE_SITE_URL = "%s%s" % (
        FREEZE_PROTOCOL,
        Site.objects.get_current().domain,
    )
FREEZE_BASE_URL = getattr(settings, "FREEZE_BASE_URL", None)
if FREEZE_BASE_URL:
    # Accept only root-relative or absolute URLs, normalized to a trailing slash.
    if FREEZE_BASE_URL.startswith(("/", "http")):
        if not FREEZE_BASE_URL.endswith("/"):
            FREEZE_BASE_URL += "/"
    else:
        raise ImproperlyConfigured(
            "settings.FREEZE_BASE_URL should start with '/' or 'http' or be an empty string"
        )
FREEZE_RELATIVE_URLS = getattr(settings, "FREEZE_RELATIVE_URLS", False)
if FREEZE_RELATIVE_URLS and FREEZE_BASE_URL is not None:
    raise ImproperlyConfigured(
        "settings.FREEZE_RELATIVE_URLS cannot be set to True if FREEZE_BASE_URL is specified"
    )
FREEZE_LOCAL_URLS = getattr(settings, "FREEZE_LOCAL_URLS", False)
if FREEZE_LOCAL_URLS and not FREEZE_RELATIVE_URLS:
    raise ImproperlyConfigured(
        "settings.FREEZE_LOCAL_URLS cannot be set to True if FREEZE_RELATIVE_URLS is set to False"
    )
FREEZE_FOLLOW_SITEMAP_URLS = getattr(settings, "FREEZE_FOLLOW_SITEMAP_URLS", True)
FREEZE_FOLLOW_HTML_URLS = getattr(settings, "FREEZE_FOLLOW_HTML_URLS", True)
FREEZE_REPORT_INVALID_URLS = getattr(settings, "FREEZE_REPORT_INVALID_URLS", False)
FREEZE_REPORT_INVALID_URLS_SUBJECT = getattr(
    settings, "FREEZE_REPORT_INVALID_URLS_SUBJECT", "[freeze] invalid urls"
)
FREEZE_INCLUDE_MEDIA = getattr(settings, "FREEZE_INCLUDE_MEDIA", True)
FREEZE_INCLUDE_STATIC = getattr(settings, "FREEZE_INCLUDE_STATIC", True)
FREEZE_ZIP_ALL = getattr(settings, "FREEZE_ZIP_ALL", False)
FREEZE_ZIP_NAME = getattr(settings, "FREEZE_ZIP_NAME", "freeze")
# Ensure a .zip extension. (The old len() >= 4 guard skipped names shorter
# than four characters, leaving them without an extension.)
if not FREEZE_ZIP_NAME.lower().endswith(".zip"):
    FREEZE_ZIP_NAME += ".zip"
FREEZE_ZIP_PATH = os.path.abspath(os.path.join(FREEZE_ROOT, FREEZE_ZIP_NAME))
FREEZE_REQUEST_HEADERS = getattr(
    settings, "FREEZE_REQUEST_HEADERS", {"user-agent": "django-freeze"}
)
|
# -*- coding: utf-8 -*-
"""
hyper/tls
~~~~~~~~~
Contains the TLS/SSL logic for use in hyper.
"""
import os.path as path
from .compat import ignore_missing, ssl
# Preferred protocol token advertised during negotiation.
NPN_PROTOCOL = 'h2'
# Final h2 identifier plus draft identifiers still deployed in the wild.
H2_NPN_PROTOCOLS = [NPN_PROTOCOL, 'h2-16', 'h2-15', 'h2-14']
SUPPORTED_NPN_PROTOCOLS = H2_NPN_PROTOCOLS + ['http/1.1']
# We have a singleton SSLContext object. There's no reason to be creating one
# per connection.
_context = None
# Work out where our certificates are.
cert_loc = path.join(path.dirname(__file__), 'certs.pem')
def wrap_socket(sock, server_hostname):
    """
    A vastly simplified SSL wrapping function. We'll probably extend this to
    do more things later.

    Returns a (wrapped_socket, negotiated_protocol) tuple; the protocol is
    None when neither ALPN nor NPN is available.
    """
    global _context
    if _context is None:  # pragma: no cover
        _context = _init_context()
    # the spec requires SNI support
    ssl_sock = _context.wrap_socket(sock, server_hostname=server_hostname)
    # Setting SSLContext.check_hostname to True only verifies that the
    # post-handshake servername matches that of the certificate. We also need
    # to check that it matches the requested one.
    if _context.check_hostname:  # pragma: no cover
        try:
            ssl.match_hostname(ssl_sock.getpeercert(), server_hostname)
        except AttributeError:
            ssl.verify_hostname(ssl_sock, server_hostname)  # pyopenssl
    proto = None
    # ALPN is the modern replacement for NPN, so prefer it; either query may
    # be missing from the ssl module, hence the ignore_missing guards.
    with ignore_missing():
        proto = ssl_sock.selected_alpn_protocol()
    with ignore_missing():
        if proto is None:
            proto = ssl_sock.selected_npn_protocol()
    return (ssl_sock, proto)
def _init_context():
    """
    Creates the singleton SSLContext we use.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.set_default_verify_paths()
    context.load_verify_locations(cafile=cert_loc)
    context.verify_mode = ssl.CERT_REQUIRED
    context.check_hostname = True
    with ignore_missing():
        context.set_npn_protocols(SUPPORTED_NPN_PROTOCOLS)
    # Also advertise the same protocols via ALPN where the ssl module
    # supports it (NPN's successor).
    with ignore_missing():
        context.set_alpn_protocols(SUPPORTED_NPN_PROTOCOLS)
    # required by the spec
    context.options |= ssl.OP_NO_COMPRESSION
    return context
Make ALPN calls.
# -*- coding: utf-8 -*-
"""
hyper/tls
~~~~~~~~~
Contains the TLS/SSL logic for use in hyper.
"""
import os.path as path
from .compat import ignore_missing, ssl
# Preferred protocol token advertised during negotiation.
NPN_PROTOCOL = 'h2'
# Final h2 identifier plus draft identifiers still deployed in the wild.
H2_NPN_PROTOCOLS = [NPN_PROTOCOL, 'h2-16', 'h2-15', 'h2-14']
SUPPORTED_NPN_PROTOCOLS = H2_NPN_PROTOCOLS + ['http/1.1']
# We have a singleton SSLContext object. There's no reason to be creating one
# per connection.
_context = None
# Work out where our certificates are.
cert_loc = path.join(path.dirname(__file__), 'certs.pem')
def wrap_socket(sock, server_hostname):
    """
    A vastly simplified SSL wrapping function. We'll probably extend this to
    do more things later.

    Returns a (wrapped_socket, negotiated_protocol) tuple.
    """
    global _context
    if _context is None:  # pragma: no cover
        _context = _init_context()
    # the spec requires SNI support
    wrapped = _context.wrap_socket(sock, server_hostname=server_hostname)
    # check_hostname only validates the post-handshake servername against the
    # certificate; additionally verify it matches the name we asked for.
    if _context.check_hostname:  # pragma: no cover
        try:
            ssl.match_hostname(wrapped.getpeercert(), server_hostname)
        except AttributeError:
            ssl.verify_hostname(wrapped, server_hostname)  # pyopenssl
    # ALPN is newer, so we prefer it over NPN. The odds of us getting different
    # answers is pretty low, but let's be sure.
    negotiated = None
    with ignore_missing():
        negotiated = wrapped.selected_alpn_protocol()
    with ignore_missing():
        if negotiated is None:
            negotiated = wrapped.selected_npn_protocol()
    return (wrapped, negotiated)
def _init_context():
    """Build the singleton SSLContext shared by every connection."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    ctx.set_default_verify_paths()
    ctx.load_verify_locations(cafile=cert_loc)
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.check_hostname = True
    # Advertise our protocols over both NPN and ALPN; either setter may be
    # absent from the ssl module, hence the ignore_missing guards.
    with ignore_missing():
        ctx.set_npn_protocols(SUPPORTED_NPN_PROTOCOLS)
    with ignore_missing():
        ctx.set_alpn_protocols(SUPPORTED_NPN_PROTOCOLS)
    # required by the spec
    ctx.options |= ssl.OP_NO_COMPRESSION
    return ctx
|
import web
import yaml
#from .paths import PREFIX
def wmt_conf(**kwds):
    """Return the contents of a wmt YAML configuration as a string.

    Keyword args:
        prefix: installation prefix; defaults to the module-level PREFIX
            (whose import is currently commented out at the top of the file).
    """
    from string import Template
    # dicts have no .default() method (the original raised AttributeError);
    # test membership so PREFIX is only evaluated when no prefix is passed.
    # The unused (and equally broken) database_dir local was removed.
    prefix = kwds['prefix'] if 'prefix' in kwds else PREFIX
    contents = Template("""
wmt:
  database_dir: ${prefix}/db
  data_dir: ${prefix}/data
""")
    return contents.substitute(prefix=prefix)
def load_wmt_conf(file):
    """Parse a wmt YAML configuration file and return the resulting object."""
    with open(file, 'r') as conf_file:
        # safe_load avoids arbitrary Python object construction from
        # untrusted YAML (yaml.load without a Loader is unsafe/deprecated).
        conf = yaml.safe_load(conf_file.read())
    return conf
def read_site_conf(site_prefix):
    """Read <site_prefix>/conf/wmt.ini into a RawConfigParser.

    Missing files are silently ignored (RawConfigParser.read semantics),
    matching the previous behavior of returning an empty parser. The
    original computed the path but never read it, and used os without
    importing it.
    """
    import os
    from ConfigParser import RawConfigParser
    conf_file_path = os.path.join(site_prefix, 'conf', 'wmt.ini')
    conf = RawConfigParser()
    conf.read(conf_file_path)
    return conf
def read_config_file(path_to_file):
    """Load site settings plus a passlib CryptContext from an ini file."""
    from ConfigParser import RawConfigParser
    from passlib.context import CryptContext
    parser = RawConfigParser()
    parser.read(path_to_file)
    # The [wmt] section becomes the settings dict; the [passlib] section of
    # the same file configures the password-hashing context.
    site = dict(parser.items('wmt'))
    site['pw'] = CryptContext.from_path(path_to_file, section='passlib')
    return site
def write_config_file(path_to_file, items):
    """Write (option, value) pairs into the [wmt] section of an ini file.

    Fixes over the original: the loop referenced an undefined name (conf),
    unpacked the whole items sequence instead of the current item, never
    created the section (RawConfigParser.set raises NoSectionError), and
    passed a path where RawConfigParser.write expects an open file object.
    """
    from ConfigParser import RawConfigParser
    config = RawConfigParser()
    config.add_section('wmt')
    for item in items:
        config.set('wmt', *item)
    with open(path_to_file, 'w') as conf_file:
        config.write(conf_file)
def read_site_config_file(*args):
    """Locate and read the site's wmt.ini.

    The prefix comes from args[0] when given, otherwise from the WMT_PREFIX
    environment variable (KeyError propagates when neither is set). As a
    side effect, WMT_PREFIX is normalized to an absolute path.
    """
    import os
    if args:
        prefix = args[0]
    else:
        prefix = os.environ['WMT_PREFIX']
    os.environ['WMT_PREFIX'] = os.path.abspath(prefix)
    path = os.path.join(os.environ['WMT_PREFIX'], 'conf', 'wmt.ini')
    return read_config_file(path)
# Module-level side effects: load the site settings (requires WMT_PREFIX to
# be set, else KeyError) and open the configured sqlite database.
site = read_site_config_file()
db = web.database(dbn='sqlite', db=site['database'])
Removed unused functions.
import web
def read_config_file(path_to_file):
    """Read site settings from an ini file.

    Returns the [wmt] section as a dict, plus a 'pw' entry holding a passlib
    CryptContext built from the [passlib] section of the same file.
    """
    from ConfigParser import RawConfigParser
    from passlib.context import CryptContext
    config = RawConfigParser()
    config.read(path_to_file)
    site = dict(config.items('wmt'))
    site['pw'] = CryptContext.from_path(path_to_file, section='passlib')
    return site
def read_site_config_file(*args):
    """Locate and read the site's wmt.ini.

    The prefix comes from args[0] when given, otherwise from the WMT_PREFIX
    environment variable (KeyError when neither is set). Side effect:
    WMT_PREFIX is normalized to an absolute path.
    """
    import os
    try:
        prefix = args[0]
    except IndexError:
        try:
            prefix = os.environ['WMT_PREFIX']
        except KeyError:
            # No prefix argument and no environment variable: give up.
            raise
    os.environ['WMT_PREFIX'] = os.path.abspath(prefix)
    path = os.path.join(os.environ['WMT_PREFIX'], 'conf', 'wmt.ini')
    return read_config_file(path)
# Module-level side effects: load the site settings (requires WMT_PREFIX to
# be set, else KeyError) and open the configured sqlite database.
site = read_site_config_file()
db = web.database(dbn='sqlite', db=site['database'])
|
# -*- coding: utf-8 -*-
'''
This is a library for network oriented, coroutine based programming.
The interfaces and events/operations aim to mimic thread features. Coroutines
work as simple generators, the operations and events work as objects passed in
and out of the generator, these objects are managed by the scheduler/network
poller.
Check each modules for specific help.
'''
__license__ = '''
Copyright (c) 2007, Mărieş Ionel Cristian
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# Package metadata; $Revision$/$Id$ are expanded by svn keyword substitution.
__author__ = "Mărieş Ionel Cristian"
__email__ = "ionel dot mc at gmail dot com"
__revision__ = "$Revision$"
__version__ = '0.1.8'
__svnid__ = "$Id$"
from cogen import core
from cogen import common
from cogen import web
Added a BOM in __init__.py.
# -*- coding: utf-8 -*-
'''
This is a library for network oriented, coroutine based programming.
The interfaces and events/operations aim to mimic thread features. Coroutines
work as simple generators, the operations and events work as objects passed in
and out of the generator, these objects are managed by the scheduler/network
poller.
Check each modules for specific help.
'''
__license__ = '''
Copyright (c) 2007, Mărieş Ionel Cristian
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
# Package metadata; $Revision$/$Id$ are expanded by svn keyword substitution.
# The author name is a unicode literal because it contains non-ASCII chars.
__author__ = u"Mărieş Ionel Cristian"
__email__ = "ionel dot mc at gmail dot com"
__revision__ = "$Revision$"
__version__ = '0.1.8'
__svnid__ = "$Id$"
from cogen import core
from cogen import common
from cogen import web |
from __future__ import annotations
import asyncio
import copy
import re
from asyncio import subprocess
from collections import deque
from collections.abc import Awaitable, Callable
from datetime import datetime, timedelta
from hashlib import md5
from io import BytesIO
from itertools import groupby
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import IO, Any, Optional, TypeVar, Union, overload
from zipfile import ZipFile
import aiohttp
import discord
import toml
from discord import CategoryChannel, File, Message, TextChannel
from discord.ext import commands
from discord.ext.commands import BadUnionArgument, ChannelNotFound, Cog
from discord.utils import snowflake_time, time_snowflake
from lxml import etree
from bot import BeattieBot
from context import BContext
from schema.crosspost import Crosspost as CrosspostSettings
from schema.crosspost import CrosspostMessage, Table
from utils.checks import is_owner_or
from utils.contextmanagers import get as get_
from utils.etc import display_bytes, remove_spoilers
from utils.exceptions import ResponseError
# File-like type variable used by the Crosspost.save overloads.
_IO = TypeVar("_IO", bound=IO[bytes])
# Tweet permalinks on twitter.com (www/mobile/m subdomains).
TWITTER_URL_EXPR = re.compile(
    r"https?://(?:(?:www|mobile|m)\.)?(twitter\.com/[^\s/]+/status/\d+)"
)
# XPath selectors for scraping the twitter permalink page.
TWEET_SELECTOR = ".//div[contains(@class, 'permalink-tweet')]"
TWITTER_IMG_SELECTOR = ".//img[@data-aria-label-part]"
TWITTER_IS_GIF = ".//div[contains(@class, 'PlayableMedia--gif')]"
# Pixiv member_illust.php links and /artworks/<id> pages.
PIXIV_URL_EXPR = re.compile(
    r"https?://(?:www\.)?pixiv\.net/(?:member_illust\.php\?"
    r"[\w]+=[\w]+(?:&[\w]+=[\w]+)*|(?:\w{2}/)?artworks/\d+(?:#\w*)?)"
)
# Hiccears gallery/picture pages and their image/thumbnail selectors.
HICCEARS_URL_EXPR = re.compile(
    r"https?://(?:www\.)?hiccears\.com/(?:(?:gallery)|(?:picture))\.php\?[gp]id=\d+"
)
HICCEARS_IMG_SELECTOR = ".//a[contains(@href, 'imgs')]"
HICCEARS_THUMB_SELECTOR = ".//img[contains(@src, 'thumbnails')]"
# Tumblr posts; images are taken from the og:image meta tag.
TUMBLR_URL_EXPR = re.compile(r"https?://[\w-]+\.tumblr\.com/post/\d+")
TUMBLR_IMG_SELECTOR = ".//meta[@property='og:image']"
# Mastodon statuses; the GROUPS pattern extracts (instance, status id) for
# the statuses API endpoint below.
MASTODON_URL_EXPR = re.compile(r"https?://\S+/[\w-]+/?(?:>|$|\s)")
MASTODON_URL_GROUPS = re.compile(r"https?://([^\s/]+)(?:/.+)+/([\w-]+)")
MASTODON_API_FMT = "https://{}/api/v1/statuses/{}"
# Inkbunny submissions and the API endpoint template ("login", etc.).
INKBUNNY_URL_EXPR = re.compile(
    r"https?://(?:www\.)?inkbunny\.net/s/(\d+)(?:-p\d+-)?(?:#.*)?"
)
INKBUNNY_API_FMT = "https://inkbunny.net/api_{}.php"
# Imgur albums/galleries.
IMGUR_URL_EXPR = re.compile(r"https?://(?:www\.)?imgur\.com/(?:a|gallery)/(\w+)")
# How long sent-message bookkeeping is kept in the in-memory cache.
MESSAGE_CACHE_TTL: int = 60 * 60 * 24  # one day in seconds
async def try_wait_for(
    proc: asyncio.subprocess.Process, *, timeout: int = 180, kill_timeout: int = 15
) -> bytes:
    """Wait for *proc* to finish and return its stdout.

    On timeout the process is terminated (escalating to SIGKILL after
    *kill_timeout* seconds) and the TimeoutError is re-raised.
    """
    try:
        stdout, _stderr = await asyncio.wait_for(proc.communicate(), timeout=timeout)
    except asyncio.TimeoutError:
        await gently_kill(proc, timeout=kill_timeout)
        raise
    return stdout
async def gently_kill(proc: asyncio.subprocess.Process, *, timeout: int):
    """Ask *proc* to terminate; force-kill it if it is still running after
    *timeout* seconds."""
    proc.terminate()
    try:
        await asyncio.wait_for(proc.wait(), timeout=timeout)
    except asyncio.TimeoutError:
        # SIGTERM was ignored; escalate to an unconditional kill.
        proc.kill()
class Settings:
    """Crosspost settings for one scope (guild default, category, or channel).

    A field value of None means "not set at this scope"; scopes are merged
    with apply(), where the other scope's non-None fields win.
    """
    __slots__ = ("auto", "mode", "max_pages")
    auto: Optional[bool]
    mode: Optional[int]
    max_pages: Optional[int]
    def __init__(
        self,
        auto: Optional[bool] = None,
        mode: Optional[int] = None,
        max_pages: Optional[int] = None,
    ) -> None:
        # Explicit Optional[...] — the previous `auto: bool = None` style of
        # implicit Optional is rejected by PEP 484 strict type checking.
        self.auto = auto
        self.mode = mode
        self.max_pages = max_pages
    def apply(self, other: Settings) -> Settings:
        """Returns a Settings with own values overwritten by non-None values of other"""
        out = copy.copy(self)
        auto, mode, max_pages = other.auto, other.mode, other.max_pages
        if auto is not None:
            out.auto = auto
        if mode is not None:
            out.mode = mode
        if max_pages is not None:
            out.max_pages = max_pages
        return out
    def asdict(self) -> dict[str, Any]:
        """Return only the fields that are explicitly set (non-None)."""
        return {k: v for k in self.__slots__ if (v := getattr(self, k)) is not None}
class Database:
    """Caches crosspost settings and sent-message bookkeeping on top of the
    bot's database."""
    def __init__(self, bot: BeattieBot):
        self.db = bot.db
        self.bot = bot
        self.db.bind_tables(Table)
        # (guild_id, channel_id) -> Settings; channel_id 0 holds guild defaults.
        self._settings_cache: dict[tuple[int, int], Settings] = {}
        # Invoking-message ids in insertion (and therefore expiry) order.
        self._expiry_deque: deque[int] = deque()
        # invoking message id -> ids of the messages sent in response.
        self._message_cache: dict[int, list[int]] = {}
        bot.loop.create_task(self.__init())
    async def __init(self) -> None:
        """Create tables and warm the message cache from rows still inside
        the TTL window."""
        await self.bot.wait_until_ready()
        for table in (CrosspostSettings, CrosspostMessage):
            await table.create(if_not_exists=True)  # type: ignore
        async with self.db.get_session() as s:
            # Snowflake ids encode their timestamp, so the TTL cutoff can be
            # expressed as a minimum invoking_message id.
            query = (
                s.select(CrosspostMessage)
                .where(
                    CrosspostMessage.invoking_message
                    > time_snowflake(
                        datetime.utcnow() - timedelta(seconds=MESSAGE_CACHE_TTL)
                    )
                )
                .order_by(CrosspostMessage.invoking_message)
            )
            for invoking_message, elems in groupby(
                await (await query.all()).flatten(),
                key=lambda elem: elem.invoking_message,
            ):
                self._expiry_deque.append(invoking_message)
                self._message_cache[invoking_message] = [
                    elem.sent_message for elem in elems
                ]
        self._expiry_task = asyncio.create_task(self._expire())
    async def _expire(self):
        """Background task: drop cache entries once they exceed the TTL.
        Finishes when the deque drains; add_sent_message restarts it."""
        try:
            while self._expiry_deque:
                entry = self._expiry_deque.popleft()
                until = snowflake_time(entry) + timedelta(seconds=MESSAGE_CACHE_TTL)
                now = datetime.utcnow()
                sleep_time = (until - now).total_seconds()
                await asyncio.sleep(sleep_time)
                self._message_cache.pop(entry, None)
        except Exception as e:
            import sys
            import traceback
            print("Exception in message cache expiry task", file=sys.stderr)
            traceback.print_exception(type(e), e, e.__traceback__)
    async def get_settings(self, message: Message) -> Settings:
        """Resolve effective settings for a message: guild defaults, then
        category override, then channel override."""
        guild = message.guild
        assert guild is not None
        channel = message.channel
        assert isinstance(channel, TextChannel)
        guild_id = guild.id
        # channel_id 0 is the guild-wide default scope.
        out = await self._get_settings(guild_id, 0)
        if category := channel.category:
            out = out.apply(await self._get_settings(guild_id, category.id))
        out = out.apply(await self._get_settings(guild_id, message.channel.id))
        return out
    async def _get_settings(self, guild_id: int, channel_id: int) -> Settings:
        """Fetch settings for one (guild, channel) scope, caching the result.
        A missing row is cached as an empty Settings."""
        try:
            return self._settings_cache[(guild_id, channel_id)]
        except KeyError:
            async with self.db.get_session() as s:
                query = s.select(CrosspostSettings).where(
                    (CrosspostSettings.guild_id == guild_id)
                    & (CrosspostSettings.channel_id == channel_id)
                )
                config = await query.first()
            if config is None:
                res = Settings()
            else:
                res = Settings(config.auto, config.mode, config.max_pages)
            self._settings_cache[(guild_id, channel_id)] = res
            return res
    async def set_settings(
        self, guild_id: int, channel_id: int, settings: Settings
    ) -> None:
        """Upsert settings for a scope and refresh the cache."""
        self._settings_cache[(guild_id, channel_id)] = settings
        kwargs = settings.asdict()
        async with self.db.get_session() as s:
            row = CrosspostSettings(
                guild_id=guild_id,
                channel_id=channel_id,
                **kwargs,
            )
            query = s.insert.rows(row)
            # On conflict, only overwrite the columns that were explicitly set.
            query = query.on_conflict(
                CrosspostSettings.guild_id, CrosspostSettings.channel_id
            ).update(*(getattr(CrosspostSettings, key) for key in kwargs))
            await query.run()
    async def get_sent_messages(self, invoking_message: int) -> list[int]:
        """Return the ids of messages sent for invoking_message.

        Falls back to the database when the cache entry may have expired
        already; very recent misses are trusted and return []."""
        if sent_messages := self._message_cache.get(invoking_message):
            return sent_messages
        elif (
            datetime.utcnow() - snowflake_time(invoking_message)
        ).total_seconds() > MESSAGE_CACHE_TTL - 3600:  # an hour's leeway
            async with self.db.get_session() as s:
                query = s.select(CrosspostMessage).where(
                    CrosspostMessage.invoking_message == invoking_message
                )
                return [
                    elem.sent_message for elem in await (await query.all()).flatten()
                ]
        else:
            return []
    async def add_sent_message(self, invoking_message: int, sent_message: int):
        """Record one sent message in cache and database, reviving the
        expiry task if it had drained and finished."""
        if (messages := self._message_cache.get(invoking_message)) is None:
            messages = []
            self._message_cache[invoking_message] = messages
            self._expiry_deque.append(invoking_message)
        messages.append(sent_message)
        async with self.db.get_session() as s:
            await s.add(
                CrosspostMessage(
                    sent_message=sent_message, invoking_message=invoking_message
                )
            )
        if self._expiry_task.done():
            self._expiry_task = asyncio.create_task(self._expire())
    async def del_sent_messages(self, invoking_message: int):
        """Forget all sent messages for invoking_message (cache + database)."""
        self._message_cache.pop(invoking_message, None)
        async with self.db.get_session() as s:
            await s.delete(CrosspostMessage).where(
                CrosspostMessage.invoking_message == invoking_message
            ).run()
class CrosspostContext(BContext):
    # Set by the cog when it builds the context.
    cog: Crosspost
    async def send(self, content: object = None, **kwargs: Any) -> Message:
        """Send via _send, shielded so that a cancelled crosspost task still
        lets an in-flight send complete and get recorded."""
        task = asyncio.create_task(
            self._send(
                content,
                **kwargs,
            )
        )
        try:
            return await asyncio.shield(task)
        except asyncio.CancelledError as e:
            # Wait for the underlying send to finish so the sent message id
            # is recorded for cleanup, then propagate the cancellation.
            await asyncio.wait_for(task, timeout=None)
            raise e from None
    async def _send(
        self,
        content: object = None,
        *,
        file: File = None,
        **kwargs: Any,
    ) -> Message:
        """Send, replacing an over-limit attachment with an explanatory
        message, and record the sent message id for later cleanup."""
        if file:
            fp = file.fp
            assert isinstance(fp, BytesIO)
            guild = self.guild
            assert guild is not None
            size = len(fp.getbuffer())
            # Discord rejects attachments at or above the guild's limit.
            if size >= guild.filesize_limit:
                content = f"Image too large to upload ({display_bytes(size)})."
                file = None
        msg = await super().send(
            content,
            file=file,
            **kwargs,
        )
        await self.cog.db.add_sent_message(self.message.id, msg.id)
        return msg
class Crosspost(Cog):
    """Crossposts images from tweets and other social media"""
    bot: BeattieBot
    # Auth headers, populated from config files in __init / pixiv_login_loop.
    hiccears_headers: dict[str, str] = {}
    imgur_headers: dict[str, str] = {}
    pixiv_headers: dict[str, str] = {
        "App-OS": "ios",
        "App-OS-Version": "10.3.1",
        "App-Version": "6.7.1",
        "User-Agent": "PixivIOSApp/6.7.1 (ios 10.3.1; iPhone8,1)",
    }
    # Session id obtained from the Inkbunny login endpoint in __init.
    inkbunny_sid: str = ""
    # invoking message id -> in-flight crosspost task (shared across reloads).
    ongoing_tasks: dict[int, asyncio.Task]
    # URL pattern -> display_<site>_images handler; built in __init__.
    expr_dict: dict[re.Pattern, Callable[[CrosspostContext, str], Awaitable[bool]]]
    def __init__(self, bot: BeattieBot):
        self.bot = bot
        self.db = Database(bot)
        with open("config/headers.toml") as fp:
            self.headers = toml.load(fp)
        self.session = aiohttp.ClientSession(loop=bot.loop)
        self.parser = etree.HTMLParser()
        # Map each *_URL_EXPR module constant to its display_<site>_images
        # handler, keyed on the site name (first '_'-token, lowercased).
        self.expr_dict = {
            expr: getattr(self, f"display_{name.partition('_')[0].lower()}_images")
            for name, expr in globals().items()
            if name.endswith("URL_EXPR")
        }
        self.login_task = self.bot.loop.create_task(self.pixiv_login_loop())
        self.init_task = bot.loop.create_task(self.__init())
        # Reuse the task table across cog reloads so message deletions can
        # still cancel tasks started before the reload.
        if (ongoing_tasks := bot.extra.get("crosspost_ongoing_tasks")) is not None:
            self.ongoing_tasks = ongoing_tasks
        else:
            self.ongoing_tasks = {}
            bot.extra["crosspost_ongoing_tasks"] = self.ongoing_tasks
    async def __init(self) -> None:
        """Load API credentials from config and log into Inkbunny."""
        with open("config/logins.toml") as fp:
            data = toml.load(fp)
        imgur_id = data["imgur"]["id"]
        self.imgur_headers["Authorization"] = f"Client-ID {imgur_id}"
        self.hiccears_headers = data["hiccears"]
        ib_login = data["inkbunny"]
        url = INKBUNNY_API_FMT.format("login")
        async with self.get(url, "POST", params=ib_login) as resp:
            json = await resp.json()
        self.inkbunny_sid = json["sid"]
def cog_check(self, ctx: BContext) -> bool:
return ctx.guild is not None
    async def pixiv_login_loop(self) -> None:
        """Refresh the pixiv OAuth token forever, sleeping until each token
        expires, and persist the rotated refresh token back to config."""
        url = "https://oauth.secure.pixiv.net/auth/token"
        while True:
            with open("config/logins.toml") as fp:
                logins = toml.load(fp)
            login = logins["pixiv"]
            data = {
                "get_secure_url": 1,
                "client_id": "MOBrBDS8blbauoSck0ZfDbtuzpyT",
                "client_secret": "lsACyCD94FhDUtGTXi3QzcFE2uU1hqtDaKeqrdwj",
            }
            # Prefer the refresh token once we have one; fall back to
            # username/password for the first login.
            if (token := login.get("refresh_token")) is not None:
                data["grant_type"] = "refresh_token"
                data["refresh_token"] = token
            else:
                data["grant_type"] = "password"
                data["username"] = login["username"]
                data["password"] = login["password"]
            hash_secret = (
                "28c1fdd170a5204386cb1313c7077b34f83e4aaf4aa829ce78c231e05b0bae2c"
            )
            # The pixiv API requires these client-time/hash headers.
            now = datetime.now().isoformat()
            headers = {
                "X-Client-Time": now,
                "X-Client-Hash": md5((now + hash_secret).encode("utf-8")).hexdigest(),
            }
            # Retry the token request until it succeeds, logging each failure.
            while True:
                try:
                    async with self.get(
                        url,
                        "POST",
                        data=data,
                        use_default_headers=False,
                        headers=headers,
                    ) as resp:
                        res = (await resp.json())["response"]
                except Exception as e:
                    message = "An error occurred in the pixiv login loop"
                    self.bot.logger.exception(
                        message, exc_info=(type(e), e, e.__traceback__)
                    )
                else:
                    break
            self.pixiv_headers["Authorization"] = f'Bearer {res["access_token"]}'
            # Persist the rotated refresh token for the next run.
            login["refresh_token"] = res["refresh_token"]
            with open("config/logins.toml", "w") as fp:
                toml.dump(logins, fp)
            await asyncio.sleep(res["expires_in"])
def cog_unload(self) -> None:
self.bot.loop.create_task(self.session.close())
self.login_task.cancel()
def get(
self,
url: str,
method: str = "GET",
*,
use_default_headers: bool = True,
**kwargs: Any,
) -> get_:
if use_default_headers:
kwargs["headers"] = {**self.headers, **kwargs.get("headers", {})}
return get_(self.session, url, method, **kwargs)
    @overload
    async def save(
        self,
        img_url: str,
        *,
        fp: None = ...,
        seek_begin: bool = ...,
        use_default_headers: bool = ...,
        headers: Optional[dict[str, str]] = ...,
        filesize_limit: Optional[int] = ...,
    ) -> BytesIO:
        ...
    @overload
    async def save(
        self,
        img_url: str,
        *,
        fp: _IO,
        seek_begin: bool = ...,
        use_default_headers: bool = ...,
        headers: Optional[dict[str, str]] = ...,
        filesize_limit: Optional[int] = ...,
    ) -> _IO:
        ...
    async def save(
        self,
        img_url: str,
        *,
        fp=None,
        seek_begin: bool = True,
        use_default_headers: bool = True,
        headers: Optional[dict[str, str]] = None,
        filesize_limit: Optional[int] = None,
    ):
        """Download img_url into fp (or a fresh BytesIO) and return it.

        Raises ResponseError(413) when the payload exceeds filesize_limit:
        via Content-Length up front when the server sends it, otherwise by
        measuring the downloaded buffer afterwards.
        """
        headers = headers or {}
        img = fp or BytesIO()
        length_checked = filesize_limit is None
        async with self.get(
            img_url, use_default_headers=use_default_headers, headers=headers
        ) as img_resp:
            if not length_checked and img_resp.content_length is not None:
                assert filesize_limit is not None
                if img_resp.content_length > filesize_limit:
                    raise ResponseError(413)  # yes I know that's not how this works
                length_checked = True
            async for chunk in img_resp.content.iter_any():
                img.write(chunk)
        if seek_begin:
            img.seek(0)
        # Fallback size check when no Content-Length header was sent.
        # NOTE(review): assumes fp supports getbuffer() (BytesIO-like) when
        # filesize_limit is given — confirm for non-default fp callers.
        if not length_checked:
            assert filesize_limit is not None
            if len(img.getbuffer()) > filesize_limit:
                raise ResponseError(413)
        return img
    async def process_links(self, ctx: CrosspostContext) -> None:
        """Run every site handler over the message's links, suppressing the
        original embeds after the first successful post when allowed."""
        content = remove_spoilers(ctx.message.content)
        me = ctx.me
        channel = ctx.channel
        assert isinstance(me, discord.Member)
        assert isinstance(channel, discord.TextChannel)
        # Only suppress embeds in mode 2 (upload) and when we can edit.
        do_suppress = (
            channel.permissions_for(me).manage_messages
            and await self.get_mode(ctx) == 2
        )
        for expr, func in self.expr_dict.items():
            for link in expr.findall(content):
                try:
                    # Handlers return truthy when they posted something.
                    if await func(ctx, link) and do_suppress:
                        try:
                            await ctx.message.edit(suppress=True)
                        except (discord.NotFound, discord.Forbidden):
                            pass
                        do_suppress = False
                except ResponseError as e:
                    if e.code == 404:
                        await ctx.send("Post not found.")
                    else:
                        await ctx.bot.handle_error(ctx, e)
                except Exception as e:
                    await ctx.bot.handle_error(ctx, e)
@Cog.listener()
async def on_message(self, message: Message) -> None:
if (guild := message.guild) is None or message.author.bot:
return
channel = message.channel
me = guild.me
assert isinstance(channel, discord.TextChannel)
assert isinstance(me, discord.Member)
if not me.permissions_in(channel).send_messages:
return
if not (await self.db.get_settings(message)).auto:
return
if "http" not in message.content:
return
ctx = await self.bot.get_context(message, cls=CrosspostContext)
if ctx.command is None:
ctx.command = self.post
task = asyncio.create_task(self.process_links(ctx))
self.ongoing_tasks[message.id] = task
try:
await asyncio.wait_for(task, None)
except asyncio.CancelledError:
pass
except Exception as e:
raise e
finally:
del self.ongoing_tasks[message.id]
    @Cog.listener()
    async def on_raw_message_delete(
        self, payload: discord.RawMessageDeleteEvent
    ) -> None:
        """When an invoking message is deleted, delete the crossposts it produced."""
        async def delete_messages(messages: list[int]):
            channel_id = payload.channel_id
            for message_id in messages:
                try:
                    await self.bot.http.delete_message(channel_id, message_id)
                except discord.NotFound:
                    pass
                except discord.Forbidden:
                    # No permission; further deletes in this channel would fail too.
                    return
        message_id = payload.message_id
        messages_deleted = False
        if task := self.ongoing_tasks.get(message_id):
            # The crosspost is still running: stop it before cleaning up.
            task.cancel()
        if messages := await self.db.get_sent_messages(message_id):
            await delete_messages(messages)
            messages_deleted = True
        if task:
            # Wait for the cancelled task to settle, then sweep again —
            # the cache hands back its live list, which the task may have
            # appended to while we were deleting.
            await asyncio.wait([task])
            if messages:
                await delete_messages(messages)
                messages_deleted = True
        if messages_deleted:
            await self.db.del_sent_messages(message_id)
    async def send(
        self,
        ctx: CrosspostContext,
        link: str,
        *,
        headers: Optional[dict[str, str]] = None,
        use_default_headers: bool = True,
    ) -> None:
        """Post link per the channel's crosspost mode.

        Mode 1 sends the bare link (Discord embeds it); mode 2 downloads the
        image and uploads it as a file attachment.
        """
        mode = await self.get_mode(ctx)
        if mode == 1:
            await ctx.send(link)
        elif mode == 2:
            img = await self.save(
                link, headers=headers, use_default_headers=use_default_headers
            )
            # Use the last filename-looking component of the URL as the name.
            filename = re.findall(r"[\w. -]+\.[\w. -]+", link)[-1]
            file = File(img, filename)
            await ctx.send(file=file)
        else:
            raise RuntimeError("Invalid crosspost mode!")
async def get_mode(self, ctx: BContext) -> int:
return (await self.db.get_settings(ctx.message)).mode or 1
async def get_max_pages(self, ctx: BContext) -> int:
settings = await self.db.get_settings(ctx.message)
max_pages = settings.max_pages
if max_pages is None:
max_pages = 4
return max_pages
    async def display_twitter_images(self, ctx: CrosspostContext, link: str) -> bool:
        """Crosspost images (or a converted gif) from a tweet.

        Returns True when everything was reposted successfully.
        """
        # In link mode Discord already embeds tweets; nothing to do.
        if await self.get_mode(ctx) == 1:
            return False
        link = f"https://{link}"
        async with self.get(link) as resp:
            root = etree.fromstring(await resp.read(), self.parser)
        try:
            tweet = root.xpath(TWEET_SELECTOR)[0]
        except IndexError:
            await ctx.send("Failed to get tweet. Maybe the account is locked?")
            return False
        if imgs := tweet.xpath(TWITTER_IMG_SELECTOR):
            for img in imgs:
                url = img.get("src")
                # :orig requests the full-resolution image.
                await self.send(ctx, f"{url}:orig")
            return True
        elif tweet.xpath(TWITTER_IS_GIF):
            # Twitter "gifs" are mp4 videos; pipe youtube-dl into ffmpeg to
            # produce a real looping gif.
            proc = await subprocess.create_subprocess_shell(
                f"youtube-dl {link} -o - | "
                "ffmpeg -i pipe:0 "
                "-vf 'split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse,loop=-1' "
                "-f gif pipe:1",
                stdout=subprocess.PIPE,
                stderr=subprocess.DEVNULL,
            )
            gif = BytesIO()
            tweet_id = link.rpartition("/")[2].partition("?")[0]
            filename = f"{tweet_id}.gif"
            try:
                stdout = await try_wait_for(proc)
            except asyncio.TimeoutError:
                await ctx.send("Gif took too long to process.")
                return False
            gif.write(stdout)
            gif.seek(0)
            file = File(gif, filename)
            msg = await ctx.send(file=file)
            # _send substitutes a "too large" notice when the upload won't fit.
            return not msg.content.startswith("Image too large to upload")
        else:
            return False
    async def display_pixiv_images(self, ctx: CrosspostContext, link: str) -> bool:
        """Crosspost artwork from a pixiv link, honoring the page limit.

        Handles single images, multi-page albums, and ugoira animations.
        """
        # Normalize to the "medium" page form so the ID regex below matches.
        if "mode" in link:
            link = re.sub(r"(?<=mode=)\w+", "medium", link)
        elif "illust_id" in link:
            link = f"{link}&mode=medium"
        link = link.replace("http://", "https://")
        if match := re.search(r"(?:illust_id=|artworks/)(\d+)", link):
            illust_id = match.group(1)
        else:
            await ctx.send("Failed to find illust ID in pixiv link. This is a bug.")
            return False
        params = {"illust_id": illust_id}
        url = "https://app-api.pixiv.net/v1/illust/detail"
        async with self.get(
            url, params=params, use_default_headers=False, headers=self.pixiv_headers
        ) as resp:
            res = await resp.json()
        try:
            res = res["illust"]
        except KeyError:
            await ctx.send(
                "This feature works sometimes, but isn't working right now!"
                f"\nDebug info:\n{res.get('error')}"
            )
            return False
        # pixiv refuses hotlinked image requests unless the referer is the
        # artwork page.
        headers = {**self.pixiv_headers, "referer": link}
        guild = ctx.guild
        assert guild is not None
        filesize_limit = guild.filesize_limit
        content = None
        if single := res["meta_single_page"]:
            img_url = single["original_image_url"]
            if "ugoira" in img_url:
                try:
                    file = await self.get_ugoira(illust_id)
                except asyncio.TimeoutError:
                    await ctx.send("Ugoira took too long to process.")
                    return False
            else:
                content, file = await self.save_pixiv(img_url, headers, filesize_limit)
            await ctx.send(content, file=file)
        elif multi := res["meta_pages"]:
            # multi_image_post
            urls = (page["image_urls"]["original"] for page in multi)
            max_pages = await self.get_max_pages(ctx)
            num_pages = len(multi)
            if max_pages == 0:
                max_pages = num_pages
            # Download the first max_pages images concurrently...
            tasks = [
                self.bot.loop.create_task(
                    self.save_pixiv(img_url, headers, filesize_limit)
                )
                for img_url, _ in zip(urls, range(max_pages))
            ]
            # ...but post them in order.
            for task in tasks:
                content, file = await task
                await ctx.send(content, file=file)
            remaining = num_pages - max_pages
            if remaining > 0:
                s = "s" if remaining > 1 else ""
                message = (
                    f"{remaining} more image{s} at "
                    f"<https://www.pixiv.net/en/artworks/{illust_id}>"
                )
                await ctx.send(message)
        else:
            return False
        return True
async def save_pixiv(
self, img_url: str, headers: dict[str, str], filesize_limit: int
) -> tuple[Optional[str], File]:
content = None
try:
img = await self.save(
img_url, headers=headers, filesize_limit=filesize_limit
)
except ResponseError as e:
if e.code == 413:
img_url = img_url.replace("img-original", "img-master")
head, _, _ext = img_url.rpartition(".")
img_url = f"{head}_master1200.jpg"
img = await self.save(img_url, headers=headers)
content = "Full size too large, standard resolution used."
else:
raise e from None
file = File(img, img_url.rpartition("/")[-1])
return content, file
    async def get_ugoira(self, illust_id: str) -> File:
        """Assemble a pixiv ugoira animation into a gif File.

        Fetches the frame zip and per-frame delays, then runs ffmpeg twice:
        once to generate a palette, once to encode the gif with it.
        """
        url = "https://app-api.pixiv.net/v1/ugoira/metadata"
        params = {"illust_id": illust_id}
        headers = self.pixiv_headers
        async with self.get(
            url, params=params, use_default_headers=False, headers=headers
        ) as resp:
            res = (await resp.json())["ugoira_metadata"]
        zip_url = res["zip_urls"]["medium"]
        # Rewrite to the full-resolution zip; the API only lists "medium".
        zip_url = re.sub(r"ugoira\d+x\d+", "ugoira1920x1080", zip_url)
        headers = {
            **self.pixiv_headers,
            "referer": f"https://www.pixiv.net/en/artworks/{illust_id}",
        }
        zip_bytes = await self.save(zip_url, headers=headers, use_default_headers=False)
        zfp = ZipFile(zip_bytes)
        with TemporaryDirectory() as td:
            tempdir = Path(td)
            zfp.extractall(tempdir)
            # ffmpeg concat-demuxer input: each frame with its display duration.
            with open(tempdir / "durations.txt", "w") as fp:
                for frame in res["frames"]:
                    duration = int(frame["delay"]) / 1000  # delays are in ms
                    fp.write(f"file '{frame['file']}'\nduration {duration}\n")
            # Pass 1: build a palette for gif quantization.
            proc = await subprocess.create_subprocess_exec(
                "ffmpeg",
                "-i",
                f"{tempdir}/%06d.jpg",
                "-vf",
                "palettegen",
                f"{tempdir}/palette.png",
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
            await proc.wait()
            # Pass 2: encode the gif using the palette and frame durations.
            proc = await subprocess.create_subprocess_exec(
                "ffmpeg",
                "-f",
                "concat",
                "-safe",
                "0",
                "-i",
                f"{tempdir}/durations.txt",
                "-i",
                f"{tempdir}/palette.png",
                "-lavfi",
                "paletteuse",
                "-f",
                "gif",
                "pipe:1",
                stdout=subprocess.PIPE,
                stderr=subprocess.DEVNULL,
            )
            stdout = await try_wait_for(proc)
        img = BytesIO(stdout)
        img.seek(0)
        name = f"{illust_id}.gif"
        return File(img, name)
    async def display_hiccears_images(self, ctx: CrosspostContext, link: str) -> bool:
        """Crosspost images from a Hiccears gallery or picture page."""
        async with self.get(link, headers=self.hiccears_headers) as resp:
            root = etree.fromstring(await resp.read(), self.parser)
            # Single-picture page: one link to the full-size image.
            if single_image := root.xpath(HICCEARS_IMG_SELECTOR):
                a = single_image[0]
                href = a.get("href").lstrip(".")
                url = f"https://{resp.host}{href}"
                await self.send(ctx, url)
                return True
            thumbs = root.xpath(HICCEARS_THUMB_SELECTOR)
            num_images = len(thumbs)
            if num_images == 0:
                # Logged-out gallery pages render without thumbnails.
                await ctx.send(
                    "Hiccears login expired. <@!140293604726800385> needs to fix this. >:("
                )
                return False
            max_pages = await self.get_max_pages(ctx)
            if max_pages == 0:
                max_pages = num_images
            pages_remaining = num_images - max_pages
            for thumb in thumbs[:max_pages]:
                # Rewrite a thumbnail path into the corresponding full image path.
                href = thumb.get("src").lstrip(".").replace("thumbnails", "imgs")[:-4]
                url = f"https://{resp.host}{href}.jpg"
                await self.send(ctx, url)
            if pages_remaining > 0:
                s = "s" if pages_remaining > 1 else ""
                message = f"{pages_remaining} more image{s} at <{link}>"
                await ctx.send(message)
            return True
    async def display_tumblr_images(self, ctx: CrosspostContext, link: str) -> bool:
        """Crosspost og:image entries from a tumblr post."""
        # Skip the first og:image by default; start at 0 after a blog redirect.
        # NOTE(review): the reason for the skip isn't visible here — presumably
        # the first entry is not post content. Confirm against the page markup.
        idx = 1
        async with self.get(link) as resp:
            root = etree.fromstring(await resp.read(), self.parser)
        if not str(resp.url).startswith(link):  # explicit blog redirect
            async with self.bot.session.get(
                link
            ) as resp:  # somehow this doesn't get redirected?
                root = etree.fromstring(await resp.read(), self.parser)
            idx = 0
        images = root.xpath(TUMBLR_IMG_SELECTOR)
        mode = await self.get_mode(ctx)
        max_pages = await self.get_max_pages(ctx)
        num_images = len(images)
        if max_pages == 0:
            max_pages = num_images
        pages_remaining = num_images - max_pages
        images = images[idx:max_pages]
        if not images:
            return False
        for image in images:
            url = image.get("content")
            await self.send(ctx, url)
        if mode == 1 and pages_remaining > 0:
            s = "s" if pages_remaining > 1 else ""
            message = f"{pages_remaining} more image{s} at <{link}>"
            await ctx.send(message)
        return True
    async def display_mastodon_images(self, ctx: CrosspostContext, link: str) -> bool:
        """Crosspost media attachments from a Mastodon-compatible status."""
        if (match := MASTODON_URL_GROUPS.match(link)) is None:
            return False
        api_url = MASTODON_API_FMT.format(*match.groups())
        try:
            async with self.get(api_url, use_default_headers=False) as resp:
                post = await resp.json()
        except (ResponseError, aiohttp.ClientError):
            # The URL regex is broad; this host isn't actually a Mastodon API.
            return False
        if not (images := post.get("media_attachments")):
            return False
        mode = await self.get_mode(ctx)
        # In link mode Discord's own embed shows the first attachment already,
        # unless the post is marked sensitive (hidden behind a warning).
        idx = 0 if mode != 1 or post["sensitive"] else 1
        all_embedded = True
        for image in images[idx:]:
            url = image["remote_url"] or image["url"]
            if image.get("type") == "gifv":
                # gifv attachments are videos; convert to a looping gif.
                with NamedTemporaryFile() as fp:
                    await self.save(
                        url, fp=fp, seek_begin=False, use_default_headers=False
                    )
                    proc = await asyncio.create_subprocess_exec(
                        "ffmpeg",
                        "-i",
                        fp.name,
                        "-vf",
                        "split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse,loop=-1",
                        "-f",
                        "gif",
                        "pipe:1",
                        stdout=subprocess.PIPE,
                        stderr=subprocess.DEVNULL,
                    )
                    try:
                        stdout = await try_wait_for(proc)
                    except asyncio.TimeoutError:
                        await ctx.send("Gif took too long to process.")
                        all_embedded = False
                        continue
                img = BytesIO(stdout)
                filename = f"{url.rpartition('/')[2]}.gif"
                file = File(img, filename)
                msg = await ctx.send(file=file)
                if all_embedded and msg.content.startswith("Image too large to upload"):
                    all_embedded = False
            else:
                await self.send(ctx, url)
        return all_embedded
    async def display_inkbunny_images(self, ctx: CrosspostContext, sub_id: str) -> bool:
        """Crosspost all files attached to an Inkbunny submission."""
        url = INKBUNNY_API_FMT.format("submissions")
        # Authenticated with the session id obtained at cog load time.
        params = {"sid": self.inkbunny_sid, "submission_ids": sub_id}
        async with self.get(
            url, "POST", use_default_headers=False, params=params
        ) as resp:
            response = await resp.json()
        sub = response["submissions"][0]
        for file in sub["files"]:
            url = file["file_url_full"]
            await self.send(ctx, url)
        return True
    async def display_imgur_images(self, ctx: CrosspostContext, album_id: str) -> bool:
        """Crosspost images from an imgur album, up to the page limit."""
        async with self.get(
            f"https://api.imgur.com/3/album/{album_id}",
            use_default_headers=False,
            headers=self.imgur_headers,
        ) as resp:
            data = await resp.json()
        images = data["data"]["images"]
        urls = (image["link"] for image in data["data"]["images"])
        max_pages = await self.get_max_pages(ctx)
        num_pages = len(images)
        if max_pages == 0:
            max_pages = num_pages

        async def helper(link, n=1):
            # imgur intermittently returns 400; back off and retry up to 10 times.
            try:
                await self.send(
                    ctx, link, headers=self.imgur_headers, use_default_headers=False
                )
            except ResponseError as e:
                if e.code == 400 and n <= 10:
                    await asyncio.sleep(n)
                    await helper(link, n + 1)
                else:
                    raise e

        for img_url, _ in zip(urls, range(max_pages)):
            await helper(img_url)
        remaining = num_pages - max_pages
        if remaining > 0:
            s = "s" if remaining > 1 else ""
            message = f"{remaining} more image{s} at <https://imgur.com/a/{album_id}>"
            await ctx.send(message)
        return True
    @commands.command(hidden=True)
    @is_owner_or(manage_guild=True)
    # Deprecated stub: kept so existing users are redirected to `crosspost`.
    # (No docstring on purpose — it would become user-visible help text.)
    async def twitter(self, ctx: BContext, enabled: Union[bool, str] = True) -> None:
        # `enabled` is accepted but ignored, matching the old signature.
        await ctx.send(
            "This command is deprecated! "
            f"Please use `{ctx.prefix}crosspost` to manage settings."
        )
    @commands.group()
    @is_owner_or(manage_guild=True)
    async def crosspost(self, ctx: BContext) -> None:
        """Change image crosspost settings.
Each subcommand takes, in addition to the configuration value, an optional \
target, which specifies a channel or category to apply the setting to, instead of \
applying it to the guild as a whole."""
        # Container group only; all behavior lives in the subcommands.
        pass
    @crosspost.command()
    async def auto(
        self,
        ctx: BContext,
        enabled: bool,
        *,
        # NOTE: deliberately not Optional[...] — wrapping in Optional would add
        # NoneType to the converter union and change conversion failure handling.
        target: Union[CategoryChannel, TextChannel] = None,
    ) -> None:
        """Enable or disable automatic crossposting."""
        guild = ctx.guild
        assert guild is not None
        settings = Settings(auto=enabled)
        # A target id of 0 stores the guild-wide setting.
        await self.db.set_settings(guild.id, target.id if target else 0, settings)
        fmt = "en" if enabled else "dis"
        message = f"Crossposting images {fmt}abled"
        if target is not None:
            message = f"{message} in {target.mention}"
        await ctx.send(f"{message}.")
    @crosspost.command()
    async def mode(
        self,
        ctx: BContext,
        mode: str,
        *,
        target: Union[CategoryChannel, TextChannel] = None,
    ) -> None:
        """Change image crossposting mode.
link: send a link to images when available
upload: always upload image files
Fetching images from Twitter is disabled in link mode.
When in upload mode and the bot has the Manage Messages permission, it'll \
remove embeds from messages it processes successfully."""
        # Map the user-facing mode names onto the stored integer codes.
        if mode == "link":
            crosspost_mode = 1
        elif mode == "upload":
            crosspost_mode = 2
        else:
            raise commands.BadArgument(mode)
        guild = ctx.guild
        assert guild is not None
        settings = Settings(mode=crosspost_mode)
        # A target id of 0 stores the guild-wide setting.
        await self.db.set_settings(guild.id, target.id if target else 0, settings)
        message = "Crosspost mode updated"
        if target is not None:
            message = f"{message} in {target.mention}"
        await ctx.send(f"{message}.")
    @crosspost.command()
    async def pages(
        self,
        ctx: BContext,
        max_pages: int,
        *,
        target: Union[CategoryChannel, TextChannel] = None,
    ) -> None:
        """Set the maximum number of images to send.
Set to 0 for no limit."""
        guild = ctx.guild
        assert guild is not None
        settings = Settings(max_pages=max_pages)
        # A target id of 0 stores the guild-wide setting.
        await self.db.set_settings(guild.id, target.id if target else 0, settings)
        message = f"Max crosspost pages set to {max_pages}"
        if target is not None:
            message = f"{message} in {target.mention}"
        await ctx.send(f"{message}.")
    async def crosspost_error(self, ctx: BContext, e: Exception) -> None:
        """Shared error handler for the crosspost subcommands."""
        # BadUnionArgument here means the `target` argument failed to convert
        # to a category or channel.
        if isinstance(e, BadUnionArgument):
            inner = e.errors[0]
            assert isinstance(inner, ChannelNotFound)
            await ctx.send(
                f"Could not resolve `{inner.argument}` as a category or channel"
            )
        else:
            await ctx.bot.handle_error(ctx, e)
auto_error = auto.error(crosspost_error)
mode_error = mode.error(crosspost_error)
pages_error = pages.error(crosspost_error)
    @commands.command()
    async def post(self, ctx: BContext, *, _: str) -> None:
        """Embed images in the given links regardless of the auto setting."""
        # The argument is unused: process_links re-reads the raw message content.
        new_ctx = await self.bot.get_context(ctx.message, cls=CrosspostContext)
        await self.process_links(new_ctx)
    @commands.command(aliases=["_"])
    async def nopost(self, ctx: BContext, *, _: str = "") -> None:
        """Ignore links in the following message.
You can also use ||spoiler tags|| to achieve the same thing."""
        # Deliberately does nothing.
        pass
def setup(bot: BeattieBot) -> None:
    """Extension entry point for discord.py: register the Crosspost cog."""
    bot.add_cog(Crosspost(bot))
# Normalize imports
from __future__ import annotations
import asyncio
import copy
import re
import sys
import traceback
from asyncio import subprocess
from collections import deque
from collections.abc import Awaitable, Callable
from datetime import datetime, timedelta
from hashlib import md5
from io import BytesIO
from itertools import groupby
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import IO, Any, Optional, TypeVar, Union, overload
from zipfile import ZipFile
import aiohttp
import discord
import toml
from discord import CategoryChannel, File, Message, TextChannel
from discord.ext import commands
from discord.ext.commands import BadUnionArgument, ChannelNotFound, Cog
from discord.utils import snowflake_time, time_snowflake
from lxml import etree
from bot import BeattieBot
from context import BContext
from schema.crosspost import Crosspost as CrosspostSettings
from schema.crosspost import CrosspostMessage, Table
from utils.checks import is_owner_or
from utils.contextmanagers import get as get_
from utils.etc import display_bytes, remove_spoilers
from utils.exceptions import ResponseError
_IO = TypeVar("_IO", bound=IO[bytes])  # any writable binary file-like object

# One URL regex per site. Crosspost.__init__ maps each *_URL_EXPR global to its
# display_<site>_images handler by name, so the naming convention is load-bearing.
TWITTER_URL_EXPR = re.compile(
    r"https?://(?:(?:www|mobile|m)\.)?(twitter\.com/[^\s/]+/status/\d+)"
)
# XPath selectors for scraping the twitter web page markup.
TWEET_SELECTOR = ".//div[contains(@class, 'permalink-tweet')]"
TWITTER_IMG_SELECTOR = ".//img[@data-aria-label-part]"
TWITTER_IS_GIF = ".//div[contains(@class, 'PlayableMedia--gif')]"
PIXIV_URL_EXPR = re.compile(
    r"https?://(?:www\.)?pixiv\.net/(?:member_illust\.php\?"
    r"[\w]+=[\w]+(?:&[\w]+=[\w]+)*|(?:\w{2}/)?artworks/\d+(?:#\w*)?)"
)
HICCEARS_URL_EXPR = re.compile(
    r"https?://(?:www\.)?hiccears\.com/(?:(?:gallery)|(?:picture))\.php\?[gp]id=\d+"
)
HICCEARS_IMG_SELECTOR = ".//a[contains(@href, 'imgs')]"
HICCEARS_THUMB_SELECTOR = ".//img[contains(@src, 'thumbnails')]"
TUMBLR_URL_EXPR = re.compile(r"https?://[\w-]+\.tumblr\.com/post/\d+")
TUMBLR_IMG_SELECTOR = ".//meta[@property='og:image']"
# Mastodon can live on any domain, so this matches broadly; the handler
# verifies by probing the instance's status API.
MASTODON_URL_EXPR = re.compile(r"https?://\S+/[\w-]+/?(?:>|$|\s)")
MASTODON_URL_GROUPS = re.compile(r"https?://([^\s/]+)(?:/.+)+/([\w-]+)")
MASTODON_API_FMT = "https://{}/api/v1/statuses/{}"
INKBUNNY_URL_EXPR = re.compile(
    r"https?://(?:www\.)?inkbunny\.net/s/(\d+)(?:-p\d+-)?(?:#.*)?"
)
INKBUNNY_API_FMT = "https://inkbunny.net/api_{}.php"
IMGUR_URL_EXPR = re.compile(r"https?://(?:www\.)?imgur\.com/(?:a|gallery)/(\w+)")
MESSAGE_CACHE_TTL: int = 60 * 60 * 24  # one day in seconds
async def try_wait_for(
    proc: asyncio.subprocess.Process, *, timeout: int = 180, kill_timeout: int = 15
) -> bytes:
    """Wait for *proc* to finish and return its stdout.

    On timeout, the process is shut down (terminate, then kill after
    kill_timeout seconds) and asyncio.TimeoutError propagates.
    """
    try:
        stdout, _stderr = await asyncio.wait_for(proc.communicate(), timeout=timeout)
    except asyncio.TimeoutError:
        await gently_kill(proc, timeout=kill_timeout)
        raise
    return stdout
async def gently_kill(proc: asyncio.subprocess.Process, *, timeout: int):
    """Terminate *proc*, escalating to SIGKILL if it ignores SIGTERM."""
    proc.terminate()  # ask nicely first
    try:
        await asyncio.wait_for(proc.wait(), timeout=timeout)
    except asyncio.TimeoutError:
        # The process ignored the polite request; force it down.
        proc.kill()
class Settings:
    """Per-scope crosspost configuration.

    A value of None means "not set at this scope"; unset values are filled in
    from a broader scope via apply().
    """

    __slots__ = ("auto", "mode", "max_pages")

    auto: Optional[bool]
    mode: Optional[int]
    max_pages: Optional[int]

    def __init__(
        self,
        # PEP 484: defaults of None require explicit Optional annotations.
        auto: Optional[bool] = None,
        mode: Optional[int] = None,
        max_pages: Optional[int] = None,
    ) -> None:
        self.auto = auto
        self.mode = mode
        self.max_pages = max_pages

    def apply(self, other: Settings) -> Settings:
        """Returns a Settings with own values overwritten by non-None values of other"""
        out = copy.copy(self)
        auto, mode, max_pages = other.auto, other.mode, other.max_pages
        if auto is not None:
            out.auto = auto
        if mode is not None:
            out.mode = mode
        if max_pages is not None:
            out.max_pages = max_pages
        return out

    def asdict(self) -> dict[str, Any]:
        """Return only the explicitly-set (non-None) fields, e.g. for DB upserts."""
        return {k: v for k in self.__slots__ if (v := getattr(self, k)) is not None}
class Database:
    """Persistence and caching layer for crosspost settings and sent messages."""

    def __init__(self, bot: BeattieBot):
        self.db = bot.db
        self.bot = bot
        self.db.bind_tables(Table)
        # (guild_id, channel_id) -> Settings; channel_id 0 is the guild default.
        self._settings_cache: dict[tuple[int, int], Settings] = {}
        # Invoking-message ids in insertion (thus chronological) order,
        # awaiting expiry from the message cache.
        self._expiry_deque: deque[int] = deque()
        # invoking message id -> ids of the messages sent in response
        self._message_cache: dict[int, list[int]] = {}
        bot.loop.create_task(self.__init())

    async def __init(self) -> None:
        """Create tables and warm the message cache from recent rows."""
        await self.bot.wait_until_ready()
        for table in (CrosspostSettings, CrosspostMessage):
            await table.create(if_not_exists=True)  # type: ignore
        async with self.db.get_session() as s:
            # Only load rows young enough to still be cache-resident.
            query = (
                s.select(CrosspostMessage)
                .where(
                    CrosspostMessage.invoking_message
                    > time_snowflake(
                        datetime.utcnow() - timedelta(seconds=MESSAGE_CACHE_TTL)
                    )
                )
                .order_by(CrosspostMessage.invoking_message)
            )
            for invoking_message, elems in groupby(
                await (await query.all()).flatten(),
                key=lambda elem: elem.invoking_message,
            ):
                self._expiry_deque.append(invoking_message)
                self._message_cache[invoking_message] = [
                    elem.sent_message for elem in elems
                ]
        self._expiry_task = asyncio.create_task(self._expire())

    async def _expire(self):
        """Drop cached entries as they pass MESSAGE_CACHE_TTL; exits when empty."""
        try:
            while self._expiry_deque:
                entry = self._expiry_deque.popleft()
                # Message ids are snowflakes, so they encode their creation time.
                until = snowflake_time(entry) + timedelta(seconds=MESSAGE_CACHE_TTL)
                now = datetime.utcnow()
                sleep_time = (until - now).total_seconds()
                await asyncio.sleep(sleep_time)
                self._message_cache.pop(entry, None)
        except Exception as e:
            print("Exception in message cache expiry task", file=sys.stderr)
            traceback.print_exception(type(e), e, e.__traceback__)

    async def get_settings(self, message: Message) -> Settings:
        """Resolve effective settings: guild-wide, then category, then channel."""
        guild = message.guild
        assert guild is not None
        channel = message.channel
        assert isinstance(channel, TextChannel)
        guild_id = guild.id
        out = await self._get_settings(guild_id, 0)
        if category := channel.category:
            out = out.apply(await self._get_settings(guild_id, category.id))
        out = out.apply(await self._get_settings(guild_id, message.channel.id))
        return out

    async def _get_settings(self, guild_id: int, channel_id: int) -> Settings:
        """Fetch one scope's settings, memoized in _settings_cache."""
        try:
            return self._settings_cache[(guild_id, channel_id)]
        except KeyError:
            async with self.db.get_session() as s:
                query = s.select(CrosspostSettings).where(
                    (CrosspostSettings.guild_id == guild_id)
                    & (CrosspostSettings.channel_id == channel_id)
                )
                config = await query.first()
            # Cache the miss as an empty Settings so the DB isn't re-queried.
            if config is None:
                res = Settings()
            else:
                res = Settings(config.auto, config.mode, config.max_pages)
            self._settings_cache[(guild_id, channel_id)] = res
            return res

    async def set_settings(
        self, guild_id: int, channel_id: int, settings: Settings
    ) -> None:
        """Upsert one scope's settings and refresh the cache."""
        self._settings_cache[(guild_id, channel_id)] = settings
        kwargs = settings.asdict()
        async with self.db.get_session() as s:
            row = CrosspostSettings(
                guild_id=guild_id,
                channel_id=channel_id,
                **kwargs,
            )
            query = s.insert.rows(row)
            # On conflict, update only the columns that were explicitly set.
            query = query.on_conflict(
                CrosspostSettings.guild_id, CrosspostSettings.channel_id
            ).update(*(getattr(CrosspostSettings, key) for key in kwargs))
            await query.run()

    async def get_sent_messages(self, invoking_message: int) -> list[int]:
        """Return ids of the messages sent in response to invoking_message."""
        if sent_messages := self._message_cache.get(invoking_message):
            return sent_messages
        elif (
            datetime.utcnow() - snowflake_time(invoking_message)
        ).total_seconds() > MESSAGE_CACHE_TTL - 3600:  # an hour's leeway
            # Old enough that the cache may have expired it: hit the DB.
            async with self.db.get_session() as s:
                query = s.select(CrosspostMessage).where(
                    CrosspostMessage.invoking_message == invoking_message
                )
                return [
                    elem.sent_message for elem in await (await query.all()).flatten()
                ]
        else:
            # Young and not cached: nothing was sent for this message.
            return []

    async def add_sent_message(self, invoking_message: int, sent_message: int):
        """Record a sent message in cache and DB, (re)arming the expiry task."""
        if (messages := self._message_cache.get(invoking_message)) is None:
            messages = []
            self._message_cache[invoking_message] = messages
            self._expiry_deque.append(invoking_message)
        messages.append(sent_message)
        async with self.db.get_session() as s:
            await s.add(
                CrosspostMessage(
                    sent_message=sent_message, invoking_message=invoking_message
                )
            )
        # _expire() exits when its deque empties; restart it if needed.
        if self._expiry_task.done():
            self._expiry_task = asyncio.create_task(self._expire())

    async def del_sent_messages(self, invoking_message: int):
        """Forget all sent messages for invoking_message, in cache and DB."""
        self._message_cache.pop(invoking_message, None)
        async with self.db.get_session() as s:
            await s.delete(CrosspostMessage).where(
                CrosspostMessage.invoking_message == invoking_message
            ).run()
class CrosspostContext(BContext):
    """Context that records every message it sends, for later bulk deletion."""

    # Set by the owning cog; gives access to the sent-message database.
    cog: Crosspost

    async def send(self, content: object = None, **kwargs: Any) -> Message:
        """Send a message, shielded so cancellation cannot lose the sent id."""
        task = asyncio.create_task(
            self._send(
                content,
                **kwargs,
            )
        )
        try:
            return await asyncio.shield(task)
        except asyncio.CancelledError as e:
            # Let the in-flight send finish (so its id gets recorded for
            # cleanup), then propagate the cancellation.
            await asyncio.wait_for(task, timeout=None)
            raise e from None

    async def _send(
        self,
        content: object = None,
        *,
        file: Optional[File] = None,
        **kwargs: Any,
    ) -> Message:
        if file:
            fp = file.fp
            assert isinstance(fp, BytesIO)
            guild = self.guild
            assert guild is not None
            size = len(fp.getbuffer())
            # Too big to attach: replace the upload with an explanatory notice.
            # (display_* handlers key off this exact message prefix.)
            if size >= guild.filesize_limit:
                content = f"Image too large to upload ({display_bytes(size)})."
                file = None
        msg = await super().send(
            content,
            file=file,
            **kwargs,
        )
        await self.cog.db.add_sent_message(self.message.id, msg.id)
        return msg
class Crosspost(Cog):
    """Crossposts images from tweets and other social media"""

    bot: BeattieBot
    # Populated at load time from config/logins.toml.
    hiccears_headers: dict[str, str] = {}
    imgur_headers: dict[str, str] = {}
    # Impersonates the pixiv iOS app; an OAuth token is added by pixiv_login_loop.
    pixiv_headers: dict[str, str] = {
        "App-OS": "ios",
        "App-OS-Version": "10.3.1",
        "App-Version": "6.7.1",
        "User-Agent": "PixivIOSApp/6.7.1 (ios 10.3.1; iPhone8,1)",
    }
    inkbunny_sid: str = ""
    # invoking message id -> in-flight crosspost task (shared across reloads)
    ongoing_tasks: dict[int, asyncio.Task]
    # URL pattern -> site handler coroutine; built in __init__.
    expr_dict: dict[re.Pattern, Callable[[CrosspostContext, str], Awaitable[bool]]]
    def __init__(self, bot: BeattieBot):
        self.bot = bot
        self.db = Database(bot)
        with open("config/headers.toml") as fp:
            self.headers = toml.load(fp)
        self.session = aiohttp.ClientSession(loop=bot.loop)
        self.parser = etree.HTMLParser()
        # Map each *_URL_EXPR module global to its display_<site>_images method.
        self.expr_dict = {
            expr: getattr(self, f"display_{name.partition('_')[0].lower()}_images")
            for name, expr in globals().items()
            if name.endswith("URL_EXPR")
        }
        self.login_task = self.bot.loop.create_task(self.pixiv_login_loop())
        self.init_task = bot.loop.create_task(self.__init())
        # Preserve in-flight crosspost tasks across cog reloads via bot.extra.
        if (ongoing_tasks := bot.extra.get("crosspost_ongoing_tasks")) is not None:
            self.ongoing_tasks = ongoing_tasks
        else:
            self.ongoing_tasks = {}
            bot.extra["crosspost_ongoing_tasks"] = self.ongoing_tasks
    async def __init(self) -> None:
        """Load site credentials and obtain an Inkbunny session id."""
        with open("config/logins.toml") as fp:
            data = toml.load(fp)
        imgur_id = data["imgur"]["id"]
        self.imgur_headers["Authorization"] = f"Client-ID {imgur_id}"
        self.hiccears_headers = data["hiccears"]
        ib_login = data["inkbunny"]
        url = INKBUNNY_API_FMT.format("login")
        async with self.get(url, "POST", params=ib_login) as resp:
            json = await resp.json()
            self.inkbunny_sid = json["sid"]
    def cog_check(self, ctx: BContext) -> bool:
        """Crosspost commands are guild-only."""
        return ctx.guild is not None
    async def pixiv_login_loop(self) -> None:
        """Keep a fresh pixiv OAuth token, re-authenticating as tokens expire."""
        url = "https://oauth.secure.pixiv.net/auth/token"
        while True:
            with open("config/logins.toml") as fp:
                logins = toml.load(fp)
            login = logins["pixiv"]
            data = {
                "get_secure_url": 1,
                # Client credentials of the official pixiv mobile app.
                "client_id": "MOBrBDS8blbauoSck0ZfDbtuzpyT",
                "client_secret": "lsACyCD94FhDUtGTXi3QzcFE2uU1hqtDaKeqrdwj",
            }
            # Prefer the refresh token; fall back to password login the first time.
            if (token := login.get("refresh_token")) is not None:
                data["grant_type"] = "refresh_token"
                data["refresh_token"] = token
            else:
                data["grant_type"] = "password"
                data["username"] = login["username"]
                data["password"] = login["password"]
            hash_secret = (
                "28c1fdd170a5204386cb1313c7077b34f83e4aaf4aa829ce78c231e05b0bae2c"
            )
            now = datetime.now().isoformat()
            # pixiv expects the request time hashed with the shared app secret.
            headers = {
                "X-Client-Time": now,
                "X-Client-Hash": md5((now + hash_secret).encode("utf-8")).hexdigest(),
            }
            # Retry until the token request succeeds.
            while True:
                try:
                    async with self.get(
                        url,
                        "POST",
                        data=data,
                        use_default_headers=False,
                        headers=headers,
                    ) as resp:
                        res = (await resp.json())["response"]
                except Exception as e:
                    message = "An error occurred in the pixiv login loop"
                    self.bot.logger.exception(
                        message, exc_info=(type(e), e, e.__traceback__)
                    )
                else:
                    break
            self.pixiv_headers["Authorization"] = f'Bearer {res["access_token"]}'
            # Persist the rotated refresh token for the next startup.
            login["refresh_token"] = res["refresh_token"]
            with open("config/logins.toml", "w") as fp:
                toml.dump(logins, fp)
            # Sleep until the token expires, then loop to refresh it.
            await asyncio.sleep(res["expires_in"])
    def cog_unload(self) -> None:
        # Close the HTTP session asynchronously and stop refreshing pixiv tokens.
        self.bot.loop.create_task(self.session.close())
        self.login_task.cancel()
    def get(
        self,
        url: str,
        method: str = "GET",
        *,
        use_default_headers: bool = True,
        **kwargs: Any,
    ) -> get_:
        """Perform an HTTP request, merging in the configured default headers."""
        if use_default_headers:
            # Caller-supplied headers take precedence over the defaults.
            kwargs["headers"] = {**self.headers, **kwargs.get("headers", {})}
        return get_(self.session, url, method, **kwargs)
    @overload
    async def save(
        self,
        img_url: str,
        *,
        fp: None = ...,
        seek_begin: bool = ...,
        use_default_headers: bool = ...,
        headers: Optional[dict[str, str]] = ...,
        filesize_limit: Optional[int] = ...,
    ) -> BytesIO:
        ...

    @overload
    async def save(
        self,
        img_url: str,
        *,
        fp: _IO,
        seek_begin: bool = ...,
        use_default_headers: bool = ...,
        headers: Optional[dict[str, str]] = ...,
        filesize_limit: Optional[int] = ...,
    ) -> _IO:
        ...

    async def save(
        self,
        img_url: str,
        *,
        fp: Optional[IO[bytes]] = None,
        seek_begin: bool = True,
        use_default_headers: bool = True,
        headers: Optional[dict[str, str]] = None,
        filesize_limit: Optional[int] = None,
    ):
        """Download img_url into fp, creating and returning a BytesIO if fp is None.

        When filesize_limit is given, raises ResponseError(413) as soon as the
        download is known to exceed it.
        """
        headers = headers or {}
        img = fp or BytesIO()
        # True once we know the size is within the limit (or no limit was given).
        length_checked = filesize_limit is None
        async with self.get(
            img_url, use_default_headers=use_default_headers, headers=headers
        ) as img_resp:
            if not length_checked and img_resp.content_length is not None:
                assert filesize_limit is not None
                # Trust Content-Length when present and bail out before downloading.
                if img_resp.content_length > filesize_limit:
                    raise ResponseError(413)  # yes I know that's not how this works
                length_checked = True
            async for chunk in img_resp.content.iter_any():
                img.write(chunk)
        if seek_begin:
            img.seek(0)
        if not length_checked:
            # No Content-Length header; check the downloaded size after the fact.
            # NOTE(review): assumes img supports getbuffer(), i.e. fp was None
            # whenever filesize_limit is passed — confirm at call sites.
            assert filesize_limit is not None
            if len(img.getbuffer()) > filesize_limit:
                raise ResponseError(413)
        return img
    async def process_links(self, ctx: CrosspostContext) -> None:
        """Scan ctx.message for known art-site links and crosspost each match."""
        # Spoilered links are stripped so they are never crossposted.
        content = remove_spoilers(ctx.message.content)
        me = ctx.me
        channel = ctx.channel
        assert isinstance(me, discord.Member)
        assert isinstance(channel, discord.TextChannel)
        # Only strip the original embeds in upload mode (2), and only when we
        # actually hold Manage Messages.
        do_suppress = (
            channel.permissions_for(me).manage_messages
            and await self.get_mode(ctx) == 2
        )
        for expr, func in self.expr_dict.items():
            for link in expr.findall(content):
                try:
                    if await func(ctx, link) and do_suppress:
                        try:
                            await ctx.message.edit(suppress=True)
                        except (discord.NotFound, discord.Forbidden):
                            pass
                        # Suppress embeds at most once per message.
                        do_suppress = False
                except ResponseError as e:
                    if e.code == 404:
                        await ctx.send("Post not found.")
                    else:
                        await ctx.bot.handle_error(ctx, e)
                except Exception as e:
                    # A failing site handler must not stop the remaining links.
                    await ctx.bot.handle_error(ctx, e)
@Cog.listener()
async def on_message(self, message: Message) -> None:
if (guild := message.guild) is None or message.author.bot:
return
channel = message.channel
me = guild.me
assert isinstance(channel, discord.TextChannel)
assert isinstance(me, discord.Member)
if not me.permissions_in(channel).send_messages:
return
if not (await self.db.get_settings(message)).auto:
return
if "http" not in message.content:
return
ctx = await self.bot.get_context(message, cls=CrosspostContext)
if ctx.command is None:
ctx.command = self.post
task = asyncio.create_task(self.process_links(ctx))
self.ongoing_tasks[message.id] = task
try:
await asyncio.wait_for(task, None)
except asyncio.CancelledError:
pass
except Exception as e:
raise e
finally:
del self.ongoing_tasks[message.id]
    @Cog.listener()
    async def on_raw_message_delete(
        self, payload: discord.RawMessageDeleteEvent
    ) -> None:
        """When an invoking message is deleted, delete the crossposts it produced."""
        async def delete_messages(messages: list[int]):
            channel_id = payload.channel_id
            for message_id in messages:
                try:
                    await self.bot.http.delete_message(channel_id, message_id)
                except discord.NotFound:
                    pass
                except discord.Forbidden:
                    # No permission; further deletes in this channel would fail too.
                    return
        message_id = payload.message_id
        messages_deleted = False
        if task := self.ongoing_tasks.get(message_id):
            # The crosspost is still running: stop it before cleaning up.
            task.cancel()
        if messages := await self.db.get_sent_messages(message_id):
            await delete_messages(messages)
            messages_deleted = True
        if task:
            # Wait for the cancelled task to settle, then sweep again —
            # the cache hands back its live list, which the task may have
            # appended to while we were deleting.
            await asyncio.wait([task])
            if messages:
                await delete_messages(messages)
                messages_deleted = True
        if messages_deleted:
            await self.db.del_sent_messages(message_id)
    async def send(
        self,
        ctx: CrosspostContext,
        link: str,
        *,
        headers: Optional[dict[str, str]] = None,
        use_default_headers: bool = True,
    ) -> None:
        """Post link per the channel's crosspost mode.

        Mode 1 sends the bare link (Discord embeds it); mode 2 downloads the
        image and uploads it as a file attachment.
        """
        mode = await self.get_mode(ctx)
        if mode == 1:
            await ctx.send(link)
        elif mode == 2:
            img = await self.save(
                link, headers=headers, use_default_headers=use_default_headers
            )
            # Use the last filename-looking component of the URL as the name.
            filename = re.findall(r"[\w. -]+\.[\w. -]+", link)[-1]
            file = File(img, filename)
            await ctx.send(file=file)
        else:
            raise RuntimeError("Invalid crosspost mode!")
async def get_mode(self, ctx: BContext) -> int:
return (await self.db.get_settings(ctx.message)).mode or 1
async def get_max_pages(self, ctx: BContext) -> int:
settings = await self.db.get_settings(ctx.message)
max_pages = settings.max_pages
if max_pages is None:
max_pages = 4
return max_pages
    async def display_twitter_images(self, ctx: CrosspostContext, link: str) -> bool:
        """Crosspost images (or a converted gif) from a tweet.

        Returns True when everything was reposted successfully.
        """
        # In link mode Discord already embeds tweets; nothing to do.
        if await self.get_mode(ctx) == 1:
            return False
        link = f"https://{link}"
        async with self.get(link) as resp:
            root = etree.fromstring(await resp.read(), self.parser)
        try:
            tweet = root.xpath(TWEET_SELECTOR)[0]
        except IndexError:
            await ctx.send("Failed to get tweet. Maybe the account is locked?")
            return False
        if imgs := tweet.xpath(TWITTER_IMG_SELECTOR):
            for img in imgs:
                url = img.get("src")
                # :orig requests the full-resolution image.
                await self.send(ctx, f"{url}:orig")
            return True
        elif tweet.xpath(TWITTER_IS_GIF):
            # Twitter "gifs" are mp4 videos; pipe youtube-dl into ffmpeg to
            # produce a real looping gif.
            proc = await subprocess.create_subprocess_shell(
                f"youtube-dl {link} -o - | "
                "ffmpeg -i pipe:0 "
                "-vf 'split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse,loop=-1' "
                "-f gif pipe:1",
                stdout=subprocess.PIPE,
                stderr=subprocess.DEVNULL,
            )
            gif = BytesIO()
            tweet_id = link.rpartition("/")[2].partition("?")[0]
            filename = f"{tweet_id}.gif"
            try:
                stdout = await try_wait_for(proc)
            except asyncio.TimeoutError:
                await ctx.send("Gif took too long to process.")
                return False
            gif.write(stdout)
            gif.seek(0)
            file = File(gif, filename)
            msg = await ctx.send(file=file)
            # _send substitutes a "too large" notice when the upload won't fit.
            return not msg.content.startswith("Image too large to upload")
        else:
            return False
    async def display_pixiv_images(self, ctx: CrosspostContext, link: str) -> bool:
        """Crosspost images (or an ugoira gif) from a pixiv link.

        Returns True if something was sent.
        """
        # Normalize the link to the "medium" page form and force https.
        if "mode" in link:
            link = re.sub(r"(?<=mode=)\w+", "medium", link)
        elif "illust_id" in link:
            link = f"{link}&mode=medium"
        link = link.replace("http://", "https://")
        if match := re.search(r"(?:illust_id=|artworks/)(\d+)", link):
            illust_id = match.group(1)
        else:
            await ctx.send("Failed to find illust ID in pixiv link. This is a bug.")
            return False
        params = {"illust_id": illust_id}
        url = "https://app-api.pixiv.net/v1/illust/detail"
        async with self.get(
            url, params=params, use_default_headers=False, headers=self.pixiv_headers
        ) as resp:
            res = await resp.json()
        try:
            res = res["illust"]
        except KeyError:
            # API errors (e.g. an expired token) come back without "illust".
            await ctx.send(
                "This feature works sometimes, but isn't working right now!"
                f"\nDebug info:\n{res.get('error')}"
            )
            return False
        # pixiv image servers require a matching referer header.
        headers = {**self.pixiv_headers, "referer": link}
        guild = ctx.guild
        assert guild is not None
        filesize_limit = guild.filesize_limit
        content = None
        if single := res["meta_single_page"]:
            img_url = single["original_image_url"]
            if "ugoira" in img_url:
                try:
                    file = await self.get_ugoira(illust_id)
                except asyncio.TimeoutError:
                    await ctx.send("Ugoira took too long to process.")
                    return False
            else:
                content, file = await self.save_pixiv(img_url, headers, filesize_limit)
            await ctx.send(content, file=file)
        elif multi := res["meta_pages"]:
            # multi_image_post
            urls = (page["image_urls"]["original"] for page in multi)
            max_pages = await self.get_max_pages(ctx)
            num_pages = len(multi)
            if max_pages == 0:
                max_pages = num_pages
            # Start all downloads concurrently, then send them in order.
            tasks = [
                self.bot.loop.create_task(
                    self.save_pixiv(img_url, headers, filesize_limit)
                )
                for img_url, _ in zip(urls, range(max_pages))
            ]
            for task in tasks:
                content, file = await task
                await ctx.send(content, file=file)
            remaining = num_pages - max_pages
            if remaining > 0:
                s = "s" if remaining > 1 else ""
                message = (
                    f"{remaining} more image{s} at "
                    f"<https://www.pixiv.net/en/artworks/{illust_id}>"
                )
                await ctx.send(message)
        else:
            return False
        return True
    async def save_pixiv(
        self, img_url: str, headers: dict[str, str], filesize_limit: int
    ) -> tuple[Optional[str], File]:
        """Download a pixiv image, falling back to the standard-resolution
        rendition when the original exceeds the guild's filesize limit.

        Returns (content, file), where content is an optional notice string
        to include alongside the upload.
        """
        content = None
        try:
            img = await self.save(
                img_url, headers=headers, filesize_limit=filesize_limit
            )
        except ResponseError as e:
            # A 413-coded ResponseError here indicates the size limit was hit.
            if e.code == 413:
                # Swap the original asset path for the 1200px "master" one.
                img_url = img_url.replace("img-original", "img-master")
                head, _, _ext = img_url.rpartition(".")
                img_url = f"{head}_master1200.jpg"
                img = await self.save(img_url, headers=headers)
                content = "Full size too large, standard resolution used."
            else:
                raise e from None
        file = File(img, img_url.rpartition("/")[-1])
        return content, file
async def get_ugoira(self, illust_id: str) -> File:
url = "https://app-api.pixiv.net/v1/ugoira/metadata"
params = {"illust_id": illust_id}
headers = self.pixiv_headers
async with self.get(
url, params=params, use_default_headers=False, headers=headers
) as resp:
res = (await resp.json())["ugoira_metadata"]
zip_url = res["zip_urls"]["medium"]
zip_url = re.sub(r"ugoira\d+x\d+", "ugoira1920x1080", zip_url)
headers = {
**self.pixiv_headers,
"referer": f"https://www.pixiv.net/en/artworks/{illust_id}",
}
zip_bytes = await self.save(zip_url, headers=headers, use_default_headers=False)
zfp = ZipFile(zip_bytes)
with TemporaryDirectory() as td:
tempdir = Path(td)
zfp.extractall(tempdir)
with open(tempdir / "durations.txt", "w") as fp:
for frame in res["frames"]:
duration = int(frame["delay"]) / 1000
fp.write(f"file '{frame['file']}'\nduration {duration}\n")
proc = await subprocess.create_subprocess_exec(
"ffmpeg",
"-i",
f"{tempdir}/%06d.jpg",
"-vf",
"palettegen",
f"{tempdir}/palette.png",
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
await proc.wait()
proc = await subprocess.create_subprocess_exec(
"ffmpeg",
"-f",
"concat",
"-safe",
"0",
"-i",
f"{tempdir}/durations.txt",
"-i",
f"{tempdir}/palette.png",
"-lavfi",
"paletteuse",
"-f",
"gif",
"pipe:1",
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
)
stdout = await try_wait_for(proc)
img = BytesIO(stdout)
img.seek(0)
name = f"{illust_id}.gif"
return File(img, name)
    async def display_hiccears_images(self, ctx: CrosspostContext, link: str) -> bool:
        """Crosspost images from a Hiccears gallery or single-image page.

        Returns True if images were sent.
        """
        async with self.get(link, headers=self.hiccears_headers) as resp:
            root = etree.fromstring(await resp.read(), self.parser)
        # Single-image page: follow the full-image anchor directly.
        if single_image := root.xpath(HICCEARS_IMG_SELECTOR):
            a = single_image[0]
            href = a.get("href").lstrip(".")
            url = f"https://{resp.host}{href}"
            await self.send(ctx, url)
            return True
        thumbs = root.xpath(HICCEARS_THUMB_SELECTOR)
        num_images = len(thumbs)
        if num_images == 0:
            # No thumbnails on a gallery page means the stored session is dead.
            await ctx.send(
                "Hiccears login expired. <@!140293604726800385> needs to fix this. >:("
            )
            return False
        max_pages = await self.get_max_pages(ctx)
        if max_pages == 0:
            max_pages = num_images
        pages_remaining = num_images - max_pages
        for thumb in thumbs[:max_pages]:
            # Map the thumbnail path to the full image path: strip the
            # extension, swap the directory, and assume a .jpg suffix.
            href = thumb.get("src").lstrip(".").replace("thumbnails", "imgs")[:-4]
            url = f"https://{resp.host}{href}.jpg"
            await self.send(ctx, url)
        if pages_remaining > 0:
            s = "s" if pages_remaining > 1 else ""
            message = f"{pages_remaining} more image{s} at <{link}>"
            await ctx.send(message)
        return True
    async def display_tumblr_images(self, ctx: CrosspostContext, link: str) -> bool:
        """Crosspost images from a tumblr post.

        Returns True if images were sent.
        """
        # Skip the first matched image by default (presumably covered by
        # Discord's own embed — confirm).
        idx = 1
        async with self.get(link) as resp:
            root = etree.fromstring(await resp.read(), self.parser)
        if not str(resp.url).startswith(link):  # explicit blog redirect
            async with self.bot.session.get(
                link
            ) as resp:  # somehow this doesn't get redirected?
                root = etree.fromstring(await resp.read(), self.parser)
            # Redirected page: send all images, including the first.
            idx = 0
        images = root.xpath(TUMBLR_IMG_SELECTOR)
        mode = await self.get_mode(ctx)
        max_pages = await self.get_max_pages(ctx)
        num_images = len(images)
        if max_pages == 0:
            max_pages = num_images
        pages_remaining = num_images - max_pages
        images = images[idx:max_pages]
        if not images:
            return False
        for image in images:
            url = image.get("content")
            await self.send(ctx, url)
        if mode == 1 and pages_remaining > 0:
            s = "s" if pages_remaining > 1 else ""
            message = f"{pages_remaining} more image{s} at <{link}>"
            await ctx.send(message)
        return True
    async def display_mastodon_images(self, ctx: CrosspostContext, link: str) -> bool:
        """Crosspost attachments from a Mastodon-compatible post.

        Returns True only if every attachment was embedded successfully.
        """
        if (match := MASTODON_URL_GROUPS.match(link)) is None:
            return False
        api_url = MASTODON_API_FMT.format(*match.groups())
        try:
            async with self.get(api_url, use_default_headers=False) as resp:
                post = await resp.json()
        except (ResponseError, aiohttp.ClientError):
            # Not actually a Mastodon instance, or the API is unreachable.
            return False
        if not (images := post.get("media_attachments")):
            return False
        mode = await self.get_mode(ctx)
        # In link mode skip the first attachment unless the post is marked
        # sensitive (presumably Discord's embed already shows it — confirm).
        idx = 0 if mode != 1 or post["sensitive"] else 1
        all_embedded = True
        for image in images[idx:]:
            url = image["remote_url"] or image["url"]
            if image.get("type") == "gifv":
                # Convert the looping video into an actual gif with ffmpeg.
                with NamedTemporaryFile() as fp:
                    await self.save(
                        url, fp=fp, seek_begin=False, use_default_headers=False
                    )
                    proc = await asyncio.create_subprocess_exec(
                        "ffmpeg",
                        "-i",
                        fp.name,
                        "-vf",
                        "split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse,loop=-1",
                        "-f",
                        "gif",
                        "pipe:1",
                        stdout=subprocess.PIPE,
                        stderr=subprocess.DEVNULL,
                    )
                    try:
                        stdout = await try_wait_for(proc)
                    except asyncio.TimeoutError:
                        await ctx.send("Gif took too long to process.")
                        all_embedded = False
                        continue
                img = BytesIO(stdout)
                filename = f"{url.rpartition('/')[2]}.gif"
                file = File(img, filename)
                msg = await ctx.send(file=file)
                if all_embedded and msg.content.startswith("Image too large to upload"):
                    all_embedded = False
            else:
                await self.send(ctx, url)
        return all_embedded
async def display_inkbunny_images(self, ctx: CrosspostContext, sub_id: str) -> bool:
url = INKBUNNY_API_FMT.format("submissions")
params = {"sid": self.inkbunny_sid, "submission_ids": sub_id}
async with self.get(
url, "POST", use_default_headers=False, params=params
) as resp:
response = await resp.json()
sub = response["submissions"][0]
for file in sub["files"]:
url = file["file_url_full"]
await self.send(ctx, url)
return True
async def display_imgur_images(self, ctx: CrosspostContext, album_id: str) -> bool:
async with self.get(
f"https://api.imgur.com/3/album/{album_id}",
use_default_headers=False,
headers=self.imgur_headers,
) as resp:
data = await resp.json()
images = data["data"]["images"]
urls = (image["link"] for image in data["data"]["images"])
max_pages = await self.get_max_pages(ctx)
num_pages = len(images)
if max_pages == 0:
max_pages = num_pages
async def helper(link, n=1):
try:
await self.send(
ctx, link, headers=self.imgur_headers, use_default_headers=False
)
except ResponseError as e:
if e.code == 400 and n <= 10:
await asyncio.sleep(n)
await helper(link, n + 1)
else:
raise e
for img_url, _ in zip(urls, range(max_pages)):
await helper(img_url)
remaining = num_pages - max_pages
if remaining > 0:
s = "s" if remaining > 1 else ""
message = f"{remaining} more image{s} at <https://imgur.com/a/{album_id}>"
await ctx.send(message)
return True
@commands.command(hidden=True)
@is_owner_or(manage_guild=True)
async def twitter(self, ctx: BContext, enabled: Union[bool, str] = True) -> None:
await ctx.send(
"This command is deprecated! "
f"Please use `{ctx.prefix}crosspost` to manage settings."
)
    @commands.group()
    @is_owner_or(manage_guild=True)
    async def crosspost(self, ctx: BContext) -> None:
        """Change image crosspost settings.

Each subcommand takes, in addition to the configuration value, an optional \
target, which specifies a channel or category to apply the setting to, instead of \
applying it to the guild as a whole."""
        # Configuration happens in the subcommands; the bare group is a no-op.
        pass
@crosspost.command()
async def auto(
self,
ctx: BContext,
enabled: bool,
*,
target: Union[CategoryChannel, TextChannel] = None,
) -> None:
"""Enable or disable automatic crossposting."""
guild = ctx.guild
assert guild is not None
settings = Settings(auto=enabled)
await self.db.set_settings(guild.id, target.id if target else 0, settings)
fmt = "en" if enabled else "dis"
message = f"Crossposting images {fmt}abled"
if target is not None:
message = f"{message} in {target.mention}"
await ctx.send(f"{message}.")
@crosspost.command()
async def mode(
self,
ctx: BContext,
mode: str,
*,
target: Union[CategoryChannel, TextChannel] = None,
) -> None:
"""Change image crossposting mode.
link: send a link to images when available
upload: always upload image files
Fetching images from Twitter is disabled in link mode.
When in upload mode and the bot has the Manage Messages permission, it'll \
remove embeds from messages it processes successfully."""
if mode == "link":
crosspost_mode = 1
elif mode == "upload":
crosspost_mode = 2
else:
raise commands.BadArgument(mode)
guild = ctx.guild
assert guild is not None
settings = Settings(mode=crosspost_mode)
await self.db.set_settings(guild.id, target.id if target else 0, settings)
message = "Crosspost mode updated"
if target is not None:
message = f"{message} in {target.mention}"
await ctx.send(f"{message}.")
@crosspost.command()
async def pages(
self,
ctx: BContext,
max_pages: int,
*,
target: Union[CategoryChannel, TextChannel] = None,
) -> None:
"""Set the maximum number of images to send.
Set to 0 for no limit."""
guild = ctx.guild
assert guild is not None
settings = Settings(max_pages=max_pages)
await self.db.set_settings(guild.id, target.id if target else 0, settings)
message = f"Max crosspost pages set to {max_pages}"
if target is not None:
message = f"{message} in {target.mention}"
await ctx.send(f"{message}.")
async def crosspost_error(self, ctx: BContext, e: Exception) -> None:
if isinstance(e, BadUnionArgument):
inner = e.errors[0]
assert isinstance(inner, ChannelNotFound)
await ctx.send(
f"Could not resolve `{inner.argument}` as a category or channel"
)
else:
await ctx.bot.handle_error(ctx, e)
auto_error = auto.error(crosspost_error)
mode_error = mode.error(crosspost_error)
pages_error = pages.error(crosspost_error)
@commands.command()
async def post(self, ctx: BContext, *, _: str) -> None:
"""Embed images in the given links regardless of the auto setting."""
new_ctx = await self.bot.get_context(ctx.message, cls=CrosspostContext)
await self.process_links(new_ctx)
    @commands.command(aliases=["_"])
    async def nopost(self, ctx: BContext, *, _: str = "") -> None:
        """Ignore links in the following message.

You can also use ||spoiler tags|| to achieve the same thing."""
        # Intentionally empty: presumably invoking any command exempts the
        # message from automatic crossposting — confirm in the listener.
        pass
def setup(bot: BeattieBot) -> None:
    """discord.py extension entry point: register the Crosspost cog."""
    cog = Crosspost(bot)
    bot.add_cog(cog)
|
__author__ = "Jacopo Mauro"
__copyright__ = "Copyright 2016, Jacopo Mauro"
__license__ = "ISC"
__version__ = "0.2"
__maintainer__ = "Jacopo Mauro"
__email__ = "mauro.jacopo@gmail.com"
__status__ = "Prototype"
import sys
import os
import logging as log
import json
import re
# use multiprocessing because antlr is not thread safe
import multiprocessing
import click
import z3
import SpecificationGrammar.SpecTranslator as SpecTranslator
# NOTE(review): opened at import time and never closed; appears unused in the
# visible code — confirm before removing.
DEVNULL = open(os.devnull, 'wb')
def usage():
    """Print the module docstring as usage information."""
    print(__doc__)
def read_json(json_file):
    """Load and return the parsed contents of a JSON file.

    Args:
        json_file: path to the JSON file to read.

    Returns:
        The deserialized JSON data.
    """
    # Context manager guarantees the handle is closed even if parsing fails.
    with open(json_file) as json_data:
        return json.load(json_data)
# function to encode SMT expression into SMTLIB
def toSMT2(f, status="unknown", name="benchmark", logic=""):
    """Serialize a z3 expression to a single-line SMTLIB2 string.

    Uses the raw Z3_benchmark_to_smtlib_string C API, then strips the
    benchmark boilerplate ((check-sat) and the status comment) and newlines
    so the result can be re-parsed with z3.parse_smt2_string.
    """
    v = (z3.Ast * 0)()  # empty assumption array required by the C API
    return z3.Z3_benchmark_to_smtlib_string(f.ctx_ref(), name, logic, status, "", 0, v, f.as_ast()).replace(
        "\n"," ").replace("(check-sat)","").replace("; benchmark (set-info :status unknown)","").strip()
def run_reconfigure(
        features,
        initial_features,
        contexts,
        attributes,
        constraints,
        preferences,
        features_as_boolean,
        out_stream):
    """Perform the reconfiguration task.

    Builds a z3.Optimize problem over feature/attribute/context variables,
    pins contexts to their initial values, asserts the FM constraints, adds
    the preference objectives, and writes one JSON line to out_stream:
    either "sat" with the selected features/attributes, or "unsat".
    """
    solver = z3.Optimize()
    log.info("Add variables")
    # Unless features are booleans, encode each feature as a 0/1 integer.
    if not features_as_boolean:
        for i in features:
            solver.add(0 <= z3.Int(i), z3.Int(i) <= 1)
    for i in attributes.keys():
        solver.add(attributes[i]["min"] <= z3.Int(i), z3.Int(i) <= attributes[i]["max"])
    for i in contexts.keys():
        solver.add(contexts[i]["min"] <= z3.Int(i), z3.Int(i) <= contexts[i]["max"])
    log.info("Enforce context to be equal to intial values")
    for i in contexts.keys():
        solver.add(contexts[i]["initial"] == z3.Int(i))
    log.info("Add constraints")
    for i in constraints:
        solver.add(i)
    log.info("Add preferences")
    # z3.Optimize combines multiple objectives (lexicographic by default).
    for i in preferences:
        solver.maximize(i)
    log.info("Add preference: minimize the number of initial features removed")
    if initial_features:
        if features_as_boolean:
            solver.maximize(z3.Sum([z3.If(z3.Bool(i),1,0) for i in initial_features]))
        else:
            solver.maximize(z3.Sum([z3.Int(i) for i in initial_features]))
    log.info("Add preference: minimize the number of attributes changed")
    # Only attributes that carry an "initial" value participate.
    initial_attributes = [k for k in attributes.keys() if "initial" in attributes[k]]
    if initial_attributes:
        solver.maximize(
            z3.Sum([z3.If(z3.Int(i) == z3.IntVal(attributes[i]["initial"]), 1, 0) for i in initial_attributes]))
    log.info("Add preference: minimize the number of non initial features added")
    if features.difference(initial_features):
        if features_as_boolean:
            solver.minimize(z3.Sum([z3.If(z3.Bool(i),1,0) for i in features.difference(initial_features)]))
        else:
            solver.minimize(z3.Sum([z3.Int(i) for i in features.difference(initial_features)]))
    log.debug(unicode(solver))  # unicode(): this module targets Python 2
    log.info("Computing reconfiguration")
    result = solver.check()
    log.info("Printing output")
    if result == z3.sat:
        model = solver.model()
        out = {"result": "sat", "features": [], "attributes": []}
        if features_as_boolean:
            for i in features:
                if model[z3.Bool(i)] == z3.BoolVal(True):
                    out["features"].append(i)
        else:
            for i in features:
                if model[z3.Int(i)] == z3.IntVal(1):
                    out["features"].append(i)
        # Only report attributes whose owning feature was selected.
        for i in attributes.keys():
            if attributes[i]["feature"] in out["features"]:
                out["attributes"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        json.dump(out, out_stream)
        out_stream.write("\n")
    else:
        out_stream.write('{"result": "unsat"}\n')
def run_validate(
        features,
        initial_features,
        contexts,
        attributes,
        constraints,
        preferences,
        context_constraints,
        features_as_boolean,
        out_stream):
    """Perform the validation task.

    Searches for a context (admitted by the context constraints) under which
    NO feature/attribute assignment satisfies the FM formula, using a forall
    quantifier over features and attributes.  Writes one JSON line: "valid",
    or "not_valid" with a witness context.
    initial_features and preferences are unused here.
    """
    solver = z3.Solver()
    log.info("Add context variables")
    for i in contexts.keys():
        solver.add(contexts[i]["min"] <= z3.Int(i), z3.Int(i) <= contexts[i]["max"])
    log.info("Add contexts constraints")
    for i in context_constraints:
        solver.add(i)
    log.info("Building the FM formula")
    formulas = []
    # Feature/attribute domain bounds become part of the quantified formula.
    if not features_as_boolean:
        for i in features:
            formulas.append(0 <= z3.Int(i))
            formulas.append(z3.Int(i) <= 1)
    for i in attributes.keys():
        formulas.append(attributes[i]["min"] <= z3.Int(i))
        formulas.append(z3.Int(i) <= attributes[i]["max"])
    for i in constraints:
        formulas.append(i)
    log.info("Add forall not FM formula")
    # sat below means: some context makes the FM unsatisfiable (not valid).
    if features_as_boolean:
        solver.add(z3.ForAll(
            [z3.Bool(i) for i in features] + [z3.Int(i) for i in attributes.keys()],
            z3.Not(z3.And(formulas))
        ))
    else:
        solver.add(z3.ForAll(
            [z3.Int(i) for i in features] + [z3.Int(i) for i in attributes.keys()],
            z3.Not(z3.And(formulas))
        ))
    log.debug(solver)
    log.info("Computing")
    result = solver.check()
    log.info("Printing output")
    if result == z3.sat:
        model = solver.model()
        out = {"result": "not_valid", "contexts": []}
        for i in contexts.keys():
            out["contexts"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        json.dump(out, out_stream)
        out_stream.write("\n")
    else:
        out_stream.write('{"result":"valid"}\n')
def run_explain(
        features,
        initial_features,
        contexts,
        attributes,
        constraints,
        preferences,
        data,
        features_as_boolean,
        out_stream):
    """Get the explanation of the unsat of the FM model.

    Re-runs the reconfiguration encoding with every constraint tracked under
    an 'auxN' label; when unsat, maps the z3 unsat core back to the original
    constraint strings in data["constraints"] and reports them.
    initial_features and preferences are unused here.
    """
    solver = z3.Solver()
    solver.set(unsat_core=True)
    log.info("Add variables")
    if not features_as_boolean:
        for i in features:
            solver.add(0 <= z3.Int(i), z3.Int(i) <= 1)
    for i in attributes.keys():
        solver.add(attributes[i]["min"] <= z3.Int(i), z3.Int(i) <= attributes[i]["max"])
    for i in contexts.keys():
        solver.add(contexts[i]["min"] <= z3.Int(i), z3.Int(i) <= contexts[i]["max"])
    log.info("Enforce context to be equal to initial values")
    for i in contexts.keys():
        solver.add(contexts[i]["initial"] == z3.Int(i))
    log.info("Add constraints")
    # Track each constraint under a label so it can surface in the unsat core.
    counter = 0
    for i in constraints:
        solver.assert_and_track(i, 'aux' + str(counter))
        counter += 1
    log.info("Computing reconfiguration")
    result = solver.check()
    log.info("Printing output")
    if result == z3.sat:
        # Satisfiable after all: report the model like run_reconfigure does.
        model = solver.model()
        out = {"result": "sat", "features": [], "attributes": []}
        if features_as_boolean:
            for i in features:
                if model[z3.Bool(i)] == z3.BoolVal(True):
                    out["features"].append(i)
        else:
            for i in features:
                if model[z3.Int(i)] == z3.IntVal(1):
                    out["features"].append(i)
        for i in attributes.keys():
            if attributes[i]["feature"] in out["features"]:
                out["attributes"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        json.dump(out, out_stream)
        out_stream.write("\n")
    else:
        core = solver.unsat_core()
        log.debug("Core: " + unicode(core))
        out = {"result": "unsat", "constraints": []}
        # Label index i corresponds to position i in data["constraints"].
        for i in range(len(constraints)):
            if z3.Bool('aux' + str(i)) in core:
                out["constraints"].append(data["constraints"][i])
        json.dump(out, out_stream)
        out_stream.write("\n")
def run_check_interface(features,
                        contexts,
                        attributes,
                        constraints,
                        contexts_constraints,
                        interface,
                        features_as_boolean,
                        out_stream):
    """Check if the interface given is a proper interface.

    Validates three things in order: (1) interface attribute/context bounds
    against the FM's, (2) that the interface context constraints admit every
    context the FM's context constraints admit, and (3) that every interface
    configuration extends to a full FM configuration (via a forall over the
    non-interface features/attributes).  Writes one JSON result line.
    """
    # handle FM contexts_constraints
    i_features = set()
    i_contexts = {}
    i_attributes = {}
    i_constraints = []
    i_contexts_constraints = []
    log.info("Processing interface attributes")
    for i in interface["attributes"]:
        id = re.match("attribute\[(.*)\]", i["id"]).group(1)
        i_attributes[id] = {}
        i_attributes[id]["min"] = i["min"]
        i_attributes[id]["max"] = i["max"]
        i_attributes[id]["feature"] = re.match("feature\[(.*)\]", i["featureId"]).group(1)
        # The interface range must contain the FM range for each attribute.
        # NOTE(review): message is missing a space before "does".
        if (id not in attributes) or \
                (attributes[id]["min"] < i_attributes[id]["min"]) or \
                (attributes[id]["max"] > i_attributes[id]["max"]) :
            json.dump({"result": "not_valid: attribute " + id + "does not match"}, out_stream)
            out_stream.write("\n")
            return None
    log.debug(unicode(attributes))
    log.info("Processing contexts")
    for i in interface["contexts"]:
        id = re.match("context\[(.*)\]", i["id"]).group(1)
        i_contexts[id] = {}
        i_contexts[id]["min"] = i["min"]
        i_contexts[id]["max"] = i["max"]
        # NOTE(review): these comparisons use == where the attribute check
        # above uses range containment; as written, MATCHING bounds are
        # reported as a mismatch — likely should be != (or </>) — confirm.
        if (id not in contexts) or \
                (contexts[id]["min"] == i_contexts[id]["min"]) or \
                (contexts[id]["max"] == i_contexts[id]["max"]):
            json.dump({"result": "not_valid: context " + id + "does not match"}, out_stream)
            out_stream.write("\n")
            return None
    log.debug(unicode(contexts))
    log.info("Processing Constraints")
    for i in interface["constraints"]:
        try:
            d = SpecTranslator.translate_constraint(i, interface, features_as_boolean)
            log.debug("Find constraint " + unicode(d))
            i_constraints.append(d["formula"])
            i_features.update(d["features"])
        except Exception as e:
            log.critical("Parsing failed while processing " + i + ": " + str(e))
            log.critical("Exiting")
            sys.exit(1)
    log.info("Processing Context Constraints")
    if "context_constraints" in interface:
        for i in interface["context_constraints"]:
            try:
                d = SpecTranslator.translate_constraint(i, interface, features_as_boolean)
                log.debug("Find context constraint " + unicode(d))
                i_contexts_constraints.append(d["formula"])
            except Exception as e:
                log.critical("Parsing failed while processing " + i + ": " + str(e))
                log.critical("Exiting")
                sys.exit(1)
    log.info("Checking Context Constraints Extensibility")
    # Look for a context allowed by the interface constraints but rejected
    # by the FM's context constraints.
    solver = z3.Solver()
    for i in contexts.keys():
        solver.add(contexts[i]["min"] <= z3.Int(i))
        solver.add(z3.Int(i) <= contexts[i]["max"])
    solver.add(z3.And(i_contexts_constraints))
    solver.add(z3.Not(z3.And(contexts_constraints)))
    result = solver.check()
    if result == z3.sat:
        model = solver.model()
        out = {"result": "not_valid: context extensibility problem", "contexts": []}
        for i in contexts.keys():
            out["contexts"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        json.dump(out, out_stream)
        out_stream.write("\n")
        # NOTE(review): no return here — execution falls through into the
        # extensibility check below even after reporting not_valid; confirm
        # this is intended.
    solver = z3.Solver()
    log.info("Add interface variables")
    if not features_as_boolean:
        for i in i_features:
            solver.add(0 <= z3.Int(i), z3.Int(i) <= 1)
    for i in i_attributes.keys():
        solver.add(i_attributes[i]["min"] <= z3.Int(i), z3.Int(i) <= i_attributes[i]["max"])
    for i in i_contexts.keys():
        solver.add(i_contexts[i]["min"] <= z3.Int(i), z3.Int(i) <= i_contexts[i]["max"])
    log.info("Add interface contexts constraints")
    solver.add(z3.And(i_contexts_constraints))
    solver.add(z3.And(contexts_constraints))
    log.info("Add interface constraints")
    for i in i_constraints:
        solver.add(i)
    log.info("Add FM context variables")
    for i in contexts.keys():
        if i not in i_contexts:
            solver.add(contexts[i]["min"] <= z3.Int(i))
            solver.add(z3.Int(i) <= contexts[i]["max"])
    log.info("Building the FM formula")
    formulas = []
    # Bounds for FM-only (non-interface) features/attributes are quantified.
    if not features_as_boolean:
        for i in features:
            if i not in i_features:
                formulas.append(0 <= z3.Int(i))
                formulas.append(z3.Int(i) <= 1)
    for i in attributes.keys():
        if i not in i_attributes:
            formulas.append(attributes[i]["min"] <= z3.Int(i))
            formulas.append(z3.Int(i) <= attributes[i]["max"])
    for i in constraints:
        formulas.append(i)
    log.info("Add forall fatures and attributes not formula")
    # sat below means: some interface configuration cannot be extended to a
    # full FM configuration (not a proper interface).
    if features_as_boolean:
        solver.add(z3.ForAll(
            [z3.Bool(i) for i in features if i not in i_features] +
            [z3.Int(i) for i in attributes.keys() if i not in i_attributes.keys()],
            z3.Not(z3.And(formulas))
        ))
    else:
        solver.add(z3.ForAll(
            [z3.Int(i) for i in features if i not in i_features] +
            [z3.Int(i) for i in attributes.keys() if i not in i_attributes.keys()],
            z3.Not(z3.And(formulas))
        ))
    log.debug(solver)
    log.info("Computing")
    result = solver.check()
    log.info("Printing output")
    if result == z3.sat:
        model = solver.model()
        out = {"result": "not_valid", "contexts": [], "attributes": [], "features" : []}
        for i in contexts.keys():
            out["contexts"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        if features_as_boolean:
            for i in i_features:
                out["features"].append({"id": i, "value": unicode(model[z3.Bool(i)])})
        else:
            for i in i_features:
                out["features"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        for i in i_attributes.keys():
            out["attributes"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        json.dump(out, out_stream)
        out_stream.write("\n")
    else:
        out_stream.write('{"result":"valid"}\n')
def translate_constraints(triple):
    """Multiprocessing worker: translate one constraint to SMTLIB text.

    Returns a (smtlib_string, features) pair.  The formula is serialized to
    a string because z3 expressions cannot cross process boundaries.
    Exits the program on a parse failure.
    """
    constraint, data, features_as_boolean = triple
    try:
        translated = SpecTranslator.translate_constraint(
            constraint, data, features_as_boolean)
    except Exception as e:
        log.critical("Parsing failed while processing " + constraint + ": " + str(e))
        log.critical("Exiting")
        sys.exit(1)
    return toSMT2(translated["formula"]), translated["features"]
@click.command()
@click.argument('input_file',
                type=click.Path(exists=True, file_okay=True, dir_okay=False, writable=False, readable=True, resolve_path=True))
@click.option('--num-of-process', '-p', type=click.INT, default=1,
              help='Number of process to use for translating the dependencies.')
@click.option('--output-file', '-o',
              type=click.Path(exists=False, file_okay=True, dir_okay=False, writable=True, readable=True, resolve_path=True),
              help='Output file - Otherwise the output is printed on stdout.')
@click.option('--keep', '-k', is_flag=True,
              help="Do not convert dependencies into SMT formulas.")
@click.option('--verbose', '-v', is_flag=True,
              help="Print debug messages.")
@click.option('--validate', is_flag=True,
              help="Activate the validation mode to check if for all context the FM is not void.")
@click.option('--explain', is_flag=True,
              help="Tries to explain why a FM is void.")
@click.option('--check-interface',
              default="",
              help="Checks if the interface given as additional file is a proper interface.")
@click.option('--features-as-boolean', is_flag=True,
              help="Require features in constraints defined as booleans.")
def main(input_file,
         num_of_process,
         output_file,
         keep,
         verbose,
         validate,
         explain,
         check_interface,
         features_as_boolean):
    """
    INPUT_FILE Json input file
    """
    # Parse the FM JSON, translate constraints/preferences to z3, then
    # dispatch to the selected run_* task.
    modality = ""  # default modality is to proceed with the reconfiguration
    interface_file = ""
    if keep:
        global KEEP
        KEEP = True
    # Later flags win: check-interface overrides explain overrides validate.
    if validate:
        modality = "validate"
    if explain:
        modality = "explain"
    if check_interface:
        modality = "check-interface"
        interface_file = check_interface
    if verbose:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
        log.info("Verbose output.")
    out_stream = sys.stdout
    if output_file:
        out_stream = open(output_file, "w")
    features = set()
    initial_features = set()
    contexts = {}
    attributes = {}
    constraints = []
    preferences = []
    contexts_constraints = []
    log.info("Reading input file")
    data = read_json(input_file)
    log.info("Processing attributes")
    # IDs are wrapped as attribute[...]/context[...]/feature[...] in the JSON.
    for i in data["attributes"]:
        id = re.match("attribute\[(.*)\]", i["id"]).group(1)
        attributes[id] = {}
        attributes[id]["min"] = i["min"]
        attributes[id]["max"] = i["max"]
        attributes[id]["feature"] = re.match("feature\[(.*)\]", i["featureId"]).group(1)
    for i in data["configuration"]["attribute_values"]:
        id = re.match("attribute\[(.*)\]", i["id"]).group(1)
        attributes[id]["initial"] = i["value"]
    log.debug(unicode(attributes))
    log.info("Processing contexts")
    for i in data["contexts"]:
        id = re.match("context\[(.*)\]", i["id"]).group(1)
        contexts[id] = {}
        contexts[id]["min"] = i["min"]
        contexts[id]["max"] = i["max"]
    for i in data["configuration"]["context_values"]:
        id = re.match("context\[(.*)\]", i["id"]).group(1)
        contexts[id]["initial"] = i["value"]
    log.debug(unicode(contexts))
    log.info("Processing initial features")
    for i in data["configuration"]["selectedFeatures"]:
        initial_features.add(re.match("feature\[(.*)\]", i).group(1))
    log.debug(unicode(initial_features))
    log.info("Processing Constraints")
    if num_of_process > 1:
        # convert in parallel formulas into smt and then parse it here
        # threads can not be used here because antlr parser seems not thread safe
        # the z3 expression can not be serialized
        log.debug("Starting to convert the constraints into smt representation")
        log.debug("Constraint to convert: " + unicode(len(data["constraints"])))
        pool = multiprocessing.Pool(num_of_process)
        results = pool.map(translate_constraints, [(x,data,features_as_boolean) for x in data["constraints"]])
        log.debug("Converting smt into z3 expressions")
        for smt_f,fs in results:
            constraints.append(z3.parse_smt2_string(smt_f))
            features.update(fs)
    else:
        for i in data["constraints"]:
            try:
                d = SpecTranslator.translate_constraint(i, data, features_as_boolean)
                log.debug("Find constrataint " + unicode(d))
                constraints.append(d["formula"])
                features.update(d["features"])
            except Exception as e:
                log.critical("Parsing failed while processing " + i + ": " + str(e))
                log.critical("Exiting")
                sys.exit(1)
    # possibility for reconfigure and explain modality to add directly SMT formulas
    if "smt_constraints" in data:
        log.info("Processing special input constraint modality")
        features.update(data["smt_constraints"]["features"])
        for i in data["smt_constraints"]["formulas"]:
            constraints.append(z3.parse_smt2_string(i))
            # for explain purposes add smt_constraint to constraints
            data["constraints"].append(i)
    log.info("Processing Preferences")
    for i in data["preferences"]:
        try:
            d = SpecTranslator.translate_preference(i, data, features_as_boolean)
            log.debug("Find preference " + unicode(d))
            preferences.append(d["formula"])
        except Exception as e:
            log.critical("Parsing failed while processing " + i + ": " + str(e))
            log.critical("Exiting")
            sys.exit(1)
    log.info("Processing Context Constraints")
    if "context_constraints" in data:
        for i in data["context_constraints"]:
            try:
                d = SpecTranslator.translate_constraint(i, data, features_as_boolean)
                log.debug("Find context constraint " + unicode(d))
                contexts_constraints.append(d["formula"])
            except Exception as e:
                log.critical("Parsing failed while processing " + i + ": " + str(e))
                log.critical("Exiting")
                sys.exit(1)
    # Dispatch to the selected task.
    if modality == "validate":
        run_validate(features, initial_features, contexts, attributes, constraints,
                     preferences, contexts_constraints, features_as_boolean, out_stream)
    elif modality == "explain":
        run_explain(features, initial_features, contexts, attributes, constraints,
                    preferences, data, features_as_boolean, out_stream)
    elif modality == "check-interface":
        run_check_interface(features, contexts, attributes, constraints, contexts_constraints,
                            read_json(interface_file), features_as_boolean, out_stream)
    else:
        run_reconfigure(features, initial_features, contexts, attributes, constraints, preferences,
                        features_as_boolean, out_stream)
    log.info("Program Succesfully Ended")
# Script entry point: delegate to the click command.
if __name__ == "__main__":
    main()
Minimize the value of attributes, to allow HyVarRec to print a value for them even when they are not bounded.
__author__ = "Jacopo Mauro"
__copyright__ = "Copyright 2016, Jacopo Mauro"
__license__ = "ISC"
__version__ = "0.2"
__maintainer__ = "Jacopo Mauro"
__email__ = "mauro.jacopo@gmail.com"
__status__ = "Prototype"
import sys
import os
import logging as log
import json
import re
# use multiprocessing because antlr is not thread safe
import multiprocessing
import click
import z3
import SpecificationGrammar.SpecTranslator as SpecTranslator
DEVNULL = open(os.devnull, 'wb')
def usage():
    """Print the module docstring as usage information."""
    print(__doc__)
def read_json(json_file):
    """Load and return the parsed contents of a JSON file.

    Args:
        json_file: path to the JSON file to read.

    Returns:
        The deserialized JSON data.
    """
    # Context manager guarantees the handle is closed even if parsing fails.
    with open(json_file) as json_data:
        return json.load(json_data)
# function to encode SMT expression into SMTLIB
def toSMT2(f, status="unknown", name="benchmark", logic=""):
    """Serialize a z3 expression to a single-line SMTLIB2 string.

    Uses the raw Z3_benchmark_to_smtlib_string C API, then strips the
    benchmark boilerplate ((check-sat) and the status comment) and newlines
    so the result can be re-parsed with z3.parse_smt2_string.
    """
    v = (z3.Ast * 0)()  # empty assumption array required by the C API
    return z3.Z3_benchmark_to_smtlib_string(f.ctx_ref(), name, logic, status, "", 0, v, f.as_ast()).replace(
        "\n"," ").replace("(check-sat)","").replace("; benchmark (set-info :status unknown)","").strip()
def run_reconfigure(
        features,
        initial_features,
        contexts,
        attributes,
        constraints,
        preferences,
        features_as_boolean,
        out_stream):
    """Perform the reconfiguration task.

    Builds a z3 Optimize problem over feature/attribute/context variables,
    adds the feature-model constraints and the user preferences, and writes
    a JSON answer (sat configuration or {"result": "unsat"}) to out_stream.

    features: set of feature ids; initial_features: ids selected in the
    current configuration; contexts/attributes: id -> {"min","max",...}
    bounds; constraints: z3 formulas; preferences: z3 terms maximized in
    order; features_as_boolean: encode features as Bools instead of 0/1
    Ints; out_stream: writable stream for the JSON result.
    """
    solver = z3.Optimize()
    log.info("Add variables")
    # Features are 0/1 integers unless encoded as booleans (Bools need no
    # explicit domain constraint).
    if not features_as_boolean:
        for i in features:
            solver.add(0 <= z3.Int(i), z3.Int(i) <= 1)
    for i in attributes.keys():
        solver.add(attributes[i]["min"] <= z3.Int(i), z3.Int(i) <= attributes[i]["max"])
    for i in contexts.keys():
        solver.add(contexts[i]["min"] <= z3.Int(i), z3.Int(i) <= contexts[i]["max"])
    log.info("Enforce context to be equal to intial values")
    for i in contexts.keys():
        solver.add(contexts[i]["initial"] == z3.Int(i))
    log.info("Add constraints")
    for i in constraints:
        solver.add(i)
    # Objectives are treated by z3.Optimize in the order they are added.
    log.info("Add preferences")
    for i in preferences:
        solver.maximize(i)
    log.info("Add preference: minimize the number of initial features removed")
    if initial_features:
        if features_as_boolean:
            solver.maximize(z3.Sum([z3.If(z3.Bool(i),1,0) for i in initial_features]))
        else:
            solver.maximize(z3.Sum([z3.Int(i) for i in initial_features]))
    log.info("Add preference: minimize the number of attributes changed")
    # Only attributes that carry an "initial" value can be "changed".
    initial_attributes = [k for k in attributes.keys() if "initial" in attributes[k]]
    if initial_attributes:
        solver.maximize(
            z3.Sum([z3.If(z3.Int(i) == z3.IntVal(attributes[i]["initial"]), 1, 0) for i in initial_attributes]))
    log.info("Add preference: minimize the number of non initial features added")
    if features.difference(initial_features):
        if features_as_boolean:
            solver.minimize(z3.Sum([z3.If(z3.Bool(i),1,0) for i in features.difference(initial_features)]))
        else:
            solver.minimize(z3.Sum([z3.Int(i) for i in features.difference(initial_features)]))
    # Minimizing every attribute gives it a definite value in the model
    # even when otherwise unconstrained from above.
    log.info("Add preference: minimize the values of the attributes")
    for i in attributes.keys():
        solver.minimize(z3.Int(i))
    log.debug(unicode(solver))
    log.info("Computing reconfiguration")
    result = solver.check()
    log.info("Printing output")
    if result == z3.sat:
        model = solver.model()
        out = {"result": "sat", "features": [], "attributes": []}
        if features_as_boolean:
            for i in features:
                if model[z3.Bool(i)] == z3.BoolVal(True):
                    out["features"].append(i)
        else:
            for i in features:
                if model[z3.Int(i)] == z3.IntVal(1):
                    out["features"].append(i)
        # Report only attributes whose owning feature is selected.
        for i in attributes.keys():
            if attributes[i]["feature"] in out["features"]:
                out["attributes"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        json.dump(out, out_stream)
        out_stream.write("\n")
    else:
        out_stream.write('{"result": "unsat"}\n')
def run_validate(
        features,
        initial_features,
        contexts,
        attributes,
        constraints,
        preferences,
        context_constraints,
        features_as_boolean,
        out_stream):
    """Perform the validation task.

    Checks that for every context admitted by the context constraints the
    feature model is satisfiable.  Encoded as: find a context for which NO
    assignment of features/attributes satisfies the FM (a ForAll over the
    FM variables of the negated FM formula).  sat => a witness context is
    printed as "not_valid"; unsat => "valid".

    initial_features and preferences are accepted for signature
    uniformity with the other run_* functions but are unused here.
    """
    solver = z3.Solver()
    log.info("Add context variables")
    for i in contexts.keys():
        solver.add(contexts[i]["min"] <= z3.Int(i), z3.Int(i) <= contexts[i]["max"])
    log.info("Add contexts constraints")
    for i in context_constraints:
        solver.add(i)
    log.info("Building the FM formula")
    formulas = []
    # Domain bounds of features (unless boolean) and attributes belong to
    # the FM formula that should hold for some assignment.
    if not features_as_boolean:
        for i in features:
            formulas.append(0 <= z3.Int(i))
            formulas.append(z3.Int(i) <= 1)
    for i in attributes.keys():
        formulas.append(attributes[i]["min"] <= z3.Int(i))
        formulas.append(z3.Int(i) <= attributes[i]["max"])
    for i in constraints:
        formulas.append(i)
    log.info("Add forall not FM formula")
    if features_as_boolean:
        solver.add(z3.ForAll(
            [z3.Bool(i) for i in features] + [z3.Int(i) for i in attributes.keys()],
            z3.Not(z3.And(formulas))
        ))
    else:
        solver.add(z3.ForAll(
            [z3.Int(i) for i in features] + [z3.Int(i) for i in attributes.keys()],
            z3.Not(z3.And(formulas))
        ))
    log.debug(solver)
    log.info("Computing")
    result = solver.check()
    log.info("Printing output")
    if result == z3.sat:
        # sat: there is a context where the FM is void.
        model = solver.model()
        out = {"result": "not_valid", "contexts": []}
        for i in contexts.keys():
            out["contexts"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        json.dump(out, out_stream)
        out_stream.write("\n")
    else:
        out_stream.write('{"result":"valid"}\n')
def run_explain(
        features,
        initial_features,
        contexts,
        attributes,
        constraints,
        preferences,
        data,
        features_as_boolean,
        out_stream):
    """Get the explanation of the unsat of the FM model.

    Re-runs the reconfiguration constraints with unsat-core tracking:
    each constraint is asserted under a fresh literal 'aux<i>'.  If unsat,
    the core literals are mapped back by index to the textual constraints
    in data["constraints"] and reported; if sat, the found configuration
    is printed like in the reconfigure modality.

    initial_features and preferences are accepted for signature
    uniformity but are unused here.
    """
    solver = z3.Solver()
    solver.set(unsat_core=True)
    log.info("Add variables")
    if not features_as_boolean:
        for i in features:
            solver.add(0 <= z3.Int(i), z3.Int(i) <= 1)
    for i in attributes.keys():
        solver.add(attributes[i]["min"] <= z3.Int(i), z3.Int(i) <= attributes[i]["max"])
    for i in contexts.keys():
        solver.add(contexts[i]["min"] <= z3.Int(i), z3.Int(i) <= contexts[i]["max"])
    log.info("Enforce context to be equal to initial values")
    for i in contexts.keys():
        solver.add(contexts[i]["initial"] == z3.Int(i))
    log.info("Add constraints")
    # Track every constraint with an auxiliary boolean so it can appear
    # in the unsat core.
    counter = 0
    for i in constraints:
        solver.assert_and_track(i, 'aux' + str(counter))
        counter += 1
    log.info("Computing reconfiguration")
    result = solver.check()
    log.info("Printing output")
    if result == z3.sat:
        model = solver.model()
        out = {"result": "sat", "features": [], "attributes": []}
        if features_as_boolean:
            for i in features:
                if model[z3.Bool(i)] == z3.BoolVal(True):
                    out["features"].append(i)
        else:
            for i in features:
                if model[z3.Int(i)] == z3.IntVal(1):
                    out["features"].append(i)
        for i in attributes.keys():
            if attributes[i]["feature"] in out["features"]:
                out["attributes"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        json.dump(out, out_stream)
        out_stream.write("\n")
    else:
        core = solver.unsat_core()
        log.debug("Core: " + unicode(core))
        out = {"result": "unsat", "constraints": []}
        # 'aux<i>' in the core <=> the i-th constraint contributes to the
        # conflict.  NOTE(review): assumes data["constraints"] stays
        # index-aligned with `constraints` (true in main(), which appends
        # smt_constraints to both) -- confirm for other callers.
        for i in range(len(constraints)):
            if z3.Bool('aux' + str(i)) in core:
                out["constraints"].append(data["constraints"][i])
        json.dump(out, out_stream)
        out_stream.write("\n")
def run_check_interface(features,
                        contexts,
                        attributes,
                        constraints,
                        contexts_constraints,
                        interface,
                        features_as_boolean,
                        out_stream):
    """Check if the interface given is a proper interface.

    Three checks, each reporting a JSON verdict on out_stream:
      1. interface attribute/context bounds are compatible with the FM's;
      2. the interface context constraints do not admit contexts that the
         FM context constraints forbid ("extensibility");
      3. every interface configuration can be completed to a full FM
         configuration (a ForAll over the non-interface variables).
    """
    # handle FM contexts_constraints
    i_features = set()
    i_contexts = {}
    i_attributes = {}
    i_constraints = []
    i_contexts_constraints = []
    log.info("Processing interface attributes")
    for i in interface["attributes"]:
        # ids arrive wrapped as "attribute[<id>]" / "feature[<id>]".
        id = re.match("attribute\[(.*)\]", i["id"]).group(1)
        i_attributes[id] = {}
        i_attributes[id]["min"] = i["min"]
        i_attributes[id]["max"] = i["max"]
        i_attributes[id]["feature"] = re.match("feature\[(.*)\]", i["featureId"]).group(1)
        # Reject when the FM range is not contained in the interface range.
        # NOTE(review): the message lacks a space before "does not match".
        if (id not in attributes) or \
                (attributes[id]["min"] < i_attributes[id]["min"]) or \
                (attributes[id]["max"] > i_attributes[id]["max"]) :
            json.dump({"result": "not_valid: attribute " + id + "does not match"}, out_stream)
            out_stream.write("\n")
            return None
    log.debug(unicode(attributes))
    log.info("Processing contexts")
    for i in interface["contexts"]:
        id = re.match("context\[(.*)\]", i["id"]).group(1)
        i_contexts[id] = {}
        i_contexts[id]["min"] = i["min"]
        i_contexts[id]["max"] = i["max"]
        # NOTE(review): the "==" comparisons look suspicious -- by analogy
        # with the attribute check above one would expect "<" / ">".  As
        # written, a context whose bound merely EQUALS the interface bound
        # is rejected.  Confirm the intended semantics before changing.
        if (id not in contexts) or \
                (contexts[id]["min"] == i_contexts[id]["min"]) or \
                (contexts[id]["max"] == i_contexts[id]["max"]):
            json.dump({"result": "not_valid: context " + id + "does not match"}, out_stream)
            out_stream.write("\n")
            return None
    log.debug(unicode(contexts))
    log.info("Processing Constraints")
    for i in interface["constraints"]:
        try:
            d = SpecTranslator.translate_constraint(i, interface, features_as_boolean)
            log.debug("Find constraint " + unicode(d))
            i_constraints.append(d["formula"])
            i_features.update(d["features"])
        except Exception as e:
            log.critical("Parsing failed while processing " + i + ": " + str(e))
            log.critical("Exiting")
            sys.exit(1)
    log.info("Processing Context Constraints")
    if "context_constraints" in interface:
        for i in interface["context_constraints"]:
            try:
                d = SpecTranslator.translate_constraint(i, interface, features_as_boolean)
                log.debug("Find context constraint " + unicode(d))
                i_contexts_constraints.append(d["formula"])
            except Exception as e:
                log.critical("Parsing failed while processing " + i + ": " + str(e))
                log.critical("Exiting")
                sys.exit(1)
    # Check (2): sat means some context admitted by the interface violates
    # the FM context constraints.
    log.info("Checking Context Constraints Extensibility")
    solver = z3.Solver()
    for i in contexts.keys():
        solver.add(contexts[i]["min"] <= z3.Int(i))
        solver.add(z3.Int(i) <= contexts[i]["max"])
    solver.add(z3.And(i_contexts_constraints))
    solver.add(z3.Not(z3.And(contexts_constraints)))
    result = solver.check()
    if result == z3.sat:
        # NOTE(review): execution deliberately continues after reporting
        # this problem, so check (3) below still runs.
        model = solver.model()
        out = {"result": "not_valid: context extensibility problem", "contexts": []}
        for i in contexts.keys():
            out["contexts"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        json.dump(out, out_stream)
        out_stream.write("\n")
    # Check (3): look for an interface configuration such that no
    # assignment of the remaining FM variables satisfies the FM formula.
    solver = z3.Solver()
    log.info("Add interface variables")
    if not features_as_boolean:
        for i in i_features:
            solver.add(0 <= z3.Int(i), z3.Int(i) <= 1)
    for i in i_attributes.keys():
        solver.add(i_attributes[i]["min"] <= z3.Int(i), z3.Int(i) <= i_attributes[i]["max"])
    for i in i_contexts.keys():
        solver.add(i_contexts[i]["min"] <= z3.Int(i), z3.Int(i) <= i_contexts[i]["max"])
    log.info("Add interface contexts constraints")
    solver.add(z3.And(i_contexts_constraints))
    solver.add(z3.And(contexts_constraints))
    log.info("Add interface constraints")
    for i in i_constraints:
        solver.add(i)
    log.info("Add FM context variables")
    for i in contexts.keys():
        if i not in i_contexts:
            solver.add(contexts[i]["min"] <= z3.Int(i))
            solver.add(z3.Int(i) <= contexts[i]["max"])
    log.info("Building the FM formula")
    formulas = []
    # Only non-interface variables are universally quantified below, so
    # only their domain bounds go into the FM formula.
    if not features_as_boolean:
        for i in features:
            if i not in i_features:
                formulas.append(0 <= z3.Int(i))
                formulas.append(z3.Int(i) <= 1)
    for i in attributes.keys():
        if i not in i_attributes:
            formulas.append(attributes[i]["min"] <= z3.Int(i))
            formulas.append(z3.Int(i) <= attributes[i]["max"])
    for i in constraints:
        formulas.append(i)
    log.info("Add forall fatures and attributes not formula")
    if features_as_boolean:
        solver.add(z3.ForAll(
            [z3.Bool(i) for i in features if i not in i_features] +
            [z3.Int(i) for i in attributes.keys() if i not in i_attributes.keys()],
            z3.Not(z3.And(formulas))
        ))
    else:
        solver.add(z3.ForAll(
            [z3.Int(i) for i in features if i not in i_features] +
            [z3.Int(i) for i in attributes.keys() if i not in i_attributes.keys()],
            z3.Not(z3.And(formulas))
        ))
    log.debug(solver)
    log.info("Computing")
    result = solver.check()
    log.info("Printing output")
    if result == z3.sat:
        # sat: some interface configuration cannot be completed.
        model = solver.model()
        out = {"result": "not_valid", "contexts": [], "attributes": [], "features" : []}
        for i in contexts.keys():
            out["contexts"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        if features_as_boolean:
            for i in i_features:
                out["features"].append({"id": i, "value": unicode(model[z3.Bool(i)])})
        else:
            for i in i_features:
                out["features"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        for i in i_attributes.keys():
            out["attributes"].append({"id": i, "value": unicode(model[z3.Int(i)])})
        json.dump(out, out_stream)
        out_stream.write("\n")
    else:
        out_stream.write('{"result":"valid"}\n')
def translate_constraints(triple):
    """Worker for multiprocessing.Pool: translate one textual constraint.

    Takes a (constraint, data, features_as_boolean) triple (Pool.map
    passes a single argument) and returns the formula serialized as
    SMT-LIB text plus the features it mentions -- z3 expressions
    themselves cannot be pickled across processes.
    Exits the worker on a translation failure.
    """
    c,data,features_as_boolean = triple
    try:
        d = SpecTranslator.translate_constraint(c, data, features_as_boolean)
    except Exception as e:
        log.critical("Parsing failed while processing " + c + ": " + str(e))
        log.critical("Exiting")
        sys.exit(1)
    return toSMT2(d["formula"]),d["features"]
@click.command()
@click.argument('input_file',
                type=click.Path(exists=True, file_okay=True, dir_okay=False, writable=False, readable=True, resolve_path=True))
@click.option('--num-of-process', '-p', type=click.INT, default=1,
              help='Number of process to use for translating the dependencies.')
@click.option('--output-file', '-o',
              type=click.Path(exists=False, file_okay=True, dir_okay=False, writable=True, readable=True, resolve_path=True),
              help='Output file - Otherwise the output is printed on stdout.')
@click.option('--keep', '-k', is_flag=True,
              help="Do not convert dependencies into SMT formulas.")
@click.option('--verbose', '-v', is_flag=True,
              help="Print debug messages.")
@click.option('--validate', is_flag=True,
              help="Activate the validation mode to check if for all context the FM is not void.")
@click.option('--explain', is_flag=True,
              help="Tries to explain why a FM is void.")
@click.option('--check-interface',
              default="",
              help="Checks if the interface given as additional file is a proper interface.")
@click.option('--features-as-boolean', is_flag=True,
              help="Require features in constraints defined as booleans.")
def main(input_file,
         num_of_process,
         output_file,
         keep,
         verbose,
         validate,
         explain,
         check_interface,
         features_as_boolean):
    """
    INPUT_FILE Json input file
    """
    # Entry point: parse INPUT_FILE, build the z3 encoding of the feature
    # model and dispatch to the selected modality.
    modality = "" # default modality is to proceed with the reconfiguration
    interface_file = ""
    if keep:
        # NOTE(review): KEEP is assigned but never read in this file --
        # presumably consumed elsewhere (or dead); confirm before removing.
        global KEEP
        KEEP = True
    # Later flags win: --explain overrides --validate, --check-interface
    # overrides both.
    if validate:
        modality = "validate"
    if explain:
        modality = "explain"
    if check_interface:
        modality = "check-interface"
        interface_file = check_interface
    if verbose:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
        log.info("Verbose output.")
    out_stream = sys.stdout
    if output_file:
        out_stream = open(output_file, "w")
    features = set()
    initial_features = set()
    contexts = {}
    attributes = {}
    constraints = []
    preferences = []
    contexts_constraints = []
    log.info("Reading input file")
    data = read_json(input_file)
    log.info("Processing attributes")
    # Ids arrive wrapped ("attribute[<id>]", "feature[<id>]", ...); strip
    # the wrapper with a regex throughout.
    for i in data["attributes"]:
        id = re.match("attribute\[(.*)\]", i["id"]).group(1)
        attributes[id] = {}
        attributes[id]["min"] = i["min"]
        attributes[id]["max"] = i["max"]
        attributes[id]["feature"] = re.match("feature\[(.*)\]", i["featureId"]).group(1)
    for i in data["configuration"]["attribute_values"]:
        id = re.match("attribute\[(.*)\]", i["id"]).group(1)
        attributes[id]["initial"] = i["value"]
    log.debug(unicode(attributes))
    log.info("Processing contexts")
    for i in data["contexts"]:
        id = re.match("context\[(.*)\]", i["id"]).group(1)
        contexts[id] = {}
        contexts[id]["min"] = i["min"]
        contexts[id]["max"] = i["max"]
    for i in data["configuration"]["context_values"]:
        id = re.match("context\[(.*)\]", i["id"]).group(1)
        contexts[id]["initial"] = i["value"]
    log.debug(unicode(contexts))
    log.info("Processing initial features")
    for i in data["configuration"]["selectedFeatures"]:
        initial_features.add(re.match("feature\[(.*)\]", i).group(1))
    log.debug(unicode(initial_features))
    log.info("Processing Constraints")
    if num_of_process > 1:
        # convert in parallel formulas into smt and then parse it here
        # threads can not be used here because antlr parser seems not thread safe
        # the z3 expression can not be serialized
        log.debug("Starting to convert the constraints into smt representation")
        log.debug("Constraint to convert: " + unicode(len(data["constraints"])))
        pool = multiprocessing.Pool(num_of_process)
        results = pool.map(translate_constraints, [(x,data,features_as_boolean) for x in data["constraints"]])
        log.debug("Converting smt into z3 expressions")
        for smt_f,fs in results:
            constraints.append(z3.parse_smt2_string(smt_f))
            features.update(fs)
    else:
        for i in data["constraints"]:
            try:
                d = SpecTranslator.translate_constraint(i, data, features_as_boolean)
                log.debug("Find constrataint " + unicode(d))
                constraints.append(d["formula"])
                features.update(d["features"])
            except Exception as e:
                log.critical("Parsing failed while processing " + i + ": " + str(e))
                log.critical("Exiting")
                sys.exit(1)
    # possibility for reconfigure and explain modality to add directly SMT formulas
    if "smt_constraints" in data:
        log.info("Processing special input constraint modality")
        features.update(data["smt_constraints"]["features"])
        for i in data["smt_constraints"]["formulas"]:
            constraints.append(z3.parse_smt2_string(i))
            # for explain purposes add smt_constraint to constraints
            data["constraints"].append(i)
    log.info("Processing Preferences")
    for i in data["preferences"]:
        try:
            d = SpecTranslator.translate_preference(i, data, features_as_boolean)
            log.debug("Find preference " + unicode(d))
            preferences.append(d["formula"])
        except Exception as e:
            log.critical("Parsing failed while processing " + i + ": " + str(e))
            log.critical("Exiting")
            sys.exit(1)
    log.info("Processing Context Constraints")
    if "context_constraints" in data:
        for i in data["context_constraints"]:
            try:
                d = SpecTranslator.translate_constraint(i, data, features_as_boolean)
                log.debug("Find context constraint " + unicode(d))
                contexts_constraints.append(d["formula"])
            except Exception as e:
                log.critical("Parsing failed while processing " + i + ": " + str(e))
                log.critical("Exiting")
                sys.exit(1)
    # Dispatch to the selected modality.
    if modality == "validate":
        run_validate(features, initial_features, contexts, attributes, constraints,
                     preferences, contexts_constraints, features_as_boolean, out_stream)
    elif modality == "explain":
        run_explain(features, initial_features, contexts, attributes, constraints,
                    preferences, data, features_as_boolean, out_stream)
    elif modality == "check-interface":
        run_check_interface(features, contexts, attributes, constraints, contexts_constraints,
                            read_json(interface_file), features_as_boolean, out_stream)
    else:
        run_reconfigure(features, initial_features, contexts, attributes, constraints, preferences,
                        features_as_boolean, out_stream)
    log.info("Program Succesfully Ended")
if __name__ == "__main__":
main()
|
from discord.ext import commands
import discord
from bayohwoolph import bot
from config import Config
import logging
logger = logging.getLogger('bayohwoolph.cogs.mgmanager')
MULTIGAMETAGS = Config.config['MULTIGAMETAGS']
ROLE_MULTIGAME = MULTIGAMETAGS['ROLE_MULTIGAME']
ROLE_APEX = MULTIGAMETAGS['ROLE_APEX']
ROLE_WOW = MULTIGAMETAGS['ROLE_WOW']
ROLE_TCTD2 = MULTIGAMETAGS['ROLE_TCTD2']
ROLE_FORTNITE = MULTIGAMETAGS['ROLE_FORTNITE']
ROLE_ROE = MULTIGAMETAGS['ROLE_ROE']
ROLE_SOT = MULTIGAMETAGS['ROLE_SOT']
ROLE_MINECRAFT = MULTIGAMETAGS['ROLE_MINECRAFT']
# Greeting sent once the multi-game role is granted.
# Fix: both role mentions were missing the closing '>' so Discord rendered
# them as literal text instead of pinging the role.
Welcome_Message = """Congrats for joining the Multi-Game side of Dark Echo!!!
Below you will find a list of games that we have tags for. If you don't see a tag then please ping <@&551503870048862226> with the name of the game and we can look to add it.
Minecraft
Sea Of Thieves
Ring of Elysium
Fortnite
Apex Legends
The Division 2
World of Warships
Please use $"gamename" to add game tags to yourself doing so allows you to see any game specific channels.
Any questions please ping <@&551503870048862226> with your question and we will get back to you asap.
"""
class Multigame:
    """Cog that grants the Dark Echo multi-game role on request."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def Multigame(self, ctx):
        """Give the invoking member the multi-game role and greet them."""
        mg_role = discord.utils.get(ctx.guild.roles, id=int(ROLE_MULTIGAME))
        member = ctx.message.author
        try:
            await member.add_roles(mg_role)
            await ctx.send(Welcome_Message)
        except Exception:
            # Was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt and hid the cause; narrow the catch and
            # log the traceback before reporting the failure to the user.
            logger.exception('Unable to add multi-game role')
            await ctx.send('Error Unable to add role')
def setup(bot):
    """discord.py extension entry point: attach the Multigame cog."""
    bot.add_cog(Multigame(bot))
Updated to only control the MG tag assignment.
from discord.ext import commands
import discord
from bayohwoolph import bot
from config import Config
import logging
logger = logging.getLogger('bayohwoolph.cogs.mgmanager')
MULTIGAMETAGS = Config.config['MULTIGAMETAGS']
ROLE_MULTIGAME = MULTIGAMETAGS['ROLE_MULTIGAME']
# Greeting sent once the multi-game role is granted; lists the commands
# that self-assign the per-game tag roles.
Welcome_Message = """Congrats for joining the Multi-Game side of Dark Echo!!!
Below you will find a list of games that we have tags for. If you don't see a tag then please ping <@&551503870048862226> with the name of the game and we can look to add it.
Please use the following commands to add game tags:
$Minecraft
$SeaOfThieves
$RingOfElysium
$Fortnite
$ApexLegends
$TheDivision2
$WorldOfWarships
Any questions please ping <@&551503870048862226> with your question and we will get back to you asap.
"""
class Multigame:
    """Cog that grants the Dark Echo multi-game role on request."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def Multigame(self, ctx):
        """Adds Multigame tags"""
        mg_role = discord.utils.get(ctx.guild.roles, id=int(ROLE_MULTIGAME))
        member = ctx.message.author
        try:
            await member.add_roles(mg_role)
            await ctx.send(Welcome_Message)
        except Exception:
            # Was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt and hid the cause; narrow the catch and
            # log the traceback before reporting the failure to the user.
            logger.exception('Unable to add multi-game role')
            await ctx.send('Error Unable to add role')
def setup(bot):
    """discord.py extension entry point: attach the Multigame cog."""
    bot.add_cog(Multigame(bot))
import json
import os
import re
from discord.ext import commands
import aiohttp
from .utils import utils
# Path of the JSON "database" (ow.dat) holding battletag/tier per member,
# kept two directory levels above this cog module.
ow_storage = os.path.join(os.path.split(os.path.split(__file__)[0])[0],
                          'ow.dat')
# Seed an empty JSON object on first run so json.load() always succeeds.
if not os.path.exists(ow_storage):
    with open(ow_storage, 'w') as fp:
        fp.write('{}')
# owapi.net URL template: {cat} is filled now, {tag}/{tier} at request time.
endpoint = "https://owapi.net/api/v2/u/{{tag}}/{cat}/{{tier}}"
stat_endpoint = endpoint.format(cat='stats')
hero_endpoint = endpoint.format(cat='heroes')
class Not200(Exception):
    """Raised when the owapi.net endpoints answer with a non-200 status."""
def player_tag(arg):
    """Normalize *arg*: a Discord mention yields the member id; anything
    else is treated as a BattleTag whose last '#' becomes '-'."""
    mention = re.match(r'<@!?([0-9]+)>$', arg)
    if mention:
        return mention.group(1)
    flipped = arg[::-1].replace('#', '-', 1)
    return flipped[::-1]
def ow_tier(arg):
    """Map a user-supplied tier word to an owapi tier name."""
    quickplay_aliases = ('quick', 'quickplay', 'qp', 'general')
    return 'general' if arg in quickplay_aliases else 'competitive'
def ow_level(stats):
    """Format the level as '<prestige>+<level>'; prestige shown only if non-zero."""
    overall = stats['overall_stats']
    prefix = '{}+'.format(overall['prestige']) if overall['prestige'] else ''
    return prefix + str(overall['level'])
def time_from_decimal(dec):
    """Split *dec* hours into a whole (hours, minutes) pair."""
    total_minutes = round(dec * 60)
    return divmod(total_minutes, 60)
def most_played(hero_dict):
    """Return (HeroName, (hours, minutes)) for the hero with most playtime.

    Replaces the two-pass max-then-scan with a single max() over items;
    ties still resolve to the first hero in iteration order, and an empty
    dict still raises ValueError (max of an empty sequence), as before.
    """
    hero, played = max(hero_dict.items(), key=lambda kv: kv[1])
    return hero.title(), time_from_decimal(played)
def time_str(tupdec):
    """Render an (hours, minutes) tuple -- or decimal hours -- as text."""
    if isinstance(tupdec, tuple):
        hours, minutes = tupdec
    else:
        hours, minutes = time_from_decimal(tupdec)
    if hours:
        template = '{h} hour{hp}, {m} minute{mp}'
    elif minutes:
        template = '{m} minute{mp}'
    else:
        # zero playtime: the original formatted the literal 'None'
        return 'None'
    return template.format(h=hours, hp=utils.plural(hours),
                           m=minutes, mp=utils.plural(minutes))
class Overwatch:
    """Commands to look up Overwatch player statistics via owapi.net."""

    def __init__(self, bot):
        self.bot = bot
        # Persistent mapping: discord member id -> {'btag': ..., 'tier': ...}.
        with open(ow_storage) as fp:
            self.idents = json.load(fp)

    async def fetch_stats(self, tag, tier, it=0):
        """Fetch (stats_json, heroes_json, tier) for *tag*.

        When the competitive endpoints fail with an API-level message,
        falls back once to the 'general' (quickplay) tier; *it* caps the
        recursion.  Raises Not200 when the failure carries no 'msg'.
        """
        if it == 2:
            raise RecursionError
        with aiohttp.Timeout(10):
            async with self.bot.aiohsession.get(
                    stat_endpoint.format(tag=tag, tier=tier)) as resp:
                s1, j1 = resp.status, await resp.json()
            async with self.bot.aiohsession.get(
                    hero_endpoint.format(tag=tag, tier=tier)) as resp:
                s2, j2 = resp.status, await resp.json()
        if tier == 'competitive' and (s1 != 200 or s2 != 200):
            try:
                j1['msg']
            except:
                # no API error message at all -> genuine HTTP failure
                raise Not200
            else:
                # API said e.g. "no competitive data": retry quickplay
                j1, j2, tier = await self.fetch_stats(tag, 'general', it + 1)
        return j1, j2, tier

    def get_tag(self, ctx, tag):
        """Resolve *tag* ('' / mention id / battletag) to a battletag.

        Returns (tag, member_id); member_id stays None when *tag* already
        was a battletag.  Raises KeyError when the member is not stored.
        """
        member_id = None
        if tag == '' or '-' not in tag:
            member_id = tag or ctx.message.author.id
            tag = self.idents[member_id]['btag']
        return tag, member_id

    def get_tier(self, member_id):
        """Return the stored default tier for *member_id* (KeyError if absent)."""
        return self.idents[member_id]['tier']

    @commands.group(aliases=['ow'], pass_context=True,
                    invoke_without_command=True)
    async def overwatch(self, ctx, tag: player_tag = '', tier=None):
        """See stats of yourself or another player.

        [tag] can be either BattleTag or a mention to someone in the db
        [tier] can be 'quick', 'quickplay', 'qp', 'comp', or 'competitive'
        """
        try:
            tag, member_id = self.get_tag(ctx, tag)
        except KeyError:
            await self.bot.say("Not in the db.")
            return
        if tier is not None:
            tier = ow_tier(tier)
        else:
            # fall back to the member's stored tier, then to competitive
            try:
                tier = self.get_tier(member_id)
            except KeyError:
                tier = 'competitive'
        stats, heroes, tier = await self.fetch_stats(tag, tier)
        heroes = heroes['heroes']
        if tier == 'general':
            tier = 'quickplay'
        mp_hero, mp_time = most_played(heroes)
        message = ['{} stats:'.format(tier.title())]
        lines = [
            ('Battletag', stats['battletag'][::-1].replace('-', '#', 1)[::-1]),
            ('Time played', time_str(stats['game_stats']['time_played'])),
            ('Level', ow_level(stats)),
            ('Comp Rank', stats['overall_stats']['comprank'] or 'Unranked'),
            ('Most Played Hero', mp_hero),
            ('Hero Time', time_str(mp_time)),
            ('Games Played', stats['overall_stats']['games']),
            ('Games Won', stats['overall_stats']['wins']),
            ('Win Rate', '{}%'.format(stats['overall_stats']['win_rate'])),
            ('Kill/death', round(stats['game_stats']['kpd'], 2))]
        try:
            # not reported for every account; skip silently when missing
            lines.append(('Environmental Deaths',
                          int(stats['game_stats']['environmental_deaths'])))
        except:
            pass
        message.append('```xl')
        # align the value column on the widest label
        width = max(len(k) for k, v in lines)
        for line in lines:
            message.append('{0:<{width}} : {1}'.format(*line, width=width))
        message.append('```')
        await self.bot.say('\n'.join(message))

    @overwatch.command(pass_context=True)
    async def heroes(self, ctx, tag: player_tag = '', tier=None):
        """Get stats for each played hero.

        [tag] can be either BattleTag or a mention to someone in the db
        [tier] can be 'quick', 'quickplay', 'qp', 'comp', or 'competitive'
        """
        try:
            tag, member_id = self.get_tag(ctx, tag)
        except KeyError:
            await self.bot.say("Not in the db.")
            return
        if tier is not None:
            tier = ow_tier(tier)
        else:
            try:
                tier = self.get_tier(member_id)
            except KeyError:
                tier = 'competitive'
        stats, heroes, tier = await self.fetch_stats(tag, tier)
        heroes = heroes['heroes']
        if tier == 'general':
            tier = 'quickplay'
        message = ['{} hero stats:'.format(tier.title())]
        width = max(len(k) for k in heroes.keys())
        message.append('```xl')
        # most-played first
        for hero, time in sorted(heroes.items(), key=lambda kv: kv[1],
                                 reverse=True):
            message.append('{0:<{width}} : {1}'.format(hero.title(),
                                                       time_str(heroes[hero]),
                                                       width=width))
        message.append('```')
        await self.bot.say('\n'.join(message))

    @overwatch.command(name='set', aliases=['save'], pass_context=True)
    async def ow_set(self, ctx, tag, tier='competitive'):
        """Set your battletag and default tier."""
        tier = ow_tier(tier)
        # store the battletag with '-' instead of the last '#'
        tag = tag[::-1].replace('#', '-', 1)[::-1]
        self.idents[ctx.message.author.id] = {'btag': tag, 'tier': tier}
        with open(ow_storage, 'w') as fp:
            json.dump(self.idents, fp)
        await self.bot.say('\N{OK HAND SIGN} Added to db.')
def setup(bot):
    """discord.py extension entry point: attach the Overwatch cog."""
    bot.add_cog(Overwatch(bot))
Modify the level display; add argument descriptions to the `set` command.
import json
import os
import re
from discord.ext import commands
import aiohttp
from .utils import utils
# Path of the JSON "database" (ow.dat) holding battletag/tier per member,
# kept two directory levels above this cog module.
ow_storage = os.path.join(os.path.split(os.path.split(__file__)[0])[0],
                          'ow.dat')
# Seed an empty JSON object on first run so json.load() always succeeds.
if not os.path.exists(ow_storage):
    with open(ow_storage, 'w') as fp:
        fp.write('{}')
# owapi.net URL template: {cat} is filled now, {tag}/{tier} at request time.
endpoint = "https://owapi.net/api/v2/u/{{tag}}/{cat}/{{tier}}"
stat_endpoint = endpoint.format(cat='stats')
hero_endpoint = endpoint.format(cat='heroes')
class Not200(Exception):
    """Raised when the owapi.net endpoints answer with a non-200 status."""
def player_tag(arg):
    """Normalize *arg*: a Discord mention yields the member id; anything
    else is treated as a BattleTag whose last '#' becomes '-'."""
    mention = re.match(r'<@!?([0-9]+)>$', arg)
    if mention:
        return mention.group(1)
    flipped = arg[::-1].replace('#', '-', 1)
    return flipped[::-1]
def ow_tier(arg):
    """Map a user-supplied tier word to an owapi tier name."""
    quickplay_aliases = ('quick', 'quickplay', 'qp', 'general')
    return 'general' if arg in quickplay_aliases else 'competitive'
def ow_level(overall_stats):
    """Return the player's effective level as a single number.

    Each prestige is worth 100 levels, so the total is
    prestige * 100 + level.  Bug fix: the previous version returned
    prestige * 100 + 5 for everyone, ignoring the player's actual
    'level' field entirely.
    """
    return overall_stats['prestige'] * 100 + overall_stats['level']
def time_from_decimal(dec):
    """Split *dec* hours into a whole (hours, minutes) pair."""
    total_minutes = round(dec * 60)
    return divmod(total_minutes, 60)
def most_played(hero_dict):
    """Return (HeroName, (hours, minutes)) for the hero with most playtime.

    Replaces the two-pass max-then-scan with a single max() over items;
    ties still resolve to the first hero in iteration order, and an empty
    dict still raises ValueError (max of an empty sequence), as before.
    """
    hero, played = max(hero_dict.items(), key=lambda kv: kv[1])
    return hero.title(), time_from_decimal(played)
def time_str(tupdec):
    """Render an (hours, minutes) tuple -- or decimal hours -- as text."""
    if isinstance(tupdec, tuple):
        hours, minutes = tupdec
    else:
        hours, minutes = time_from_decimal(tupdec)
    if hours:
        template = '{h} hour{hp}, {m} minute{mp}'
    elif minutes:
        template = '{m} minute{mp}'
    else:
        # zero playtime: the original formatted the literal 'None'
        return 'None'
    return template.format(h=hours, hp=utils.plural(hours),
                           m=minutes, mp=utils.plural(minutes))
class Overwatch:
    """Commands to look up Overwatch player statistics via owapi.net."""

    def __init__(self, bot):
        self.bot = bot
        # Persistent mapping: discord member id -> {'btag': ..., 'tier': ...}.
        with open(ow_storage) as fp:
            self.idents = json.load(fp)

    async def fetch_stats(self, tag, tier, it=0):
        """Fetch (stats_json, heroes_json, tier) for *tag*.

        When the competitive endpoints fail with an API-level message,
        falls back once to the 'general' (quickplay) tier; *it* caps the
        recursion.  Raises Not200 when the failure carries no 'msg'.
        """
        if it == 2:
            raise RecursionError
        with aiohttp.Timeout(10):
            async with self.bot.aiohsession.get(
                    stat_endpoint.format(tag=tag, tier=tier)) as resp:
                s1, j1 = resp.status, await resp.json()
            async with self.bot.aiohsession.get(
                    hero_endpoint.format(tag=tag, tier=tier)) as resp:
                s2, j2 = resp.status, await resp.json()
        if tier == 'competitive' and (s1 != 200 or s2 != 200):
            try:
                j1['msg']
            except:
                # no API error message at all -> genuine HTTP failure
                raise Not200
            else:
                # API said e.g. "no competitive data": retry quickplay
                j1, j2, tier = await self.fetch_stats(tag, 'general', it + 1)
        return j1, j2, tier

    def get_tag(self, ctx, tag):
        """Resolve *tag* ('' / mention id / battletag) to a battletag.

        Returns (tag, member_id); member_id stays None when *tag* already
        was a battletag.  Raises KeyError when the member is not stored.
        """
        member_id = None
        if tag == '' or '-' not in tag:
            member_id = tag or ctx.message.author.id
            tag = self.idents[member_id]['btag']
        return tag, member_id

    def get_tier(self, member_id):
        """Return the stored default tier for *member_id* (KeyError if absent)."""
        return self.idents[member_id]['tier']

    @commands.group(aliases=['ow'], pass_context=True,
                    invoke_without_command=True)
    async def overwatch(self, ctx, tag: player_tag = '', tier=None):
        """See stats of yourself or another player.

        [tag] can be either BattleTag or a mention to someone in the db
        [tier] can be 'quick', 'quickplay', 'qp', 'comp', or 'competitive'
        """
        try:
            tag, member_id = self.get_tag(ctx, tag)
        except KeyError:
            await self.bot.say("Not in the db.")
            return
        if tier is not None:
            tier = ow_tier(tier)
        else:
            # fall back to the member's stored tier, then to competitive
            try:
                tier = self.get_tier(member_id)
            except KeyError:
                tier = 'competitive'
        stats, heroes, tier = await self.fetch_stats(tag, tier)
        heroes = heroes['heroes']
        if tier == 'general':
            tier = 'quickplay'
        mp_hero, mp_time = most_played(heroes)
        message = ['{} stats:'.format(tier.title())]
        lines = [
            ('Battletag', stats['battletag'][::-1].replace('-', '#', 1)[::-1]),
            ('Time played', time_str(stats['game_stats']['time_played'])),
            ('Level', ow_level(stats['overall_stats'])),
            ('Comp Rank', stats['overall_stats']['comprank'] or 'Unranked'),
            ('Most Played Hero', mp_hero),
            ('Hero Time', time_str(mp_time)),
            ('Games Played', stats['overall_stats']['games']),
            ('Games Won', stats['overall_stats']['wins']),
            ('Win Rate', '{}%'.format(stats['overall_stats']['win_rate'])),
            ('Kill/death', round(stats['game_stats']['kpd'], 2))]
        try:
            # not reported for every account; skip silently when missing
            lines.append(('Environmental Deaths',
                          int(stats['game_stats']['environmental_deaths'])))
        except:
            pass
        message.append('```xl')
        # align the value column on the widest label
        width = max(len(k) for k, v in lines)
        for line in lines:
            message.append('{0:<{width}} : {1}'.format(*line, width=width))
        message.append('```')
        await self.bot.say('\n'.join(message))

    @overwatch.command(pass_context=True)
    async def heroes(self, ctx, tag: player_tag = '', tier=None):
        """Get stats for each played hero.

        [tag] can be either BattleTag or a mention to someone in the db
        [tier] can be 'quick', 'quickplay', 'qp', 'comp', or 'competitive'
        """
        try:
            tag, member_id = self.get_tag(ctx, tag)
        except KeyError:
            await self.bot.say("Not in the db.")
            return
        if tier is not None:
            tier = ow_tier(tier)
        else:
            try:
                tier = self.get_tier(member_id)
            except KeyError:
                tier = 'competitive'
        stats, heroes, tier = await self.fetch_stats(tag, tier)
        heroes = heroes['heroes']
        if tier == 'general':
            tier = 'quickplay'
        message = ['{} hero stats:'.format(tier.title())]
        width = max(len(k) for k in heroes.keys())
        message.append('```xl')
        # most-played first
        for hero, time in sorted(heroes.items(), key=lambda kv: kv[1],
                                 reverse=True):
            message.append('{0:<{width}} : {1}'.format(hero.title(),
                                                       time_str(heroes[hero]),
                                                       width=width))
        message.append('```')
        await self.bot.say('\n'.join(message))

    @overwatch.command(name='set', aliases=['save'], pass_context=True)
    async def ow_set(self, ctx, tag, tier='competitive'):
        """Set your battletag and default tier.

        [tag] can be either BattleTag or a mention to someone in the db
        [tier] can be 'quick', 'quickplay', 'qp', 'comp', or 'competitive'
        """
        tier = ow_tier(tier)
        # store the battletag with '-' instead of the last '#'
        tag = tag[::-1].replace('#', '-', 1)[::-1]
        self.idents[ctx.message.author.id] = {'btag': tag, 'tier': tier}
        with open(ow_storage, 'w') as fp:
            json.dump(self.idents, fp)
        await self.bot.say('\N{OK HAND SIGN} Added to db.')
def setup(bot):
    """discord.py extension entry point: attach the Overwatch cog."""
    bot.add_cog(Overwatch(bot))
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pfor and for_loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import flags
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@test_util.run_all_in_graph_and_eager_modes
class PForTest(test.TestCase):
  """Base class comparing pfor against for_loop on the same loop_fn."""

  def _run_targets(self, targets1, targets2=None, run_init=True):
    """Evaluates both target structures, optionally initializing variables.

    Args:
      targets1: structure of tensors to evaluate.
      targets2: optional second structure; when given, it must flatten to the
        same number of tensors as `targets1`.
      run_init: whether to run the global variables initializer first.

    Returns:
      The evaluated values of `targets1` followed by `targets2`.
    """
    targets1 = nest.flatten(targets1)
    targets2 = ([] if targets2 is None else nest.flatten(targets2))
    assert len(targets1) == len(targets2) or not targets2
    if run_init:
      init = variables.global_variables_initializer()
      self.evaluate(init)
    return self.evaluate(targets1 + targets2)

  def run_and_assert_equal(self, targets1, targets2):
    """Checks that corresponding outputs of the two target sets match."""
    outputs = self._run_targets(targets1, targets2)
    outputs = nest.flatten(outputs)  # flatten SparseTensorValues
    n = len(outputs) // 2
    for i in range(n):
      # Fix: `np.object` was a deprecated alias removed in NumPy 1.20; the
      # builtin `object` is the canonical, equivalent dtype spelling.
      if outputs[i + n].dtype != object:
        self.assertAllClose(outputs[i + n], outputs[i], rtol=1e-4, atol=1e-5)
      else:
        self.assertAllEqual(outputs[i + n], outputs[i])

  def _test_loop_fn(self, loop_fn, iters,
                    loop_fn_dtypes=dtypes.float32,
                    parallel_iterations=None):
    """Runs loop_fn under both pfor and for_loop and compares the outputs."""
    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=iters,
                                    parallel_iterations=parallel_iterations)
    t2 = pfor_control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, iters=iters,
                                        parallel_iterations=parallel_iterations)
    self.run_and_assert_equal(t1, t2)

  def test_op_conversion_fallback_to_while_loop(self):
    # Note that we used top_k op for this test. If a converter gets defined for
    # it, we will need to find another op for which a converter has not been
    # defined.
    x = random_ops.random_uniform([3, 2, 4])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return nn.top_k(x_i)

    with self.assertRaisesRegexp(ValueError, "No converter defined"):
      self._test_loop_fn(
          loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
    # With the fallback flag set, unconvertible ops run in a while_loop.
    flags.FLAGS.op_conversion_fallback_to_while_loop = True
    self._test_loop_fn(
        loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
    flags.FLAGS.op_conversion_fallback_to_while_loop = False

  def test_parallel_iterations(self):
    for parallel_iterations in [2, 3, 8, 10]:
      x = random_ops.random_uniform([8, 3])

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        return array_ops.gather(x, i)
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 8, parallel_iterations=parallel_iterations)
      self._test_loop_fn(loop_fn, 4 * constant_op.constant(2),
                         parallel_iterations=parallel_iterations)

  def test_parallel_iterations_zero(self):
    with self.assertRaisesRegexp(ValueError, "positive integer"):
      pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=0)
    with self.assertRaisesRegexp(TypeError, "positive integer"):
      pfor_control_flow_ops.for_loop(lambda i: 1, dtypes.int32, 8,
                                     parallel_iterations=0)

  def test_parallel_iterations_one(self):
    with self.assertRaisesRegexp(ValueError, "Use for_loop instead"):
      pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=1)
@test_util.run_all_in_graph_and_eager_modes
class ArrayTest(PForTest):
  """pfor conversion tests for array_ops."""

  def test_gather(self):
    x = random_ops.random_uniform([3, 3, 3])

    def loop_fn(i):
      outputs = []
      x_i = array_ops.gather(x, i)
      for y in [x, x_i]:
        # Fix: use identity (`is`) rather than `==`. Comparing tensors with
        # `==` either relies on deprecated identity semantics or builds an
        # elementwise-comparison tensor; the intent here is "is y the
        # un-gathered input?".
        axes = [0, 2, -1] if y is x else [0]
        for axis in axes:
          outputs.append(array_ops.gather(y, 2, axis=axis))
          outputs.append(array_ops.gather(y, i, axis=axis))
          outputs.append(array_ops.gather(y, [i], axis=axis))
          outputs.append(array_ops.gather(y, [i, 2], axis=axis))
          outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))
      return outputs

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 20)

  def test_shape(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.shape(x_i), array_ops.shape(x_i, out_type=dtypes.int64)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])

  def test_size(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.size(x_i), array_ops.size(x_i, out_type=dtypes.int64)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])

  def test_rank(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.rank(x_i)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])

  def test_shape_n(self):
    x = random_ops.random_uniform([3, 2, 3])
    y = random_ops.random_uniform([3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      y_i = array_ops.gather(y, i)
      return array_ops.shape_n([x_i, x, y, y_i]), array_ops.shape_n(
          [x_i, x, y, y_i], out_type=dtypes.int64)

    self._test_loop_fn(
        loop_fn, 3, loop_fn_dtypes=[dtypes.int32] * 4 + [dtypes.int64] * 4)

  def test_reshape(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.reshape(x1, [-1]), array_ops.reshape(x1, [1, 3, 1, -1])

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_expand_dims(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.expand_dims(
          x1, axis=-1), array_ops.expand_dims(
              x1, axis=1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_slice(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.slice(x1, begin=(0, 1), size=(2, 1))

    self._test_loop_fn(loop_fn, 3)

  def test_tile(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.tile(x1, [2, 1])

    self._test_loop_fn(loop_fn, 3)

  def test_tile_loop_dependent(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.tile(x1, [i, 1])

    # Tile multiples must be loop invariant for pfor conversion.
    with self.assertRaisesRegexp(ValueError, "expected to be loop invariant"):
      pfor_control_flow_ops.pfor(loop_fn, 2)

  def test_pack(self):
    x = random_ops.random_uniform([3, 2, 3])
    y = random_ops.random_uniform([2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.stack([x1, y], axis=-1)

    self._test_loop_fn(loop_fn, 1)

  def test_unpack(self):
    x = random_ops.random_uniform([3, 2, 3, 4])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.unstack(
          x_i, 4, axis=-1), array_ops.unstack(
              x_i, 3, axis=1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 7)

  def test_pad(self):
    x = random_ops.random_uniform([3, 2, 3])
    padding = constant_op.constant([[1, 2], [3, 4]])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.pad(x1, padding, mode="CONSTANT")

    self._test_loop_fn(loop_fn, 3)

  def test_split(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.split(x1, 2, axis=0), array_ops.split(x1, 3, axis=-1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 5)

  def test_split_v(self):
    x = random_ops.random_uniform([3, 6, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return (array_ops.split(x1, [2, 1, 3], axis=0),
              array_ops.split(x1, [3], axis=-1))

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 4)

  def test_transpose(self):
    x = random_ops.random_uniform([3, 2, 3, 4])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.transpose(x1, [2, 1, 0])

    self._test_loop_fn(loop_fn, 3)

  def test_zeros_like(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      # Fix: a stray trailing comma previously made `z` a 1-tuple, so
      # `z + x1` was computed through tuple->tensor conversion with an extra
      # leading dimension. `z` is meant to be the zeros tensor itself.
      z = array_ops.zeros_like(x1)
      return z, z + x1

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_concat_v2(self):
    x = random_ops.random_uniform([3, 2, 3])
    y = random_ops.random_uniform([2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.concat(
          [x1, x1, y], axis=0), array_ops.concat(
              [x1, x1, y], axis=-1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_unary_cwise_ops(self):
    for op in [array_ops.identity, array_ops.stop_gradient]:
      with backprop.GradientTape(persistent=True) as g:
        x = random_ops.random_uniform([3, 5])
        g.watch(x)

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        with g:
          x1 = array_ops.gather(x, i)
          y = op(x1) + x1
          loss = nn.l2_loss(y)
        return op(x), y, g.gradient(loss, x1)
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)

  def test_identity_n(self):
    x = random_ops.random_uniform([3, 4])

    def loop_fn(i):
      return array_ops.identity_n([x, array_ops.gather(x, i)])

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_matrix_diag_part(self):
    x = random_ops.random_uniform([3, 4, 2])

    def loop_fn(i):
      return array_ops.matrix_diag_part(array_ops.gather(x, i))

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])

  def test_strided_slice(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])
      g.watch(x)

    def loop_fn(i):
      with g:
        x_i = array_ops.gather(x, i)
        y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
        loss = nn.l2_loss(y)
      return y, g.gradient(loss, x_i)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
@test_util.run_all_in_graph_and_eager_modes
class BitwiseTest(PForTest):
  """pfor conversion tests for bitwise ops."""

  def test_unary_cwise(self):
    for op in [bitwise_ops.invert]:
      operand = random_ops.random_uniform(
          [7, 3, 5], maxval=10, dtype=dtypes.int32)

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        return op(array_ops.gather(operand, i))
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])

  def test_binary_cwise(self):
    binary_ops = [
        bitwise_ops.bitwise_and,
        bitwise_ops.bitwise_or,
        bitwise_ops.bitwise_xor,
        bitwise_ops.left_shift,
        bitwise_ops.right_shift,
    ]
    for op in binary_ops:
      lhs = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
      rhs = random_ops.random_uniform([3, 5], maxval=10, dtype=dtypes.int32)
      output_dtypes = []

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        lhs_i = array_ops.gather(lhs, i)
        rhs_i = array_ops.gather(rhs, i)
        # Exercise every stacked/unstacked operand combination.
        outputs = [
            op(lhs, rhs),
            op(lhs_i, rhs),
            op(lhs, rhs_i),
            op(lhs_i, rhs_i),
            op(lhs_i, lhs_i),
        ]
        # Record output dtypes as a side effect for _test_loop_fn below.
        del output_dtypes[:]
        output_dtypes.extend([t.dtype for t in outputs])
        return outputs
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
@test_util.run_all_in_graph_and_eager_modes
class MathTest(PForTest):
  """pfor conversion tests for math ops."""

  def test_unary_cwise_ops(self):
    complex_ops = [
        math_ops.angle,
        math_ops.imag,
        math_ops.complex_abs,
        math_ops.real,
        math_ops.conj,
    ]
    real_ops = [
        lambda x: math_ops.acosh(1 + math_ops.square(x)),
        math_ops.abs,
        math_ops.acos,
        math_ops.asin,
        math_ops.asinh,
        math_ops.atan,
        math_ops.atanh,
        math_ops.bessel_i0e,
        math_ops.bessel_i1e,
        math_ops.cos,
        math_ops.cosh,
        math_ops.digamma,
        math_ops.erf,
        math_ops.erfc,
        math_ops.exp,
        math_ops.expm1,
        math_ops.inv,
        math_ops.is_finite,
        math_ops.is_inf,
        math_ops.lgamma,
        math_ops.log,
        math_ops.log1p,
        math_ops.neg,
        math_ops.negative,
        math_ops.reciprocal,
        math_ops.rint,
        math_ops.round,
        math_ops.rsqrt,
        math_ops.sigmoid,
        math_ops.sign,
        math_ops.sin,
        math_ops.sinh,
        math_ops.sqrt,
        math_ops.square,
        math_ops.tan,
        # Fix: `math_ops.tanh` appeared twice here, re-running an identical
        # case; the duplicate entry has been removed.
        math_ops.tanh,
        nn.elu,
        nn.relu,
        nn.relu6,
        nn.selu,
        nn.softplus,
        nn.softsign,
    ]
    for op in complex_ops + real_ops:
      with backprop.GradientTape(persistent=True) as g:
        x = random_ops.random_uniform([3, 5])
        g.watch(x)
        if op in complex_ops:
          y = random_ops.random_uniform([3, 5])
          g.watch(y)
          x = math_ops.complex(x, y)

      # pylint: disable=cell-var-from-loop
      output_dtypes = []

      def loop_fn(i):
        with g:
          x1 = array_ops.gather(x, i)
          y1 = op(x1)
          outputs = [op(x), y1]
          # Only float32 outputs get a gradient check; bool/complex outputs
          # either have no gradient or are handled elsewhere.
          if y1.dtype == dtypes.float32:
            loss = math_ops.reduce_sum(y1 * y1)
          else:
            loss = None
        if loss is not None:
          grad = g.gradient(loss, x1)
          if grad is not None:
            outputs.append(grad)
        # Record output dtypes as a side effect for _test_loop_fn below.
        del output_dtypes[:]
        output_dtypes.extend([t.dtype for t in outputs])
        return outputs
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)

  def test_unary_cwise_no_grad(self):
    for op in [math_ops.ceil,
               math_ops.floor,
               math_ops.logical_not]:
      x = random_ops.random_uniform([3, 5])
      if op == math_ops.logical_not:
        x = x > 0

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        return op(array_ops.gather(x, i))
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=x.dtype)

  def test_binary_cwise_ops(self):
    logical_ops = [
        math_ops.logical_and,
        math_ops.logical_or,
        math_ops.logical_xor
    ]

    # Wrapper functions restricting the range of inputs of zeta and polygamma.
    def safe_polygamma(x, y):
      return math_ops.polygamma(
          math_ops.round(clip_ops.clip_by_value(y, 1, 10)),
          x * x + 1)

    def safe_zeta(x, y):
      return math_ops.zeta(x * x + 1, y * y)

    float_ops = [
        math_ops.add,
        math_ops.add_v2,
        math_ops.atan2,
        math_ops.complex,
        math_ops.div,
        math_ops.divide,
        math_ops.div_no_nan,
        math_ops.equal,
        math_ops.floor_div,
        math_ops.floor_mod,
        math_ops.greater,
        math_ops.greater_equal,
        math_ops.igamma,
        math_ops.igammac,
        math_ops.igamma_grad_a,
        math_ops.less,
        math_ops.less_equal,
        math_ops.maximum,
        math_ops.minimum,
        math_ops.mod,
        math_ops.multiply,
        math_ops.not_equal,
        math_ops.pow,
        math_ops.squared_difference,
        math_ops.subtract,
        math_ops.truncate_mod,
        safe_polygamma,
        safe_zeta,
    ]
    for op in logical_ops + float_ops:
      x = random_ops.random_uniform([7, 3, 5])
      y = random_ops.random_uniform([3, 5])
      if op in logical_ops:
        x = x > 0
        y = y > 0
      output_dtypes = []

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x1 = array_ops.gather(x, i)
        y1 = array_ops.gather(y, i)
        # Exercise every stacked/unstacked operand combination.
        outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
        del output_dtypes[:]
        output_dtypes.extend([t.dtype for t in outputs])
        return outputs
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)

  def test_approximate_equal(self):
    x = random_ops.random_uniform([3, 5])
    y = random_ops.random_uniform([3, 5])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      y1 = array_ops.gather(y, i)
      return math_ops.approximate_equal(x1, y1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.bool])

  def test_addn(self):
    x = random_ops.random_uniform([2, 3, 5])
    y = random_ops.random_uniform([3, 5])
    z = random_ops.random_uniform([3, 5])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return math_ops.add_n([x1, y, z])

    self._test_loop_fn(loop_fn, 2)

  def test_matmul(self):
    for tr_a in (True, False):
      for tr_b in (True, False):
        for stack_a in (True, False):
          for stack_b in (True, False):
            shape_a = (5, 3) if tr_a else (3, 5)
            if stack_a:
              shape_a = (2,) + shape_a
            shape_b = (7, 5) if tr_b else (5, 7)
            if stack_b:
              shape_b = (2,) + shape_b
            x = random_ops.random_uniform(shape_a)
            y = random_ops.random_uniform(shape_b)

            # pylint: disable=cell-var-from-loop
            def loop_fn(i):
              a = array_ops.gather(x, i) if stack_a else x
              b = array_ops.gather(y, i) if stack_b else y
              return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
            # pylint: enable=cell-var-from-loop

            self._test_loop_fn(loop_fn, 2)

  def test_batch_matmul(self):
    for tr_a in (True, False):
      for tr_b in (True, False):
        for stack_a in (True, False):
          for stack_b in (True, False):
            shape_a = (4, 5, 3) if tr_a else (4, 3, 5)
            if stack_a:
              shape_a = (2,) + shape_a
            shape_b = (4, 7, 5) if tr_b else (4, 5, 7)
            if stack_b:
              shape_b = (2,) + shape_b
            x = random_ops.random_uniform(shape_a)
            y = random_ops.random_uniform(shape_b)

            # pylint: disable=cell-var-from-loop
            def loop_fn(i):
              a = array_ops.gather(x, i) if stack_a else x
              b = array_ops.gather(y, i) if stack_b else y
              return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
            # pylint: enable=cell-var-from-loop

            self._test_loop_fn(loop_fn, 2)

  def test_reduction(self):
    x = random_ops.random_uniform([2, 3, 4, 5])
    for op in [
        math_ops.reduce_sum, math_ops.reduce_prod, math_ops.reduce_max,
        math_ops.reduce_min
    ]:
      for axis in ([1], None, [0, 2]):
        for keepdims in (True, False):

          # pylint: disable=cell-var-from-loop
          def loop_fn(i):
            a = array_ops.gather(x, i)
            return op(a, axis=axis, keepdims=keepdims)
          # pylint: enable=cell-var-from-loop

          self._test_loop_fn(loop_fn, 2)

  def test_cum_sum(self):
    x = random_ops.random_uniform([2, 3, 4, 5])
    for axis in (1, -2):
      for exclusive in (True, False):
        for reverse in (True, False):

          # pylint: disable=cell-var-from-loop
          def loop_fn(i):
            a = array_ops.gather(x, i)
            return math_ops.cumsum(
                a, axis=axis, exclusive=exclusive, reverse=reverse)
          # pylint: enable=cell-var-from-loop

          self._test_loop_fn(loop_fn, 2)

  def test_cum_prod(self):
    x = random_ops.random_uniform([2, 3, 4, 5])
    for axis in (1, -2):
      for exclusive in (True, False):
        for reverse in (True, False):

          # pylint: disable=cell-var-from-loop
          def loop_fn(i):
            a = array_ops.gather(x, i)
            return math_ops.cumprod(
                a, axis=axis, exclusive=exclusive, reverse=reverse)
          # pylint: enable=cell-var-from-loop

          self._test_loop_fn(loop_fn, 2)

  def test_bias_add(self):
    x_shape = [2, 3, 4, 5, 6]
    x = random_ops.random_uniform(x_shape)
    for data_format in ("NCHW", "NHWC"):
      with backprop.GradientTape(persistent=True) as g:
        bias_dim = 2 if data_format == "NCHW" else -1
        bias_shape = x_shape[bias_dim]
        bias = random_ops.random_uniform([bias_shape])
        g.watch(bias)

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        with g:
          a = array_ops.gather(x, i)
          y = nn.bias_add(a, bias, data_format=data_format)
          loss = math_ops.reduce_sum(y * y)
        return y, g.gradient(loss, bias)
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(
          loop_fn, 2, loop_fn_dtypes=[dtypes.float32, dtypes.float32])

  def test_unsorted_segment_sum(self):
    t = random_ops.random_uniform([3, 3, 2])
    segment_ids = constant_op.constant([[0, 0, 2], [0, 1, 2], [2, 2, 2]])
    num_segments = 3

    def loop_fn(i):
      data = array_ops.gather(t, i)
      data_0 = array_ops.gather(t, 0)
      seg_ids = array_ops.gather(segment_ids, i)
      return (math_ops.unsorted_segment_sum(data, seg_ids, num_segments),
              math_ops.unsorted_segment_sum(data_0, seg_ids, num_segments))

    self._test_loop_fn(loop_fn, 3, [dtypes.float32] * 2)

  def test_cast(self):
    x = constant_op.constant([[1], [2]])
    y = constant_op.constant([[1.0], [2.0]])

    def loop_fn(i):
      return (math_ops.cast(array_ops.gather(x, i), dtypes.float32),
              math_ops.cast(array_ops.gather(y, i), dtypes.int32))

    self._test_loop_fn(
        loop_fn, 2, loop_fn_dtypes=[dtypes.float32, dtypes.int32])

  def test_tanh_axpy(self):
    a = constant_op.constant(3.)
    x = random_ops.random_uniform([4, 5])
    y = random_ops.random_uniform([6, 5])
    n = x.shape[0]

    def loop_fn(i):
      return math_ops.tanh(a * array_ops.gather(x, i) + array_ops.gather(y, i))

    self._test_loop_fn(loop_fn, n)

  def test_select(self):
    # Fix: removed a dead `cond = constant_op.constant([True, False])`
    # assignment that was unconditionally overwritten inside the loop below.
    a = random_ops.random_uniform([2, 3, 5])
    b = random_ops.random_uniform([2, 3, 5])
    for cond_shape in [2], [2, 3], [2, 3, 5]:
      cond = random_ops.random_uniform(cond_shape) > 0.5

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        a_i = array_ops.gather(a, i)
        b_i = array_ops.gather(b, i)
        cond_i = array_ops.gather(cond, i)
        return array_ops.where(cond_i, a_i, b_i)
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class NNTest(PForTest):
  """pfor conversion tests for nn ops (conv, pooling, batch norm, losses)."""

  def test_conv2d(self):
    # Leading dim 3 is the pfor loop dimension; each iteration convolves one
    # [2, 12, 12, 3] batch.
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    filt = random_ops.random_uniform([3, 3, 3, 7])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return nn.conv2d(
          x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")

    self._test_loop_fn(loop_fn, 3)

  def test_conv2d_backprop_input(self):
    x_shape = [2, 12, 12, 3]
    filt = random_ops.random_uniform([3, 3, 3, 7])
    # Per-iteration output gradients; [5, 5] matches VALID conv with stride 2.
    grad = random_ops.random_uniform([3, 2, 5, 5, 7])

    def loop_fn(i):
      grad1 = array_ops.gather(grad, i)
      return nn.conv2d_backprop_input(
          x_shape,
          filt,
          grad1,
          strides=[1, 2, 2, 1],
          padding="VALID",
          data_format="NHWC")

    self._test_loop_fn(loop_fn, 3)

  def test_conv2d_backprop_filter(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    # x_0 is loop invariant; used to test the unstacked-input path as well.
    x_0 = array_ops.gather(x, 0)
    filter_sizes = [3, 3, 3, 7]
    grad = random_ops.random_uniform([3, 2, 5, 5, 7])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      grad_i = array_ops.gather(grad, i)
      return [
          nn.conv2d_backprop_filter(
              inp,
              filter_sizes,
              grad_i,
              strides=[1, 2, 2, 1],
              padding="VALID",
              data_format="NHWC") for inp in [x_i, x_0]
      ]

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_avg_pool(self):
    # The tape is created (and exited) here; loop_fn re-enters it via `with g`
    # so per-iteration ops are recorded for the gradient call.
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]

    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.avg_pool(
            x1, ksize, strides=[1, 2, 2, 1], padding="VALID",
            data_format="NHWC")
        loss = nn.l2_loss(output)
      return output, g.gradient(loss, x1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_max_pool(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]
      strides = [1, 2, 2, 1]

    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.max_pool(
            x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
        loss = nn.l2_loss(output)
        # Watch `ones` so the second-order gradient (grad of grad w.r.t. the
        # output gradients) can be computed below.
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)

  def test_max_pool3d(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 1, 3, 3, 1]
      strides = [1, 1, 2, 2, 1]

    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.max_pool3d(
            x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
        loss = nn.l2_loss(output)
        # As in test_max_pool: watch `ones` for the second-order gradient.
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)

  def test_fused_batch_norm(self):
    data_formats = ["NHWC"]
    if test.is_gpu_available():
      data_formats.append("NCHW")
    for is_training in (True, False):
      for data_format in data_formats:
        with backprop.GradientTape(persistent=True) as g:
          if data_format == "NCHW":
            x = random_ops.random_uniform([3, 1, 2, 5, 5])
          else:
            x = random_ops.random_uniform([3, 1, 5, 5, 2])
          g.watch(x)
          scale = random_ops.random_uniform([2])
          g.watch(scale)
          offset = random_ops.random_uniform([2])
          g.watch(offset)
          # In training mode the op computes batch statistics itself.
          mean = None if is_training else random_ops.random_uniform([2])
          variance = None if is_training else random_ops.random_uniform([2])

        # pylint: disable=cell-var-from-loop
        def loop_fn(i):
          with g:
            x1 = array_ops.gather(x, i)
            outputs = nn.fused_batch_norm(
                x1,
                scale,
                offset,
                mean=mean,
                variance=variance,
                epsilon=0.01,
                data_format=data_format,
                is_training=is_training)
            outputs = list(outputs)
            # We only test the first value of outputs when is_training is False.
            # It looks like CPU and GPU have different outputs for batch_mean
            # and batch_variance for this case.
            if not is_training:
              outputs[1] = constant_op.constant(0.)
              outputs[2] = constant_op.constant(0.)
            loss = nn.l2_loss(outputs[0])
          gradients = g.gradient(loss, [x1, scale, offset])
          return outputs + gradients
        # pylint: enable=cell-var-from-loop

        self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 6)

  def test_softmax_cross_entropy_with_logits(self):
    with backprop.GradientTape(persistent=True) as g:
      logits = random_ops.random_uniform([3, 2, 4])
      g.watch(logits)
      labels = random_ops.random_uniform([3, 2, 4])
      # Normalize so each label row is a valid probability distribution.
      labels /= math_ops.reduce_sum(labels, axis=[2], keepdims=True)

    def loop_fn(i):
      with g:
        logits_i = array_ops.gather(logits, i)
        labels_i = array_ops.gather(labels, i)
        loss = nn.softmax_cross_entropy_with_logits(
            labels=labels_i, logits=logits_i)
        total_loss = math_ops.reduce_sum(loss)
      return loss, g.gradient(total_loss, logits_i)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
class RandomTest(PForTest):
  """pfor conversion tests for random ops.

  The random values generated in the two implementations are not guaranteed
  to match, so only the shapes of the returned values are compared.
  """

  def run_and_assert_equal(self, targets1, targets2):
    outputs = self._run_targets(targets1, targets2)
    n = len(outputs) // 2
    for first, second in zip(outputs[:n], outputs[n:]):
      self.assertAllEqual(first.shape, second.shape)

  def test_random_uniform(self):
    self._test_loop_fn(lambda _: random_ops.random_uniform([3]), 5)

  def test_random_uniform_int(self):
    loop_fn = lambda _: random_ops.random_uniform(
        [3], maxval=1, dtype=dtypes.int32)
    self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=dtypes.int32)

  def test_random_standard_normal(self):
    self._test_loop_fn(lambda _: random_ops.random_normal([3]), 5)

  def test_truncated_normal(self):
    self._test_loop_fn(lambda _: random_ops.truncated_normal([3]), 5)

  def test_random_gamma(self):
    self._test_loop_fn(lambda _: random_ops.random_gamma([3], alpha=[0.5]), 5)

  def test_random_poisson_v2(self):
    loop_fn = lambda _: random_ops.random_poisson(lam=[1.3], shape=[3])
    self._test_loop_fn(loop_fn, 5)
class LoggingTest(PForTest):
  """pfor conversion tests for logging and assertion ops."""

  def test_print(self):
    inputs = random_ops.random_uniform([3, 5])

    def loop_fn(i):
      row = array_ops.gather(inputs, i)
      print_args = [row, "x1", array_ops.shape(row)]
      return logging_ops.Print(row, print_args, summarize=10)

    self._test_loop_fn(loop_fn, 3)

  def test_assert(self):

    def loop_fn(i):
      return control_flow_ops.Assert(i < 10, [i, [10], [i + 1]])

    # TODO(agarwal): make this work with for_loop.
    with session.Session() as sess:
      converted = pfor_control_flow_ops.pfor(loop_fn, 3)
      sess.run(converted)
class TensorArrayTest(PForTest):
  """pfor conversion tests for TensorArray reads, writes, scatters, gathers."""

  def test_create_outside_and_read(self):
    # clear_after_read=False so the same index can be read by every iteration.
    ta = tensor_array_ops.TensorArray(
        dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)

    def loop_fn(i):
      return ta.read(i), ta.read(0)

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)

  def test_create_outside_and_gather(self):
    ta = tensor_array_ops.TensorArray(
        dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)

    def loop_fn(i):
      return ta.gather([i]), ta.gather([0, 1])

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)

  def test_create_outside_and_write_and_scatter(self):
    t = tensor_array_ops.TensorArray(dtypes.int32, 10, clear_after_read=False)
    # Keep the handle so the final state can be read back after each run.
    handle = t.handle

    def loop_fn(i):
      ta = t.write(i + 2, 2 * i).write(i, 5)
      ta = ta.scatter([4 + i], [4]).scatter([6 + i, 8 + i], [6 + i, 8 + i])
      # Return the flow tensor; it threads the write ordering to the caller.
      return ta.flow

    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
    out1 = tensor_array_ops.TensorArray(
        dtypes.int32, handle=handle, flow=t1[-1]).stack()
    output1 = self._run_targets(out1)

    t2 = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, iters=2)
    out2 = tensor_array_ops.TensorArray(
        dtypes.int32, handle=handle, flow=t2[-1]).stack()
    output2 = self._run_targets(out2)
    self.assertAllClose(output2, output1)

  def test_create_inside_and_write(self):

    def loop_fn(i):
      # TODO(agarwal): switching the order of writes to ta1 does not work.
      ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).write(0, i).write(
          1, 1)
      ta2 = tensor_array_ops.TensorArray(dtypes.int32, 1).write(0, 1)
      return ta1.stack(), ta2.stack()

    self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)

  def test_create_inside_and_scatter(self):

    def loop_fn(i):
      # TODO(agarwal): switching the order of scatter to ta1 does not work.
      ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).scatter(
          [0], [[i, 2]]).scatter([1], [[1, 2]])
      ta2 = tensor_array_ops.TensorArray(dtypes.int32,
                                         2).scatter([0], [3]).scatter([1], [4])
      return ta1.stack(), ta2.stack()

    self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)

  def test_create_inside_and_read(self):

    def loop_fn(i):
      ta1 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
      # TODO(agarwal): ta1.read(i) currently is not supported.
      return ta1.read(0), ta2.read(0), ta2.read(i)

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)

  def test_create_inside_and_gather(self):

    def loop_fn(i):
      ta1 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
      # TODO(agarwal): ta1.read(i) currently is not supported.
      return ta1.gather([0, 1]), ta2.gather([0, 1]), ta2.gather([i])

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)

  def test_grad(self):
    x = random_ops.random_uniform([3, 2])
    ta = tensor_array_ops.TensorArray(
        dtypes.float32, 3, clear_after_read=False).unstack(x)
    y = math_ops.square(ta.stack())

    def loop_fn(i):
      y_i = array_ops.gather(y, i)
      grad = gradient_ops.gradients(y_i, x)[0]
      return array_ops.gather(grad, i)

    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=3)
    # y = x * x. Hence dy/dx = 2 * x.
    actual_grad = 2.0 * x
    with session.Session() as sess:
      # NOTE(review): the names here are swapped — t1 is the computed gradient
      # and `actual_grad` the expected one; assertAllClose is tolerant either
      # way, so behavior is unaffected.
      actual_grad, computed_grad = sess.run([t1, actual_grad])
      self.assertAllClose(actual_grad, computed_grad)
class StackTest(PForTest):
  """pfor conversion tests for stack_v2 push/pop ops.

  Stack ops are stateful, so explicit control dependencies enforce the
  push/pop ordering within each test.
  """

  def test_stack_inside_loop_invariant(self):

    def loop_fn(_):
      s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
      op1 = data_flow_ops.stack_push_v2(s, 1)
      with ops.control_dependencies([op1]):
        op2 = data_flow_ops.stack_push_v2(s, 2)
      with ops.control_dependencies([op2]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e2]):
        e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)

  def test_stack_inside_push_loop_dependent(self):

    def loop_fn(i):
      s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
      # First push depends on the loop index, exercising the stacked path.
      op1 = data_flow_ops.stack_push_v2(s, i)
      with ops.control_dependencies([op1]):
        op2 = data_flow_ops.stack_push_v2(s, 2)
      with ops.control_dependencies([op2]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e2]):
        e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)

  def test_stack_outside_pop(self):
    # Push 5, 6, 7 outside the loop; iterations pop shared state.
    s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
    op = data_flow_ops.stack_push_v2(s, 5)
    with ops.control_dependencies([op]):
      op = data_flow_ops.stack_push_v2(s, 6)
    with ops.control_dependencies([op]):
      op = data_flow_ops.stack_push_v2(s, 7)

    def loop_fn(_):
      e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e1]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    with ops.control_dependencies([op]):
      e1, e2 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
    with ops.control_dependencies([e1, e2]):
      e3 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
    v1, v2, v3 = self._run_targets([e1, e2, e3], run_init=False)
    # Loop-invariant pops are deduplicated: both iterations observe the same
    # popped values, and one element (5) remains for the final pop.
    self.assertAllEqual([7, 7], v1)
    self.assertAllEqual([6, 6], v2)
    self.assertAllEqual(5, v3)

  def test_stack_outside_push(self):
    # Pushing to a stack created outside the loop is not convertible.
    s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)

    def loop_fn(_):
      return data_flow_ops.stack_push_v2(s, 7)

    with self.assertRaisesRegexp(ValueError, "StackPushV2 not allowed.*"):
      pfor_control_flow_ops.pfor(loop_fn, iters=2)
# TODO(agarwal): test nested while_loops. This currently requires converting a
# tf.cond.
class ControlFlowTest(PForTest):
def test_while_outside_loop(self):
  """A while_loop built outside loop_fn is loop invariant."""
  counter = control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])

  def loop_fn(i):
    return counter + i

  self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_invariant_while(self):
  """A while_loop inside loop_fn that ignores the loop index."""

  def loop_fn(_):
    cond = lambda j: j < 4
    body = lambda j: j + 1
    return control_flow_ops.while_loop(cond, body, [0])

  self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_invariant_while_with_control_dependency(self):
  """An invariant while_loop under a loop-index control dependency."""

  def loop_fn(i):
    cond = lambda j: j < 4
    body = lambda j: j + 1
    with ops.control_dependencies([i]):
      return control_flow_ops.while_loop(cond, body, [0])

  self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_while_with_stateful_ops(self):
  """A while_loop whose body contains a stateful (random) op."""

  def loop_fn(_):
    counter, _ = control_flow_ops.while_loop(
        lambda j, x: j < 4,
        lambda j, x: (j + 1, x + random_ops.random_uniform([])), [0, 0.])
    return counter

  self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_while_unstacked_condition(self):
  """A while_loop whose condition is loop invariant but body uses the index."""

  def loop_fn(i):
    cond = lambda j, x: j < 4
    body = lambda j, x: (j + 1, x + i)
    return control_flow_ops.while_loop(cond, body, [0, 0])

  self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int32])
def test_while(self):
x = random_ops.random_uniform([3, 5])
lengths = constant_op.constant([4, 0, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
lengths_i = array_ops.gather(lengths, i)
_, total = control_flow_ops.while_loop(
lambda j, _: j < lengths_i,
lambda j, t: (j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
return total
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])
def test_while_jacobian(self):
x = random_ops.random_uniform([1, 3])
y = random_ops.random_uniform([3, 3])
# out = x @ y @ y @ y @ y, where @ is matmul operator.
_, out = control_flow_ops.while_loop(
lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
[0, x])
def loop_fn(i):
out_i = array_ops.gather(out, i, axis=1)
return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])
out = pfor_control_flow_ops.pfor(loop_fn, iters=3)
# The above code does not work with tf.while_loop instead of pfor. So we
# manually compute the expected output here.
# Note that gradient of output w.r.t is (y @ y @ y @ y)^T.
expected_output = y
for _ in range(3):
expected_output = math_ops.matmul(expected_output, y)
expected_output = array_ops.transpose(expected_output, [1, 0])
with session.Session() as sess:
out, expected = sess.run([out, expected_output])
self.assertAllClose(expected, out)
def test_tensor_array_as_loop_variable(self):
def loop_fn(i):
def body(j, ta):
ta = ta.write(j, i + j * j)
return j + 1, ta
_, ta = control_flow_ops.while_loop(
lambda j, _: j < 4, body,
(0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))
return ta.stack()
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_read_tensor_array_partitioned_indices(self):
# Note that tensor array values are pfor loop dependent, and the while loop
# termination condition is also dependent on pfor iteration.
def loop_fn(i):
ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)
ta = ta.unstack(i + list(range(5)))
def body(j, s):
return j + 1, s + ta.read(j)
_, s = control_flow_ops.while_loop(lambda j, _: j < i,
body,
(0, 0))
return s
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])
def test_external_while_loop_grad(self):
# Here we test that external while_loops that are extended from inside pfor
# (due to gradient calls) are not actually converted. If the below was
# converted all pfor iterations would write to the same tensor array
# indices.
x = constant_op.constant(1.)
def body(j, ta):
ta = ta.write(j, x)
return j + 1, ta
_, ta = control_flow_ops.while_loop(
lambda j, _: j < 4, body,
(0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))
out = ta.stack()
def loop_fn(i):
out_i = array_ops.gather(out, i)
return gradient_ops.gradients(out_i, x)[0]
with session.Session() as sess:
# out is [x, x, x]. Hence the gradients should be [1, 1, 1].
self.assertAllEqual([1, 1, 1],
sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))
def test_tensor_array_grad(self):
inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
ta = ta.unstack(inp)
def loop_fn(i):
def body(j, x):
value = ta.gather([j])
value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
return j + 1, x + value
_, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
(0, array_ops.zeros([2])))
out = math_ops.reduce_prod(out)
return out, gradient_ops.gradients(out, inp)[0]
pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
# Note that tf.while_loop does not work in the setup above. So we manually
# construct the equivalent computation of the above loops here.
real_out = math_ops.reduce_sum(inp, axis=[0])
real_out = math_ops.reduce_prod(real_out, axis=[1])
# Note that gradients of real_out will accumulate the gradients across the
# output value. Hence we do the same aggregation on pfor_out_grad.
real_out_grad = gradient_ops.gradients(real_out, inp)[0]
sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])
with session.Session() as sess:
v1, v2, v1_grad, v2_grad = sess.run(
[pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
self.assertAllClose(v1, v2)
self.assertAllClose(v1_grad, v2_grad)
def dynamic_lstm_input_fn(batch_size, state_size, max_steps):
  """Builds constant (inputs, sequence_length) tensors for the RNN tests.

  Constants are used so that multiple session.run calls observe the same
  values.
  """
  input_values = np.random.rand(batch_size, max_steps, state_size)
  inputs = constant_op.constant(input_values, dtype=dtypes.float32)
  lengths = np.random.randint(0, high=max_steps + 1, size=[batch_size])
  sequence_length = constant_op.constant(lengths, dtype=dtypes.int32)
  return inputs, sequence_length
def create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):
  """Builds the same recurrence twice: via pfor and via rnn.dynamic_rnn.

  Args:
    cell_fn: callable mapping state_size to an RNNCell instance.
    batch_size: number of sequences / pfor iterations.
    state_size: cell state (and input) width.
    max_steps: maximum number of time steps.

  Returns:
    A (pfor_output, tf_output) pair computing equivalent values, so callers
    can assert the two implementations agree.
  """
  cell = cell_fn(state_size)
  inputs, sequence_length = dynamic_lstm_input_fn(batch_size,
                                                  state_size,
                                                  max_steps)
  inputs_ta = tensor_array_ops.TensorArray(
      dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])
  # Time-major layout so that inputs_ta.read(t) yields the batch at step t.
  inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])
  inputs_ta = inputs_ta.unstack(inputs_time_major)
  zeros = array_ops.zeros([state_size])

  def loop_fn(i):
    # Each pfor iteration runs the cell over a single sequence i.
    sequence_length_i = array_ops.gather(sequence_length, i)

    def body_fn(t, state, ta):
      inputs_t = array_ops.expand_dims(
          array_ops.gather(inputs_ta.read(t), i), 0)
      output, new_state = cell(inputs_t, state)
      output = array_ops.reshape(output, [-1])
      # TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
      # array_ops.where when t < min(sequence_length). Doing that requires
      # supporting tf.cond pfor conversion.
      done = t >= sequence_length_i
      # Past the sequence end, emit zeros and stop updating the state, mirroring
      # dynamic_rnn's sequence_length handling.
      output = array_ops.where(done, zeros, output)
      ta = ta.write(t, output)
      new_state = [array_ops.where(done, s, ns) for s, ns in
                   zip(nest.flatten(state), nest.flatten(new_state))]
      new_state = nest.pack_sequence_as(state, new_state)
      return t + 1, new_state, ta

    def condition_fn(t, _, unused):
      del unused
      return t < max_steps

    initial_state = cell.zero_state(1, dtypes.float32)
    _, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
        0, initial_state,
        tensor_array_ops.TensorArray(dtypes.float32, max_steps)
    ])
    new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
    new_state = nest.pack_sequence_as(initial_state, new_state)
    return ta.stack(), new_state

  pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)
  tf_output = rnn.dynamic_rnn(
      cell,
      inputs,
      sequence_length=sequence_length,
      initial_state=cell.zero_state(batch_size, dtypes.float32))
  return pfor_output, tf_output
class RNNTest(PForTest):
  """Compares pfor-based dynamic RNNs against rnn.dynamic_rnn."""

  def test_dynamic_rnn(self):
    pfor_outputs, tf_outputs = create_dynamic_lstm(
        rnn_cell.BasicRNNCell, 3, 5, 7)
    self.run_and_assert_equal(pfor_outputs, tf_outputs)

  def test_dynamic_lstm(self):
    pfor_outputs, tf_outputs = create_dynamic_lstm(
        rnn_cell.BasicLSTMCell, 3, 5, 7)
    self.run_and_assert_equal(pfor_outputs, tf_outputs)
# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop
# conversion don't look good. Some of it seems like lot of copies between host
# and device. Optimize that.
class Benchmarks(test.Benchmark):
  """Micro-benchmarks comparing pfor, for_loop and manual implementations."""

  def _run(self, targets, iters, name=None):
    """Times `targets` over `iters` session runs; reports average ms/run."""
    def _done(t):
      # Note that we don't use tf.control_dependencies since that will not make
      # sure that the computation on GPU has actually finished. So we fetch the
      # first element of the output, and assume that this will not be called on
      # empty tensors.
      return array_ops.gather(array_ops.reshape(t, [-1]), 0)

    targets = [_done(x) for x in nest.flatten(targets)]
    sess = session.Session()
    with sess:
      init = variables.global_variables_initializer()
      sess.run(init)
      run_fn = sess.make_callable(targets)
      run_fn()  # Warm up
      begin = time.time()
      for _ in range(iters):
        run_fn()
      end = time.time()
      avg_time_ms = 1000 * (end - begin) / iters
      self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
      return avg_time_ms

  def benchmark_sess_run_overhead(self):
    # Baseline: cost of a session run that computes almost nothing.
    with ops.Graph().as_default():
      x = constant_op.constant(1.0)
      self._run(x, 10000, name="session_run_overhead")

  def benchmark_add(self):
    with ops.Graph().as_default():
      n = 256
      params = 1000
      x = random_ops.random_normal([n, params])
      y = random_ops.random_normal([n, params])

      def loop_fn(i):
        x_i = array_ops.gather(x, i)
        y_i = array_ops.gather(y, i)
        return x_i + y_i

      pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
      while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
      manual = x + y

      self._run(manual, 1000, name="manual_add")
      self._run(pfor_outputs, 1000, name="pfor_add")
      self._run(while_outputs, 100, name="while_add")

  def benchmark_matmul(self):
    with ops.Graph().as_default():
      n = 1024
      params = 1000
      x = random_ops.random_normal([n, params])
      y = random_ops.random_normal([params, params])

      def loop_fn(i):
        x_i = array_ops.expand_dims(array_ops.gather(x, i), 0)
        return math_ops.matmul(x_i, y)

      pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
      while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
      manual = math_ops.matmul(x, y)

      self._run(manual, 1000, name="manual_matmul")
      self._run(pfor_outputs, 1000, name="pfor_matmul")
      self._run(while_outputs, 100, name="while_matmul")

  def benchmark_map_fn(self):
    # Compares functional_ops.map_fn against a pfor-based equivalent.
    with ops.Graph().as_default():
      b = 256
      params = 1000
      inp = random_ops.random_normal((b, params))
      map_fn = lambda x: x * x

      def pfor_map_fn(f, x):
        return pfor_control_flow_ops.pfor(
            lambda i: f(array_ops.gather(x, i)),
            array_ops.shape(x)[0])

      map_output = functional_ops.map_fn(map_fn, inp)
      pfor_output = pfor_map_fn(map_fn, inp)

      self._run(map_output, 100, name="tf_map_fn")
      self._run(pfor_output, 100, name="pfor_map_fn")

  def benchmark_basic_while(self):
    with ops.Graph().as_default():

      def loop_fn(i):
        _, s = control_flow_ops.while_loop(
            lambda t, x: t < i,
            lambda t, x: (t + 1, x + i),
            [0, 0])
        return s

      iters = 50
      pfor_output = pfor_control_flow_ops.pfor(loop_fn, iters)
      for_loop_output = pfor_control_flow_ops.for_loop(loop_fn, dtypes.int32,
                                                       iters)
      self._run(pfor_output, 100, name="pfor_basic")
      self._run(for_loop_output, 100, name="for_loop_basic")

  def benchmark_dynamic_rnn(self):
    with ops.Graph().as_default():
      pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,
                                                     128, 512, 16)
      self._run(pfor_outputs, 100, name="pfor_rnn")
      self._run(tf_outputs, 100, name="tf_rnn")
class SparseTest(PForTest):
  """Tests pfor conversion of loop bodies that return SparseTensors."""

  def test_var_loop_len(self):
    # The number of iterations is only known at session.run time.
    num_iters = array_ops.placeholder(dtypes.int32)

    def loop_fn(_):
      return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # [0, 2, 0]

    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    with self.cached_session() as sess:
      sess.run(pfor, feed_dict={num_iters: 3})

  def test_sparse_result_none_stacked(self):
    # None of indices/values/shape depend on the pfor index.
    num_iters = 10

    def loop_fn(_):
      return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # [0, 2, 0]

    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)

    indices = [[i, j] for i in range(num_iters) for j in range(3)]
    values = [4, 5, 6] * num_iters
    dense_shapes = [num_iters, 3]
    # Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]
    manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)
    self.run_and_assert_equal(pfor, manual)

  def test_sparse_result_all_stacked(self):
    # indices, values and dense_shape all depend on the pfor index.
    num_iters = 10

    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      indices = array_ops.expand_dims(i, 0)
      return sparse_tensor.SparseTensor(indices, i, i + 1)  # [0, ..., 0, i]

    # Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
                                        list(range(num_iters)),
                                        (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  def test_sparse_result_indices_stacked(self):
    # Only indices depend on the pfor index.
    num_iters = 10

    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      indices = array_ops.expand_dims(i, 0)
      return sparse_tensor.SparseTensor(indices, [1], [num_iters])

    # Expected result: identity matrix size num_iters * num_iters
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
                                        [1] * num_iters, (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  def test_sparse_result_values_stacked(self):
    # Only values depend on the pfor index.
    num_iters = 10

    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      return sparse_tensor.SparseTensor([[0]], i, [num_iters])  # [i, 0, ..., 0]

    # Expected result: [[1, 0, ...], [2, 0, ...], [3, 0, ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
                                        list(range(num_iters)),
                                        (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  def test_sparse_result_shapes_stacked(self):
    # Only the dense shape depends on the pfor index.
    num_iters = 10

    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      return sparse_tensor.SparseTensor([[0]], [1], i + 1)  # [1, 0, ..., 0]

    # Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
                                        [1] * num_iters, (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  def test_sparse_result_shapes_stacked_2D(self):
    # As above but with rank-2 per-iteration results.
    num_iters = 10

    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)
      shape = array_ops.concat([i, i], 0)
      return sparse_tensor.SparseTensor([[0, 0]], [1], shape)  # [1, 0, ..., 0]

    # Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],
                                        [1] * num_iters,
                                        (num_iters, num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
class ParsingTest(PForTest):
  """Tests vectorization of input-parsing ops."""

  def test_decode_csv(self):
    csv_tensor = constant_op.constant([["1:2:3"], ["::"], ["7:8:9"]])
    kwargs = {"record_defaults": [[10], [20], [30]], "field_delim": ":"}

    def loop_fn(i):
      line = array_ops.gather(csv_tensor, i)
      return parsing_ops.decode_csv(line, **kwargs)

    self._test_loop_fn(loop_fn, iters=3, loop_fn_dtypes=[dtypes.int32] * 3)

  def test_parse_single_example(self):
    # Vectorized parse_single_example should match a single parse_example call
    # over the same batch of serialized protos.

    def _int64_feature(*values):
      return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))

    def _bytes_feature(*values):
      return feature_pb2.Feature(
          bytes_list=feature_pb2.BytesList(
              value=[v.encode("utf-8") for v in values]))

    examples = constant_op.constant([
        example_pb2.Example(
            features=feature_pb2.Features(
                feature={
                    "dense_int": _int64_feature(i),
                    "dense_str": _bytes_feature(str(i)),
                    "sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
                    "sparse_str": _bytes_feature(*["abc"] * i)
                })).SerializeToString() for i in range(10)
    ])
    features = {
        "dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
        "dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
        "sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
        "sparse_str": parsing_ops.VarLenFeature(dtypes.string),
    }

    def loop_fn(i):
      example_proto = array_ops.gather(examples, i)
      f = parsing_ops.parse_single_example(example_proto, features)
      return f

    pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)
    manual = parsing_ops.parse_example(examples, features)
    self.run_and_assert_equal(pfor, manual)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
Remove broken test that computed gradients of fused batch norm in inference
mode.
PiperOrigin-RevId: 224431514
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pfor and for_loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import flags
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@test_util.run_all_in_graph_and_eager_modes
class PForTest(test.TestCase):
  """Base class with helpers for comparing pfor against reference loops."""

  def _run_targets(self, targets1, targets2=None, run_init=True):
    """Evaluates both (flattened) target structures in a single evaluation."""
    targets1 = nest.flatten(targets1)
    targets2 = ([] if targets2 is None else nest.flatten(targets2))
    assert len(targets1) == len(targets2) or not targets2
    if run_init:
      init = variables.global_variables_initializer()
      self.evaluate(init)
    return self.evaluate(targets1 + targets2)

  def run_and_assert_equal(self, targets1, targets2):
    """Checks that corresponding outputs of targets1 and targets2 match."""
    outputs = self._run_targets(targets1, targets2)
    outputs = nest.flatten(outputs)  # flatten SparseTensorValues
    n = len(outputs) // 2
    for i in range(n):
      # Use the builtin `object` rather than `np.object`: the NumPy alias was
      # just `object` anyway, was deprecated in NumPy 1.20 and removed in 1.24.
      # Object-dtype arrays (e.g. strings) need exact equality, not a numeric
      # tolerance.
      if outputs[i + n].dtype != object:
        self.assertAllClose(outputs[i + n], outputs[i], rtol=1e-4, atol=1e-5)
      else:
        self.assertAllEqual(outputs[i + n], outputs[i])

  def _test_loop_fn(self, loop_fn, iters,
                    loop_fn_dtypes=dtypes.float32,
                    parallel_iterations=None):
    """Runs loop_fn via both pfor and for_loop and compares the outputs."""
    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=iters,
                                    parallel_iterations=parallel_iterations)
    t2 = pfor_control_flow_ops.for_loop(loop_fn, loop_fn_dtypes, iters=iters,
                                        parallel_iterations=parallel_iterations)
    self.run_and_assert_equal(t1, t2)

  def test_op_conversion_fallback_to_while_loop(self):
    # Note that we used top_k op for this test. If a converter gets defined for
    # it, we will need to find another op for which a converter has not been
    # defined.
    x = random_ops.random_uniform([3, 2, 4])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return nn.top_k(x_i)

    with self.assertRaisesRegexp(ValueError, "No converter defined"):
      self._test_loop_fn(
          loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
    flags.FLAGS.op_conversion_fallback_to_while_loop = True
    self._test_loop_fn(
        loop_fn, 3, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
    flags.FLAGS.op_conversion_fallback_to_while_loop = False

  def test_parallel_iterations(self):
    for parallel_iterations in [2, 3, 8, 10]:
      x = random_ops.random_uniform([8, 3])

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        return array_ops.gather(x, i)
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 8, parallel_iterations=parallel_iterations)
      self._test_loop_fn(loop_fn, 4 * constant_op.constant(2),
                         parallel_iterations=parallel_iterations)

  def test_parallel_iterations_zero(self):
    with self.assertRaisesRegexp(ValueError, "positive integer"):
      pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=0)
    with self.assertRaisesRegexp(TypeError, "positive integer"):
      pfor_control_flow_ops.for_loop(lambda i: 1, dtypes.int32, 8,
                                     parallel_iterations=0)

  def test_parallel_iterations_one(self):
    with self.assertRaisesRegexp(ValueError, "Use for_loop instead"):
      pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=1)
@test_util.run_all_in_graph_and_eager_modes
class ArrayTest(PForTest):
  """Tests vectorization of array ops; each test targets the op it names."""

  def test_gather(self):
    x = random_ops.random_uniform([3, 3, 3])

    def loop_fn(i):
      outputs = []
      x_i = array_ops.gather(x, i)
      for y in [x, x_i]:
        # Identity check instead of `==`: for TF1 graph Tensors `==` already
        # degraded to identity comparison, and `is` keeps that intent explicit
        # (and unambiguous if `==` ever becomes elementwise).
        axes = [0, 2, -1] if y is x else [0]
        for axis in axes:
          outputs.append(array_ops.gather(y, 2, axis=axis))
          outputs.append(array_ops.gather(y, i, axis=axis))
          outputs.append(array_ops.gather(y, [i], axis=axis))
          outputs.append(array_ops.gather(y, [i, 2], axis=axis))
          outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))
      return outputs

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 20)

  def test_shape(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.shape(x_i), array_ops.shape(x_i, out_type=dtypes.int64)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])

  def test_size(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.size(x_i), array_ops.size(x_i, out_type=dtypes.int64)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int64])

  def test_rank(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.rank(x_i)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])

  def test_shape_n(self):
    x = random_ops.random_uniform([3, 2, 3])
    y = random_ops.random_uniform([3])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      y_i = array_ops.gather(y, i)
      return array_ops.shape_n([x_i, x, y, y_i]), array_ops.shape_n(
          [x_i, x, y, y_i], out_type=dtypes.int64)

    self._test_loop_fn(
        loop_fn, 3, loop_fn_dtypes=[dtypes.int32] * 4 + [dtypes.int64] * 4)

  def test_reshape(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.reshape(x1, [-1]), array_ops.reshape(x1, [1, 3, 1, -1])

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_expand_dims(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.expand_dims(
          x1, axis=-1), array_ops.expand_dims(
              x1, axis=1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_slice(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.slice(x1, begin=(0, 1), size=(2, 1))

    self._test_loop_fn(loop_fn, 3)

  def test_tile(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.tile(x1, [2, 1])

    self._test_loop_fn(loop_fn, 3)

  def test_tile_loop_dependent(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.tile(x1, [i, 1])

    # Loop-dependent multiples produce ragged outputs and are unsupported.
    with self.assertRaisesRegexp(ValueError, "expected to be loop invariant"):
      pfor_control_flow_ops.pfor(loop_fn, 2)

  def test_pack(self):
    x = random_ops.random_uniform([3, 2, 3])
    y = random_ops.random_uniform([2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.stack([x1, y], axis=-1)

    self._test_loop_fn(loop_fn, 1)

  def test_unpack(self):
    x = random_ops.random_uniform([3, 2, 3, 4])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.unstack(
          x_i, 4, axis=-1), array_ops.unstack(
              x_i, 3, axis=1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 7)

  def test_pad(self):
    x = random_ops.random_uniform([3, 2, 3])
    padding = constant_op.constant([[1, 2], [3, 4]])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.pad(x1, padding, mode="CONSTANT")

    self._test_loop_fn(loop_fn, 3)

  def test_split(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.split(x1, 2, axis=0), array_ops.split(x1, 3, axis=-1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 5)

  def test_split_v(self):
    x = random_ops.random_uniform([3, 6, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return (array_ops.split(x1, [2, 1, 3], axis=0),
              array_ops.split(x1, [3], axis=-1))

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 4)

  def test_transpose(self):
    x = random_ops.random_uniform([3, 2, 3, 4])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.transpose(x1, [2, 1, 0])

    self._test_loop_fn(loop_fn, 3)

  def test_zeros_like(self):
    x = random_ops.random_uniform([3, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      # Removed a stray trailing comma that wrapped `z` in a 1-tuple; `z` is
      # meant to be the zeros tensor itself.
      z = array_ops.zeros_like(x1)
      return z, z + x1

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_concat_v2(self):
    x = random_ops.random_uniform([3, 2, 3])
    y = random_ops.random_uniform([2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return array_ops.concat(
          [x1, x1, y], axis=0), array_ops.concat(
              [x1, x1, y], axis=-1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_unary_cwise_ops(self):
    for op in [array_ops.identity, array_ops.stop_gradient]:
      with backprop.GradientTape(persistent=True) as g:
        x = random_ops.random_uniform([3, 5])
        g.watch(x)

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        with g:
          x1 = array_ops.gather(x, i)
          y = op(x1) + x1
          loss = nn.l2_loss(y)
        return op(x), y, g.gradient(loss, x1)
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)

  def test_identity_n(self):
    x = random_ops.random_uniform([3, 4])

    def loop_fn(i):
      return array_ops.identity_n([x, array_ops.gather(x, i)])

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_matrix_diag_part(self):
    x = random_ops.random_uniform([3, 4, 2])

    def loop_fn(i):
      return array_ops.matrix_diag_part(array_ops.gather(x, i))

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])

  def test_strided_slice(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])
      g.watch(x)

    def loop_fn(i):
      with g:
        x_i = array_ops.gather(x, i)
        y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
        loss = nn.l2_loss(y)
      return y, g.gradient(loss, x_i)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
@test_util.run_all_in_graph_and_eager_modes
class BitwiseTest(PForTest):
  """Tests vectorization of bitwise ops."""

  def test_unary_cwise(self):
    for op in [bitwise_ops.invert]:
      x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x1 = array_ops.gather(x, i)
        return op(x1)
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])

  def test_binary_cwise(self):
    binary_ops = [
        bitwise_ops.bitwise_and,
        bitwise_ops.bitwise_or,
        bitwise_ops.bitwise_xor,
        bitwise_ops.left_shift,
        bitwise_ops.right_shift,
    ]
    for op in binary_ops:
      # x is stacked per pfor iteration; y tests broadcasting of an unstacked
      # operand against a stacked one.
      x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
      y = random_ops.random_uniform([3, 5], maxval=10, dtype=dtypes.int32)

      output_dtypes = []
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x1 = array_ops.gather(x, i)
        y1 = array_ops.gather(y, i)
        outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
        # loop_fn is traced before dtypes are known; record them via closure.
        del output_dtypes[:]
        output_dtypes.extend([t.dtype for t in outputs])
        return outputs
      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
@test_util.run_all_in_graph_and_eager_modes
class MathTest(PForTest):
def test_unary_cwise_ops(self):
complex_ops = [
math_ops.angle,
math_ops.imag,
math_ops.complex_abs,
math_ops.real,
math_ops.conj,
]
real_ops = [
lambda x: math_ops.acosh(1 + math_ops.square(x)),
math_ops.abs,
math_ops.acos,
math_ops.asin,
math_ops.asinh,
math_ops.atan,
math_ops.atanh,
math_ops.bessel_i0e,
math_ops.bessel_i1e,
math_ops.cos,
math_ops.cosh,
math_ops.digamma,
math_ops.erf,
math_ops.erfc,
math_ops.exp,
math_ops.expm1,
math_ops.inv,
math_ops.is_finite,
math_ops.is_inf,
math_ops.lgamma,
math_ops.log,
math_ops.log1p,
math_ops.neg,
math_ops.negative,
math_ops.reciprocal,
math_ops.rint,
math_ops.round,
math_ops.rsqrt,
math_ops.sigmoid,
math_ops.sign,
math_ops.sin,
math_ops.sinh,
math_ops.sqrt,
math_ops.square,
math_ops.tan,
math_ops.tanh,
math_ops.tanh,
nn.elu,
nn.relu,
nn.relu6,
nn.selu,
nn.softplus,
nn.softsign,
]
for op in complex_ops + real_ops:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
if op in complex_ops:
y = random_ops.random_uniform([3, 5])
g.watch(y)
x = math_ops.complex(x, y)
# pylint: disable=cell-var-from-loop
output_dtypes = []
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
y1 = op(x1)
outputs = [op(x), y1]
if y1.dtype == dtypes.float32:
loss = math_ops.reduce_sum(y1 * y1)
else:
loss = None
if loss is not None:
grad = g.gradient(loss, x1)
if grad is not None:
outputs.append(grad)
del output_dtypes[:]
output_dtypes.extend([t.dtype for t in outputs])
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
def test_unary_cwise_no_grad(self):
for op in [math_ops.ceil,
math_ops.floor,
math_ops.logical_not]:
x = random_ops.random_uniform([3, 5])
if op == math_ops.logical_not:
x = x > 0
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return op(array_ops.gather(x, i))
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=x.dtype)
  def test_binary_cwise_ops(self):
    """Tests elementwise binary ops over stacked/unstacked operand mixes."""
    logical_ops = [
        math_ops.logical_and,
        math_ops.logical_or,
        math_ops.logical_xor
    ]

    # Wrapper functions restricting the range of inputs of zeta and polygamma.
    def safe_polygamma(x, y):
      return math_ops.polygamma(
          math_ops.round(clip_ops.clip_by_value(y, 1, 10)),
          x * x + 1)

    def safe_zeta(x, y):
      return math_ops.zeta(x * x + 1, y * y)

    float_ops = [
        math_ops.add,
        math_ops.add_v2,
        math_ops.atan2,
        math_ops.complex,
        math_ops.div,
        math_ops.divide,
        math_ops.div_no_nan,
        math_ops.equal,
        math_ops.floor_div,
        math_ops.floor_mod,
        math_ops.greater,
        math_ops.greater_equal,
        math_ops.igamma,
        math_ops.igammac,
        math_ops.igamma_grad_a,
        math_ops.less,
        math_ops.less_equal,
        math_ops.maximum,
        math_ops.minimum,
        math_ops.mod,
        math_ops.multiply,
        math_ops.not_equal,
        math_ops.pow,
        math_ops.squared_difference,
        math_ops.subtract,
        math_ops.truncate_mod,
        safe_polygamma,
        safe_zeta,
    ]
    for op in logical_ops + float_ops:
      x = random_ops.random_uniform([7, 3, 5])
      y = random_ops.random_uniform([3, 5])
      if op in logical_ops:
        x = x > 0
        y = y > 0
      output_dtypes = []

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x1 = array_ops.gather(x, i)
        y1 = array_ops.gather(y, i)
        outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
        # Record the output dtypes via closure; they vary per op.
        del output_dtypes[:]
        output_dtypes.extend([t.dtype for t in outputs])
        return outputs
      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=output_dtypes)
def test_approximate_equal(self):
x = random_ops.random_uniform([3, 5])
y = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
return math_ops.approximate_equal(x1, y1)
self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.bool])
def test_addn(self):
x = random_ops.random_uniform([2, 3, 5])
y = random_ops.random_uniform([3, 5])
z = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return math_ops.add_n([x1, y, z])
self._test_loop_fn(loop_fn, 2)
  def test_matmul(self):
    """Sweeps transpose flags and stacked/unstacked operand combinations."""
    for tr_a in (True, False):
      for tr_b in (True, False):
        for stack_a in (True, False):
          for stack_b in (True, False):
            shape_a = (5, 3) if tr_a else (3, 5)
            if stack_a:
              shape_a = (2,) + shape_a
            shape_b = (7, 5) if tr_b else (5, 7)
            if stack_b:
              shape_b = (2,) + shape_b
            x = random_ops.random_uniform(shape_a)
            y = random_ops.random_uniform(shape_b)

            # pylint: disable=cell-var-from-loop
            def loop_fn(i):
              a = array_ops.gather(x, i) if stack_a else x
              b = array_ops.gather(y, i) if stack_b else y
              return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
            # pylint: enable=cell-var-from-loop

            self._test_loop_fn(loop_fn, 2)
  def test_batch_matmul(self):
    """Same sweep as test_matmul but with batched (rank-3) operands."""
    for tr_a in (True, False):
      for tr_b in (True, False):
        for stack_a in (True, False):
          for stack_b in (True, False):
            shape_a = (4, 5, 3) if tr_a else (4, 3, 5)
            if stack_a:
              shape_a = (2,) + shape_a
            shape_b = (4, 7, 5) if tr_b else (4, 5, 7)
            if stack_b:
              shape_b = (2,) + shape_b
            x = random_ops.random_uniform(shape_a)
            y = random_ops.random_uniform(shape_b)

            # pylint: disable=cell-var-from-loop
            def loop_fn(i):
              a = array_ops.gather(x, i) if stack_a else x
              b = array_ops.gather(y, i) if stack_b else y
              return math_ops.matmul(a, b, transpose_a=tr_a, transpose_b=tr_b)
            # pylint: enable=cell-var-from-loop

            self._test_loop_fn(loop_fn, 2)
def test_reduction(self):
  """Runs each reduction op over several axis/keepdims combinations."""
  data = random_ops.random_uniform([2, 3, 4, 5])
  reducers = [
      math_ops.reduce_sum, math_ops.reduce_prod, math_ops.reduce_max,
      math_ops.reduce_min
  ]
  for reducer in reducers:
    for axis in ([1], None, [0, 2]):
      for keepdims in (True, False):
        # pylint: disable=cell-var-from-loop
        def loop_fn(idx):
          return reducer(
              array_ops.gather(data, idx), axis=axis, keepdims=keepdims)
        # pylint: enable=cell-var-from-loop

        self._test_loop_fn(loop_fn, 2)
def test_cum_sum(self):
  """Checks cumsum over positive and negative axes with all flag settings."""
  data = random_ops.random_uniform([2, 3, 4, 5])
  for axis in (1, -2):
    for exclusive in (True, False):
      for reverse in (True, False):
        # pylint: disable=cell-var-from-loop
        def loop_fn(idx):
          return math_ops.cumsum(
              array_ops.gather(data, idx),
              axis=axis,
              exclusive=exclusive,
              reverse=reverse)
        # pylint: enable=cell-var-from-loop

        self._test_loop_fn(loop_fn, 2)
def test_cum_prod(self):
  """Checks cumprod over positive and negative axes with all flag settings."""
  data = random_ops.random_uniform([2, 3, 4, 5])
  for axis in (1, -2):
    for exclusive in (True, False):
      for reverse in (True, False):
        # pylint: disable=cell-var-from-loop
        def loop_fn(idx):
          return math_ops.cumprod(
              array_ops.gather(data, idx),
              axis=axis,
              exclusive=exclusive,
              reverse=reverse)
        # pylint: enable=cell-var-from-loop

        self._test_loop_fn(loop_fn, 2)
def test_bias_add(self):
  """Checks pfor conversion of nn.bias_add and its gradient w.r.t. the bias.

  Runs for both NCHW and NHWC; the bias length matches the channel
  dimension implied by the data format.
  """
  x_shape = [2, 3, 4, 5, 6]
  x = random_ops.random_uniform(x_shape)
  for data_format in ("NCHW", "NHWC"):
    with backprop.GradientTape(persistent=True) as g:
      # Channel axis: index 2 of x_shape for NCHW (axis 0 is the pfor
      # dimension), last axis for NHWC.
      bias_dim = 2 if data_format == "NCHW" else -1
      bias_shape = x_shape[bias_dim]
      bias = random_ops.random_uniform([bias_shape])
      g.watch(bias)

    # pylint: disable=cell-var-from-loop
    def loop_fn(i):
      # Re-enters the persistent tape so per-iteration ops are recorded.
      with g:
        a = array_ops.gather(x, i)
        y = nn.bias_add(a, bias, data_format=data_format)
        loss = math_ops.reduce_sum(y * y)
      return y, g.gradient(loss, bias)
    # pylint: enable=cell-var-from-loop

    self._test_loop_fn(
        loop_fn, 2, loop_fn_dtypes=[dtypes.float32, dtypes.float32])
def test_unsorted_segment_sum(self):
  """Checks segment sums with stacked data/ids and with invariant data."""
  t = random_ops.random_uniform([3, 3, 2])
  segment_ids = constant_op.constant([[0, 0, 2], [0, 1, 2], [2, 2, 2]])
  num_segments = 3

  def loop_fn(idx):
    ids_i = array_ops.gather(segment_ids, idx)
    stacked = math_ops.unsorted_segment_sum(
        array_ops.gather(t, idx), ids_i, num_segments)
    invariant = math_ops.unsorted_segment_sum(
        array_ops.gather(t, 0), ids_i, num_segments)
    return stacked, invariant

  self._test_loop_fn(loop_fn, 3, [dtypes.float32] * 2)
def test_cast(self):
  """Casts int->float and float->int inside the loop."""
  ints = constant_op.constant([[1], [2]])
  floats = constant_op.constant([[1.0], [2.0]])

  def loop_fn(idx):
    as_float = math_ops.cast(array_ops.gather(ints, idx), dtypes.float32)
    as_int = math_ops.cast(array_ops.gather(floats, idx), dtypes.int32)
    return as_float, as_int

  self._test_loop_fn(
      loop_fn, 2, loop_fn_dtypes=[dtypes.float32, dtypes.int32])
def test_tanh_axpy(self):
  """Checks a fused tanh(a * x + y) expression inside the loop."""
  scale = constant_op.constant(3.)
  x = random_ops.random_uniform([4, 5])
  y = random_ops.random_uniform([6, 5])
  iters = x.shape[0]

  def loop_fn(idx):
    axpy = scale * array_ops.gather(x, idx) + array_ops.gather(y, idx)
    return math_ops.tanh(axpy)

  self._test_loop_fn(loop_fn, iters)
def test_select(self):
  """Checks array_ops.where with condition ranks 1 through 3.

  The condition and both branch values are stacked (loop dependent).
  """
  # Removed a dead `cond = constant_op.constant([True, False])` assignment
  # that was unconditionally overwritten on the first loop iteration.
  a = random_ops.random_uniform([2, 3, 5])
  b = random_ops.random_uniform([2, 3, 5])
  for cond_shape in [2], [2, 3], [2, 3, 5]:
    cond = random_ops.random_uniform(cond_shape) > 0.5

    # pylint: disable=cell-var-from-loop
    def loop_fn(i):
      a_i = array_ops.gather(a, i)
      b_i = array_ops.gather(b, i)
      cond_i = array_ops.gather(cond, i)
      return array_ops.where(cond_i, a_i, b_i)
    # pylint: enable=cell-var-from-loop

    self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class NNTest(PForTest):
  """Tests pfor conversion of tf.nn ops (conv, pooling, batch norm, losses)."""

  def test_conv2d(self):
    # x is [num_iters, batch, h, w, channels]; each iteration convolves one
    # [batch, h, w, channels] slice with a shared (loop invariant) filter.
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    filt = random_ops.random_uniform([3, 3, 3, 7])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return nn.conv2d(
          x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")

    self._test_loop_fn(loop_fn, 3)

  def test_conv2d_backprop_input(self):
    # Gradient w.r.t. the conv input: output grads are stacked, the filter
    # and the input shape are loop invariant.
    x_shape = [2, 12, 12, 3]
    filt = random_ops.random_uniform([3, 3, 3, 7])
    grad = random_ops.random_uniform([3, 2, 5, 5, 7])

    def loop_fn(i):
      grad1 = array_ops.gather(grad, i)
      return nn.conv2d_backprop_input(
          x_shape,
          filt,
          grad1,
          strides=[1, 2, 2, 1],
          padding="VALID",
          data_format="NHWC")

    self._test_loop_fn(loop_fn, 3)

  def test_conv2d_backprop_filter(self):
    # Gradient w.r.t. the filter, once with a stacked input (x_i) and once
    # with a loop-invariant one (x_0).
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    x_0 = array_ops.gather(x, 0)
    filter_sizes = [3, 3, 3, 7]
    grad = random_ops.random_uniform([3, 2, 5, 5, 7])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      grad_i = array_ops.gather(grad, i)
      return [
          nn.conv2d_backprop_filter(
              inp,
              filter_sizes,
              grad_i,
              strides=[1, 2, 2, 1],
              padding="VALID",
              data_format="NHWC") for inp in [x_i, x_0]
      ]

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_avg_pool(self):
    # Pools each slice and checks the gradient of an l2 loss w.r.t. the
    # gathered input.
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]

    def loop_fn(i):
      # Re-enters the persistent tape to record per-iteration ops.
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.avg_pool(
            x1, ksize, strides=[1, 2, 2, 1], padding="VALID",
            data_format="NHWC")
        loss = nn.l2_loss(output)
      return output, g.gradient(loss, x1)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)

  def test_max_pool(self):
    # Also takes a second-order gradient (grad_grad) through the pooling op.
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]
      strides = [1, 2, 2, 1]

    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.max_pool(
            x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
        loss = nn.l2_loss(output)
        # `ones` is watched so we can differentiate `grad` w.r.t. it below.
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)

  def test_max_pool3d(self):
    # 3-D variant of test_max_pool, including the second-order gradient.
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 1, 3, 3, 1]
      strides = [1, 1, 2, 2, 1]

    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.max_pool3d(
            x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
        loss = nn.l2_loss(output)
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 3)

  def test_fused_batch_norm(self):
    # NCHW is only exercised when a GPU is available.
    data_formats = ["NHWC"]
    if test.is_gpu_available():
      data_formats.append("NCHW")
    for is_training in (True, False):
      for data_format in data_formats:
        with backprop.GradientTape(persistent=True) as g:
          if data_format == "NCHW":
            x = random_ops.random_uniform([3, 1, 2, 5, 5])
          else:
            x = random_ops.random_uniform([3, 1, 5, 5, 2])
          g.watch(x)
          scale = random_ops.random_uniform([2])
          g.watch(scale)
          offset = random_ops.random_uniform([2])
          g.watch(offset)
          # mean/variance are only supplied in inference mode.
          mean = None if is_training else random_ops.random_uniform([2])
          variance = None if is_training else random_ops.random_uniform([2])

        # pylint: disable=cell-var-from-loop
        def loop_fn(i):
          with g:
            x1 = array_ops.gather(x, i)
            outputs = nn.fused_batch_norm(
                x1,
                scale,
                offset,
                mean=mean,
                variance=variance,
                epsilon=0.01,
                data_format=data_format,
                is_training=is_training)
            outputs = list(outputs)
            # We only test the first value of outputs when is_training is
            # False. It looks like CPU and GPU have different outputs for
            # batch_mean and batch_variance for this case.
            if not is_training:
              outputs[1] = constant_op.constant(0.)
              outputs[2] = constant_op.constant(0.)
            loss = nn.l2_loss(outputs[0])
          if is_training:
            gradients = g.gradient(loss, [x1, scale, offset])
          else:
            gradients = [constant_op.constant(0.)] * 3
          return outputs + gradients
        # pylint: enable=cell-var-from-loop

        self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 6)

  def test_softmax_cross_entropy_with_logits(self):
    with backprop.GradientTape(persistent=True) as g:
      logits = random_ops.random_uniform([3, 2, 4])
      g.watch(logits)
      labels = random_ops.random_uniform([3, 2, 4])
      # Normalize so each label row is a valid probability distribution.
      labels /= math_ops.reduce_sum(labels, axis=[2], keepdims=True)

    def loop_fn(i):
      with g:
        logits_i = array_ops.gather(logits, i)
        labels_i = array_ops.gather(labels, i)
        loss = nn.softmax_cross_entropy_with_logits(
            labels=labels_i, logits=logits_i)
        total_loss = math_ops.reduce_sum(loss)
      return loss, g.gradient(total_loss, logits_i)

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32] * 2)
class RandomTest(PForTest):
  """Tests pfor conversion of random ops.

  The random values generated in the two implementations are not guaranteed
  to match, so only the returned shapes are compared.
  """

  def run_and_assert_equal(self, targets1, targets2):
    outputs = self._run_targets(targets1, targets2)
    half = len(outputs) // 2
    for first, second in zip(outputs[:half], outputs[half:]):
      self.assertAllEqual(first.shape, second.shape)

  def test_random_uniform(self):
    self._test_loop_fn(lambda _: random_ops.random_uniform([3]), 5)

  def test_random_uniform_int(self):
    loop_fn = lambda _: random_ops.random_uniform(
        [3], maxval=1, dtype=dtypes.int32)
    self._test_loop_fn(loop_fn, 5, loop_fn_dtypes=dtypes.int32)

  def test_random_standard_normal(self):
    self._test_loop_fn(lambda _: random_ops.random_normal([3]), 5)

  def test_truncated_normal(self):
    self._test_loop_fn(lambda _: random_ops.truncated_normal([3]), 5)

  def test_random_gamma(self):
    self._test_loop_fn(lambda _: random_ops.random_gamma([3], alpha=[0.5]), 5)

  def test_random_poisson_v2(self):
    self._test_loop_fn(
        lambda _: random_ops.random_poisson(lam=[1.3], shape=[3]), 5)
class LoggingTest(PForTest):
  """Tests pfor conversion of logging and assertion ops."""

  def test_print(self):
    data = random_ops.random_uniform([3, 5])

    def loop_fn(idx):
      row = array_ops.gather(data, idx)
      return logging_ops.Print(
          row, [row, "x1", array_ops.shape(row)], summarize=10)

    self._test_loop_fn(loop_fn, 3)

  def test_assert(self):

    def loop_fn(idx):
      return control_flow_ops.Assert(idx < 10, [idx, [10], [idx + 1]])

    # TODO(agarwal): make this work with for_loop.
    with session.Session() as sess:
      sess.run(pfor_control_flow_ops.pfor(loop_fn, 3))
class TensorArrayTest(PForTest):
  """Tests pfor conversion of TensorArray reads, writes, scatters, gathers."""

  def test_create_outside_and_read(self):
    # TA created outside the loop; read with a loop-dependent index and a
    # constant index.
    ta = tensor_array_ops.TensorArray(
        dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)

    def loop_fn(i):
      return ta.read(i), ta.read(0)

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)

  def test_create_outside_and_gather(self):
    ta = tensor_array_ops.TensorArray(
        dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)

    def loop_fn(i):
      return ta.gather([i]), ta.gather([0, 1])

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)

  def test_create_outside_and_write_and_scatter(self):
    # Writes/scatters into a shared handle; compares the final stacked
    # contents produced via pfor against an explicit for_loop.
    t = tensor_array_ops.TensorArray(dtypes.int32, 10, clear_after_read=False)
    handle = t.handle

    def loop_fn(i):
      ta = t.write(i + 2, 2 * i).write(i, 5)
      ta = ta.scatter([4 + i], [4]).scatter([6 + i, 8 + i], [6 + i, 8 + i])
      return ta.flow

    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
    out1 = tensor_array_ops.TensorArray(
        dtypes.int32, handle=handle, flow=t1[-1]).stack()
    output1 = self._run_targets(out1)

    t2 = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, iters=2)
    out2 = tensor_array_ops.TensorArray(
        dtypes.int32, handle=handle, flow=t2[-1]).stack()
    output2 = self._run_targets(out2)
    self.assertAllClose(output2, output1)

  def test_create_inside_and_write(self):

    def loop_fn(i):
      # TODO(agarwal): switching the order of writes to ta1 does not work.
      ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).write(0, i).write(
          1, 1)
      ta2 = tensor_array_ops.TensorArray(dtypes.int32, 1).write(0, 1)
      return ta1.stack(), ta2.stack()

    self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)

  def test_create_inside_and_scatter(self):

    def loop_fn(i):
      # TODO(agarwal): switching the order of scatter to ta1 does not work.
      ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).scatter(
          [0], [[i, 2]]).scatter([1], [[1, 2]])
      ta2 = tensor_array_ops.TensorArray(dtypes.int32,
                                         2).scatter([0], [3]).scatter([1], [4])
      return ta1.stack(), ta2.stack()

    self._test_loop_fn(loop_fn, 3, [dtypes.int32] * 2)

  def test_create_inside_and_read(self):

    def loop_fn(i):
      ta1 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
      # TODO(agarwal): ta1.read(i) currently is not supported.
      return ta1.read(0), ta2.read(0), ta2.read(i)

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)

  def test_create_inside_and_gather(self):

    def loop_fn(i):
      ta1 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
      # TODO(agarwal): ta1.read(i) currently is not supported.
      return ta1.gather([0, 1]), ta2.gather([0, 1]), ta2.gather([i])

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 3)

  def test_grad(self):
    # Gradient through a TA stack: compares against the analytic 2*x.
    x = random_ops.random_uniform([3, 2])
    ta = tensor_array_ops.TensorArray(
        dtypes.float32, 3, clear_after_read=False).unstack(x)
    y = math_ops.square(ta.stack())

    def loop_fn(i):
      y_i = array_ops.gather(y, i)
      grad = gradient_ops.gradients(y_i, x)[0]
      return array_ops.gather(grad, i)

    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=3)
    # y = x * x. Hence dy/dx = 2 * x.
    actual_grad = 2.0 * x
    with session.Session() as sess:
      actual_grad, computed_grad = sess.run([t1, actual_grad])
      self.assertAllClose(actual_grad, computed_grad)
class StackTest(PForTest):
  """Tests pfor conversion of stack_v2 push/pop ops.

  Control dependencies are used throughout to force a deterministic
  push/push/pop/pop ordering on the stateful stack ops.
  """

  def test_stack_inside_loop_invariant(self):
    # Stack created inside the loop; pushed values do not depend on the
    # loop index.
    def loop_fn(_):
      s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
      op1 = data_flow_ops.stack_push_v2(s, 1)
      with ops.control_dependencies([op1]):
        op2 = data_flow_ops.stack_push_v2(s, 2)
      with ops.control_dependencies([op2]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e2]):
        e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)

  def test_stack_inside_push_loop_dependent(self):
    # Same as above but the first pushed value depends on the loop index.
    def loop_fn(i):
      s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
      op1 = data_flow_ops.stack_push_v2(s, i)
      with ops.control_dependencies([op1]):
        op2 = data_flow_ops.stack_push_v2(s, 2)
      with ops.control_dependencies([op2]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e2]):
        e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    self._test_loop_fn(loop_fn, 2, [dtypes.int32] * 2)

  def test_stack_outside_pop(self):
    # Stack is filled outside the loop (5, 6, 7 from bottom to top); each
    # pfor iteration pops from the shared stack.
    s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
    op = data_flow_ops.stack_push_v2(s, 5)
    with ops.control_dependencies([op]):
      op = data_flow_ops.stack_push_v2(s, 6)
    with ops.control_dependencies([op]):
      op = data_flow_ops.stack_push_v2(s, 7)

    def loop_fn(_):
      e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e1]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    with ops.control_dependencies([op]):
      e1, e2 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
    with ops.control_dependencies([e1, e2]):
      e3 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
    v1, v2, v3 = self._run_targets([e1, e2, e3], run_init=False)
    # Both iterations observe the same top of stack (7, then 6); the final
    # pop outside the loop sees the remaining 5.
    self.assertAllEqual([7, 7], v1)
    self.assertAllEqual([6, 6], v2)
    self.assertAllEqual(5, v3)

  def test_stack_outside_push(self):
    # Pushing onto a stack created outside the loop is rejected.
    s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)

    def loop_fn(_):
      return data_flow_ops.stack_push_v2(s, 7)

    with self.assertRaisesRegexp(ValueError, "StackPushV2 not allowed.*"):
      pfor_control_flow_ops.pfor(loop_fn, iters=2)
# TODO(agarwal): test nested while_loops. This currently requires converting a
# tf.cond.
class ControlFlowTest(PForTest):
  """Tests pfor conversion of tf.while_loop in various configurations."""

  def test_while_outside_loop(self):
    # The while_loop is built outside pfor; only its result is used inside.
    x = control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])

    def loop_fn(i):
      return x + i

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])

  def test_invariant_while(self):
    # Loop body and condition are independent of the pfor iteration index.
    def loop_fn(_):
      return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])

  def test_invariant_while_with_control_dependency(self):

    def loop_fn(i):
      with ops.control_dependencies([i]):
        return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
                                           [0])

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])

  def test_while_with_stateful_ops(self):
    # Stateful (random) ops inside the body; only the counter output is
    # checked.
    def loop_fn(_):
      return control_flow_ops.while_loop(
          lambda j, x: j < 4,
          lambda j, x: (j + 1, x + random_ops.random_uniform([])), [0, 0.])[0]

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])

  def test_while_unstacked_condition(self):
    # Condition is loop invariant while the body depends on i.
    def loop_fn(i):
      return control_flow_ops.while_loop(lambda j, x: j < 4,
                                         lambda j, x: (j + 1, x + i), [0, 0])

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32, dtypes.int32])

  def test_while(self):
    # Trip count varies per pfor iteration, including a zero-trip case.
    x = random_ops.random_uniform([3, 5])
    lengths = constant_op.constant([4, 0, 2])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      lengths_i = array_ops.gather(lengths, i)

      _, total = control_flow_ops.while_loop(
          lambda j, _: j < lengths_i,
          lambda j, t: (j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
      return total

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.float32])

  def test_while_jacobian(self):
    # Computes the Jacobian of a while_loop output w.r.t. its input using
    # pfor over the output columns.
    x = random_ops.random_uniform([1, 3])
    y = random_ops.random_uniform([3, 3])

    # out = x @ y @ y @ y @ y, where @ is matmul operator.
    _, out = control_flow_ops.while_loop(
        lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
        [0, x])

    def loop_fn(i):
      out_i = array_ops.gather(out, i, axis=1)
      return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])

    out = pfor_control_flow_ops.pfor(loop_fn, iters=3)

    # The above code does not work with tf.while_loop instead of pfor. So we
    # manually compute the expected output here.
    # Note that gradient of output w.r.t is (y @ y @ y @ y)^T.
    expected_output = y
    for _ in range(3):
      expected_output = math_ops.matmul(expected_output, y)
    expected_output = array_ops.transpose(expected_output, [1, 0])

    with session.Session() as sess:
      out, expected = sess.run([out, expected_output])
      self.assertAllClose(expected, out)

  def test_tensor_array_as_loop_variable(self):
    # A TensorArray threaded through the while_loop as a loop variable.
    def loop_fn(i):

      def body(j, ta):
        ta = ta.write(j, i + j * j)
        return j + 1, ta

      _, ta = control_flow_ops.while_loop(
          lambda j, _: j < 4, body,
          (0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))
      return ta.stack()

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])

  def test_read_tensor_array_partitioned_indices(self):
    # Note that tensor array values are pfor loop dependent, and the while
    # loop termination condition is also dependent on pfor iteration.
    def loop_fn(i):
      ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)
      ta = ta.unstack(i + list(range(5)))

      def body(j, s):
        return j + 1, s + ta.read(j)

      _, s = control_flow_ops.while_loop(lambda j, _: j < i,
                                         body,
                                         (0, 0))
      return s

    self._test_loop_fn(loop_fn, 3, loop_fn_dtypes=[dtypes.int32])

  def test_external_while_loop_grad(self):
    # Here we test that external while_loops that are extended from inside
    # pfor (due to gradient calls) are not actually converted. If the below
    # was converted all pfor iterations would write to the same tensor array
    # indices.
    x = constant_op.constant(1.)

    def body(j, ta):
      ta = ta.write(j, x)
      return j + 1, ta

    _, ta = control_flow_ops.while_loop(
        lambda j, _: j < 4, body,
        (0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))
    out = ta.stack()

    def loop_fn(i):
      out_i = array_ops.gather(out, i)
      return gradient_ops.gradients(out_i, x)[0]

    with session.Session() as sess:
      # out is [x, x, x]. Hence the gradients should be [1, 1, 1].
      self.assertAllEqual([1, 1, 1],
                          sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))

  def test_tensor_array_grad(self):
    inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
    ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
    ta = ta.unstack(inp)

    def loop_fn(i):

      def body(j, x):
        value = ta.gather([j])
        value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
        return j + 1, x + value

      _, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
                                           (0, array_ops.zeros([2])))
      out = math_ops.reduce_prod(out)
      return out, gradient_ops.gradients(out, inp)[0]

    pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
    # Note that tf.while_loop does not work in the setup above. So we
    # manually construct the equivalent computation of the above loops here.
    real_out = math_ops.reduce_sum(inp, axis=[0])
    real_out = math_ops.reduce_prod(real_out, axis=[1])
    # Note that gradients of real_out will accumulate the gradients across
    # the output value. Hence we do the same aggregation on pfor_out_grad.
    real_out_grad = gradient_ops.gradients(real_out, inp)[0]
    sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])

    with session.Session() as sess:
      v1, v2, v1_grad, v2_grad = sess.run(
          [pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
      self.assertAllClose(v1, v2)
      self.assertAllClose(v1_grad, v2_grad)
def dynamic_lstm_input_fn(batch_size, state_size, max_steps):
  """Builds constant (inputs, sequence_length) tensors for RNN tests.

  Both tensors are constants so that repeated session.run calls produce
  identical results.
  """
  inputs = constant_op.constant(
      np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
  lengths = np.random.randint(0, max_steps + 1, size=[batch_size])
  return inputs, constant_op.constant(lengths, dtype=dtypes.int32)
def create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):
  """Builds a pfor-based dynamic RNN and an equivalent rnn.dynamic_rnn.

  Args:
    cell_fn: callable returning an RNN cell, given state_size.
    batch_size: number of sequences; also the pfor iteration count.
    max_steps: maximum number of unrolled time steps.

  Returns:
    A (pfor_output, tf_output) pair for callers to compare.
  """
  cell = cell_fn(state_size)
  inputs, sequence_length = dynamic_lstm_input_fn(batch_size,
                                                  state_size,
                                                  max_steps)
  inputs_ta = tensor_array_ops.TensorArray(
      dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])
  # Time-major layout so inputs_ta.read(t) yields the batch at step t.
  inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])
  inputs_ta = inputs_ta.unstack(inputs_time_major)
  zeros = array_ops.zeros([state_size])

  def loop_fn(i):
    # Runs the cell over time for sequence i only (batch of 1).
    sequence_length_i = array_ops.gather(sequence_length, i)

    def body_fn(t, state, ta):
      inputs_t = array_ops.expand_dims(
          array_ops.gather(inputs_ta.read(t), i), 0)
      output, new_state = cell(inputs_t, state)
      output = array_ops.reshape(output, [-1])
      # TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
      # array_ops.where when t < min(sequence_length). Doing that requires
      # supporting tf.cond pfor conversion.
      done = t >= sequence_length_i
      # Emit zeros and freeze the state once the sequence has ended.
      output = array_ops.where(done, zeros, output)
      ta = ta.write(t, output)
      new_state = [array_ops.where(done, s, ns) for s, ns in
                   zip(nest.flatten(state), nest.flatten(new_state))]
      new_state = nest.pack_sequence_as(state, new_state)
      return t + 1, new_state, ta

    def condition_fn(t, _, unused):
      del unused
      return t < max_steps

    initial_state = cell.zero_state(1, dtypes.float32)
    _, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
        0, initial_state,
        tensor_array_ops.TensorArray(dtypes.float32, max_steps)
    ])
    # Drop the leading batch-of-1 dimension from each state component.
    new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
    new_state = nest.pack_sequence_as(initial_state, new_state)
    return ta.stack(), new_state

  pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)
  tf_output = rnn.dynamic_rnn(
      cell,
      inputs,
      sequence_length=sequence_length,
      initial_state=cell.zero_state(batch_size, dtypes.float32))
  return pfor_output, tf_output
class RNNTest(PForTest):
  """Compares pfor-based dynamic RNNs against rnn.dynamic_rnn."""

  def test_dynamic_rnn(self):
    pfor_outputs, tf_outputs = create_dynamic_lstm(
        rnn_cell.BasicRNNCell, 3, 5, 7)
    self.run_and_assert_equal(pfor_outputs, tf_outputs)

  def test_dynamic_lstm(self):
    pfor_outputs, tf_outputs = create_dynamic_lstm(
        rnn_cell.BasicLSTMCell, 3, 5, 7)
    self.run_and_assert_equal(pfor_outputs, tf_outputs)
# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop
# conversion don't look good. Some of it seems like lot of copies between host
# and device. Optimize that.
class Benchmarks(test.Benchmark):
  """Wall-time benchmarks comparing pfor, for_loop and manual graphs."""

  def _run(self, targets, iters, name=None):
    """Runs `targets` `iters` times and reports average wall time in ms."""

    def _done(t):
      # Note that we don't use tf.control_dependencies since that will not
      # make sure that the computation on GPU has actually finished. So we
      # fetch the first element of the output, and assume that this will not
      # be called on empty tensors.
      return array_ops.gather(array_ops.reshape(t, [-1]), 0)

    targets = [_done(x) for x in nest.flatten(targets)]
    sess = session.Session()
    with sess:
      init = variables.global_variables_initializer()
      sess.run(init)
      run_fn = sess.make_callable(targets)
      run_fn()  # Warm up
      begin = time.time()
      for _ in range(iters):
        run_fn()
      end = time.time()
    avg_time_ms = 1000 * (end - begin) / iters
    self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
    return avg_time_ms

  def benchmark_sess_run_overhead(self):
    # Baseline: cost of a session.run fetching a single constant.
    with ops.Graph().as_default():
      x = constant_op.constant(1.0)
      self._run(x, 10000, name="session_run_overhead")

  def benchmark_add(self):
    # Elementwise add: manual vs pfor vs while_loop-based for_loop.
    with ops.Graph().as_default():
      n = 256
      params = 1000
      x = random_ops.random_normal([n, params])
      y = random_ops.random_normal([n, params])

      def loop_fn(i):
        x_i = array_ops.gather(x, i)
        y_i = array_ops.gather(y, i)
        return x_i + y_i

      pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
      while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
      manual = x + y

      self._run(manual, 1000, name="manual_add")
      self._run(pfor_outputs, 1000, name="pfor_add")
      self._run(while_outputs, 100, name="while_add")

  def benchmark_matmul(self):
    # Per-row matmul via pfor/for_loop vs a single fused matmul.
    with ops.Graph().as_default():
      n = 1024
      params = 1000
      x = random_ops.random_normal([n, params])
      y = random_ops.random_normal([params, params])

      def loop_fn(i):
        x_i = array_ops.expand_dims(array_ops.gather(x, i), 0)
        return math_ops.matmul(x_i, y)

      pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
      while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
      manual = math_ops.matmul(x, y)

      self._run(manual, 1000, name="manual_matmul")
      self._run(pfor_outputs, 1000, name="pfor_matmul")
      self._run(while_outputs, 100, name="while_matmul")

  def benchmark_map_fn(self):
    # functional_ops.map_fn vs an equivalent pfor-based map.
    with ops.Graph().as_default():
      b = 256
      params = 1000
      inp = random_ops.random_normal((b, params))
      map_fn = lambda x: x * x

      def pfor_map_fn(f, x):
        return pfor_control_flow_ops.pfor(
            lambda i: f(array_ops.gather(x, i)),
            array_ops.shape(x)[0])

      map_output = functional_ops.map_fn(map_fn, inp)
      pfor_output = pfor_map_fn(map_fn, inp)

      self._run(map_output, 100, name="tf_map_fn")
      self._run(pfor_output, 100, name="pfor_map_fn")

  def benchmark_basic_while(self):
    # A small data-dependent while_loop: pfor vs for_loop conversion.
    with ops.Graph().as_default():

      def loop_fn(i):
        _, s = control_flow_ops.while_loop(
            lambda t, x: t < i,
            lambda t, x: (t + 1, x + i),
            [0, 0])
        return s

      iters = 50
      pfor_output = pfor_control_flow_ops.pfor(loop_fn, iters)
      for_loop_output = pfor_control_flow_ops.for_loop(loop_fn, dtypes.int32,
                                                       iters)
      self._run(pfor_output, 100, name="pfor_basic")
      self._run(for_loop_output, 100, name="for_loop_basic")

  def benchmark_dynamic_rnn(self):
    # pfor-based dynamic RNN vs rnn.dynamic_rnn.
    with ops.Graph().as_default():
      pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell,
                                                     128, 512, 16)
      self._run(pfor_outputs, 100, name="pfor_rnn")
      self._run(tf_outputs, 100, name="tf_rnn")
class SparseTest(PForTest):
  """Tests pfor conversion when loop_fn outputs are SparseTensors.

  Each test varies which SparseTensor components (indices, values, shape)
  depend on the loop index.
  """

  def test_var_loop_len(self):
    # Loop length is only known at session.run time via a placeholder.
    num_iters = array_ops.placeholder(dtypes.int32)

    def loop_fn(_):
      return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # [0, 2, 0]

    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    with self.cached_session() as sess:
      sess.run(pfor, feed_dict={num_iters: 3})

  def test_sparse_result_none_stacked(self):
    # All three components are loop invariant.
    num_iters = 10

    def loop_fn(_):
      return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # [0, 2, 0]

    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)

    indices = [[i, j] for i in range(num_iters) for j in range(3)]
    values = [4, 5, 6] * num_iters
    dense_shapes = [num_iters, 3]
    # Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]
    manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)
    self.run_and_assert_equal(pfor, manual)

  def test_sparse_result_all_stacked(self):
    # Indices, values and shape all depend on the loop index.
    num_iters = 10

    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      indices = array_ops.expand_dims(i, 0)
      return sparse_tensor.SparseTensor(indices, i, i + 1)  # [0, ..., 0, i]

    # Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
                                        list(range(num_iters)),
                                        (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  def test_sparse_result_indices_stacked(self):
    # Only the indices depend on the loop index.
    num_iters = 10

    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      indices = array_ops.expand_dims(i, 0)
      return sparse_tensor.SparseTensor(indices, [1], [num_iters])

    # Expected result: identity matrix size num_iters * num_iters
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
                                        [1] * num_iters, (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  def test_sparse_result_values_stacked(self):
    # Only the values depend on the loop index.
    num_iters = 10

    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      return sparse_tensor.SparseTensor([[0]], i, [num_iters])  # [i, 0, ..., 0]

    # Expected result: [[1, 0, ...], [2, 0, ...], [3, 0, ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
                                        list(range(num_iters)),
                                        (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  def test_sparse_result_shapes_stacked(self):
    # Only the dense shape depends on the loop index.
    num_iters = 10

    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      return sparse_tensor.SparseTensor([[0]], [1], i + 1)  # [1, 0, ..., 0]

    # Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
                                        [1] * num_iters, (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  def test_sparse_result_shapes_stacked_2D(self):
    # 2-D dense shape that grows with the loop index.
    num_iters = 10

    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)
      shape = array_ops.concat([i, i], 0)
      return sparse_tensor.SparseTensor([[0, 0]], [1], shape)  # [1, 0, ..., 0]

    # Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],
                                        [1] * num_iters,
                                        (num_iters, num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
class ParsingTest(PForTest):
  """Tests pfor conversion of parsing ops (decode_csv, parse_single_example)."""

  def test_decode_csv(self):
    # One ":"-delimited record per iteration; the second record is empty
    # fields and falls back to record_defaults.
    csv_tensor = constant_op.constant([["1:2:3"], ["::"], ["7:8:9"]])
    kwargs = {"record_defaults": [[10], [20], [30]], "field_delim": ":"}

    def loop_fn(i):
      line = array_ops.gather(csv_tensor, i)
      return parsing_ops.decode_csv(line, **kwargs)

    self._test_loop_fn(loop_fn, iters=3, loop_fn_dtypes=[dtypes.int32] * 3)

  def test_parse_single_example(self):
    # Per-iteration parse_single_example should match a single batched
    # parse_example over the same serialized protos.

    def _int64_feature(*values):
      return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))

    def _bytes_feature(*values):
      return feature_pb2.Feature(
          bytes_list=feature_pb2.BytesList(
              value=[v.encode("utf-8") for v in values]))

    examples = constant_op.constant([
        example_pb2.Example(
            features=feature_pb2.Features(
                feature={
                    "dense_int": _int64_feature(i),
                    "dense_str": _bytes_feature(str(i)),
                    "sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
                    "sparse_str": _bytes_feature(*["abc"] * i)
                })).SerializeToString() for i in range(10)
    ])
    features = {
        "dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
        "dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
        "sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
        "sparse_str": parsing_ops.VarLenFeature(dtypes.string),
    }

    def loop_fn(i):
      example_proto = array_ops.gather(examples, i)
      f = parsing_ops.parse_single_example(example_proto, features)
      return f

    pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)
    manual = parsing_ops.parse_example(examples, features)
    self.run_and_assert_equal(pfor, manual)
# Standard TensorFlow test runner entry point.
if __name__ == "__main__":
  test.main()
|
import asyncio
import json
import os
import re
import time
import traceback
import concurrent.futures
from urllib.parse import urlparse
import feedparser
from aiohttp import web
import ssl
import bcrypt
import uuid
from jinja2 import Environment, FileSystemLoader, select_autoescape
import appdaemon.dashboard as addashboard
import appdaemon.utils as utils
import appdaemon.stream as stream
import appdaemon.admin as adadmin
from appdaemon.appdaemon import AppDaemon
def securedata(myfunc):
    """
    Decorator guarding stream and service-call handlers.

    Access is granted when any one of these holds:
      - no password is configured,
      - the bcrypt-hashed ``adcreds`` cookie matches the password,
      - the ``x-ad-access`` header equals the password,
      - the ``api_password`` query parameter equals the password.
    Otherwise a 401 response is returned.
    """
    async def wrapper(*args):
        self = args[0]
        request = args[1]
        # Authentication disabled entirely.
        if self.password is None:
            return await myfunc(*args)
        # Cookie credentials set by logon_response().
        # BUGFIX: a present-but-invalid cookie used to fall off the elif chain
        # and implicitly return None; now we fall through to the other checks.
        if "adcreds" in request.cookies:
            match = await utils.run_in_executor(
                self, bcrypt.checkpw,
                str.encode(self.password),
                str.encode(request.cookies["adcreds"]))
            if match:
                return await myfunc(*args)
        # Header-based access (used by API clients).
        if ("x-ad-access" in request.headers) and (request.headers["x-ad-access"] == self.password):
            return await myfunc(*args)
        # Query-string password (legacy clients).
        if "api_password" in request.query and request.query["api_password"] == self.password:
            return await myfunc(*args)
        # BUGFIX: pass the status as an int, not the string "401".
        return self.get_response(request, 401, "Unauthorized")
    return wrapper
def secure(myfunc):
    """
    Decorator guarding browser-facing pages (screen based security).

    With no password configured the handler runs unconditionally; otherwise
    the bcrypt-hashed ``adcreds`` cookie must match, or the logon page is
    served instead.
    """
    async def wrapper(*args):
        self = args[0]
        request = args[1]
        # Authentication disabled entirely.
        if self.password is None:
            return await myfunc(*args)
        cookie = request.cookies["adcreds"] if "adcreds" in request.cookies else None
        if cookie is not None:
            ok = await utils.run_in_executor(
                self, bcrypt.checkpw,
                str.encode(self.password),
                str.encode(cookie))
            if ok:
                return await myfunc(*args)
        # Missing or invalid cookie: redirect the browser to the logon page.
        return await self.forcelogon(request)
    return wrapper
def app_secure(myfunc):
    """
    Decorator guarding app-registered web routes.

    Access is granted when any one of these holds:
      - no password is configured or no tokens are registered,
      - the bcrypt-hashed ``adcreds`` cookie matches the password,
      - the ``token`` query parameter is one of ``self.valid_tokens``.
    Otherwise a 401 response is returned.
    """
    async def wrapper(*args):
        self = args[0]
        request = args[1]
        # Auth disabled, or no app tokens configured.
        if self.password is None or self.valid_tokens == []:
            return await myfunc(*args)
        # Cookie credentials set by logon_response().
        # BUGFIX: a present-but-invalid cookie used to fall off the elif chain
        # and implicitly return None; now we fall through to the token check.
        if "adcreds" in request.cookies:
            match = await utils.run_in_executor(
                self, bcrypt.checkpw,
                str.encode(self.password),
                str.encode(request.cookies["adcreds"]))
            if match:
                return await myfunc(*args)
        # Per-app token supplied on the query string.
        if "token" in request.query and request.query["token"] in self.valid_tokens:
            return await myfunc(*args)
        # BUGFIX: pass the status as an int, not the string "401".
        return self.get_response(request, 401, "Unauthorized")
    return wrapper
class HTTP:
    """aiohttp-based web server for AppDaemon.

    Hosts the REST API, the admin interface, HADashboard dashboards and
    app-registered web routes, and wires up authentication for all of them.
    """

    def __init__(self, ad: AppDaemon, loop, logging, appdaemon, dashboard, admin, api, http):
        """Configure the web application, routes and optional subsystems.

        Args:
            ad: Master AppDaemon object.
            loop: asyncio event loop used to schedule background tasks.
            logging: AppDaemon logging subsystem.
            appdaemon: ``appdaemon`` section of the configuration.
            dashboard: ``hadashboard`` section of the configuration, or None.
            admin: ``admin`` section of the configuration, or None.
            api: ``api`` section of the configuration, or None.
            http: ``http`` section of the configuration.
        """
        self.AD = ad
        self.logging = logging
        self.logger = ad.logging.get_child("_http")
        self.access = ad.logging.get_access()
        self.appdaemon = appdaemon
        self.dashboard = dashboard
        self.dashboard_dir = None
        self.admin = admin
        self.http = http
        self.api = api
        self.template_dir = os.path.join(os.path.dirname(__file__), "assets", "templates")
        self.password = None
        self._process_arg("password", http)
        self.valid_tokens = []
        self._process_arg("tokens", http)
        self.url = None
        self._process_arg("url", http)
        self.work_factor = 8
        self._process_arg("work_factor", http)
        self.ssl_certificate = None
        self._process_arg("ssl_certificate", http)
        self.ssl_key = None
        self._process_arg("ssl_key", http)
        self.transport = "ws"
        self._process_arg("transport", http)
        self.logger.info("Using '%s' for event stream", self.transport)
        self.config_dir = None
        self._process_arg("config_dir", dashboard)
        self.stopping = False
        self.endpoints = {}
        self.app_routes = {}
        self.dashboard_obj = None
        self.admin_obj = None
        self.install_dir = os.path.dirname(__file__)
        self.javascript_dir = os.path.join(self.install_dir, "assets", "javascript")
        self.template_dir = os.path.join(self.install_dir, "assets", "templates")
        self.css_dir = os.path.join(self.install_dir, "assets", "css")
        self.fonts_dir = os.path.join(self.install_dir, "assets", "fonts")
        self.webfonts_dir = os.path.join(self.install_dir, "assets", "webfonts")
        self.images_dir = os.path.join(self.install_dir, "assets", "images")
        try:
            url = urlparse(self.url)
            net = url.netloc.split(":")
            self.host = net[0]
            try:
                self.port = net[1]
            except IndexError:
                # No explicit port in the url -> default to 80.
                self.port = 80
            if self.host == "":
                raise ValueError("Invalid host for 'url'")
            self.app = web.Application()
            if "headers" in self.http:
                self.app.on_response_prepare.append(self.add_response_headers)
            # Setup event stream
            self.stream = stream.ADStream(self.AD, self.app, self.transport)
            self.loop = loop
            self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)
            # NOTE(review): `context` is built but never used — start_server()
            # creates the TCPSite without SSL. TODO: pass it through so
            # ssl_certificate/ssl_key actually take effect.
            if self.ssl_certificate is not None and self.ssl_key is not None:
                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                context.load_cert_chain(self.ssl_certificate, self.ssl_key)
            else:
                context = None
            self.setup_http_routes()
            #
            # API
            #
            if api is not None:
                self.logger.info("Starting API")
                self.setup_api_routes()
            else:
                self.logger.info("API is disabled")
            #
            # Admin
            #
            if admin is not None:
                self.logger.info("Starting Admin Interface")
                self.stats_update = "realtime"
                self._process_arg("stats_update", admin)
                self.admin_obj = adadmin.Admin(self.config_dir, logging, self.AD,
                                               javascript_dir=self.javascript_dir,
                                               template_dir=self.template_dir,
                                               css_dir=self.css_dir,
                                               fonts_dir=self.fonts_dir,
                                               webfonts_dir=self.webfonts_dir,
                                               images_dir=self.images_dir,
                                               **admin
                                               )
            else:
                self.logger.info("Admin Interface is disabled")
            #
            # Dashboards
            #
            if dashboard is not None:
                self.logger.info("Starting Dashboards")
                self._process_arg("dashboard_dir", dashboard)
                self.compile_on_start = True
                self._process_arg("compile_on_start", dashboard)
                self.force_compile = False
                self._process_arg("force_compile", dashboard)
                self.profile_dashboard = False
                self._process_arg("profile_dashboard", dashboard)
                self.rss_feeds = None
                self._process_arg("rss_feeds", dashboard)
                self.fa4compatibility = False
                self._process_arg("fa4compatibility", dashboard)
                if "rss_feeds" in dashboard:
                    # Keep only feeds whose target looks like "<device>.<entity>".
                    self.rss_feeds = []
                    for feed in dashboard["rss_feeds"]:
                        if feed["target"].count('.') != 1:
                            self.logger.warning("Invalid RSS feed target: %s", feed["target"])
                        else:
                            self.rss_feeds.append(feed)
                self.rss_update = None
                self._process_arg("rss_update", dashboard)
                self.rss_last_update = None
                # find dashboard dir
                if self.dashboard_dir is None:
                    if self.config_dir is None:
                        self.dashboard_dir = utils.find_path("dashboards")
                    else:
                        self.dashboard_dir = os.path.join(self.config_dir, "dashboards")
                self.javascript_dir = os.path.join(self.install_dir, "assets", "javascript")
                self.template_dir = os.path.join(self.install_dir, "assets", "templates")
                self.css_dir = os.path.join(self.install_dir, "assets", "css")
                self.fonts_dir = os.path.join(self.install_dir, "assets", "fonts")
                self.webfonts_dir = os.path.join(self.install_dir, "assets", "webfonts")
                self.images_dir = os.path.join(self.install_dir, "assets", "images")
                #
                # Setup compile directories
                #
                if self.config_dir is None:
                    self.compile_dir = utils.find_path("compiled")
                else:
                    self.compile_dir = os.path.join(self.config_dir, "compiled")
                self.dashboard_obj = addashboard.Dashboard(self.config_dir, self.logging,
                                                           dash_compile_on_start=self.compile_on_start,
                                                           dash_force_compile=self.force_compile,
                                                           profile_dashboard=self.profile_dashboard,
                                                           dashboard_dir=self.dashboard_dir,
                                                           fa4compatibility=self.fa4compatibility,
                                                           transport=self.transport,
                                                           javascript_dir=self.javascript_dir,
                                                           template_dir=self.template_dir,
                                                           css_dir=self.css_dir,
                                                           fonts_dir=self.fonts_dir,
                                                           webfonts_dir=self.webfonts_dir,
                                                           images_dir=self.images_dir)
                self.setup_dashboard_routes()
            else:
                self.logger.info("Dashboards Disabled")
            #
            # Finish up and start the server
            #
            #handler = self.app.make_handler()
            #f = loop.create_server(handler, "0.0.0.0", int(self.port), ssl=context)
            #loop.create_task(f)
            if self.dashboard_obj is not None:
                loop.create_task(self.update_rss())
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in HTTP module")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)

    async def start_server(self):
        """Create the aiohttp runner and start listening on all interfaces."""
        self.logger.info("Running on port %s", self.port)
        self.runner = web.AppRunner(self.app)
        await self.runner.setup()
        site = web.TCPSite(self.runner, '0.0.0.0', int(self.port))
        await site.start()

    async def stop_server(self):
        """Log web server shutdown (runner cleanup intentionally skipped)."""
        self.logger.info("Shutting down webserver")
        #
        # We should do this but it makes AD hang so ...
        #
        #await self.runner.cleanup()

    async def add_response_headers(self, request, response):
        """aiohttp ``on_response_prepare`` hook: add configured extra headers."""
        for header, value in self.http['headers'].items():
            response.headers[header] = value

    def stop(self):
        """Signal background tasks (e.g. update_rss) to exit."""
        self.stopping = True

    def _process_arg(self, arg, kwargs):
        """Copy ``arg`` from a config dict onto ``self`` if present."""
        if kwargs:
            if arg in kwargs:
                setattr(self, arg, kwargs[arg])

    @staticmethod
    def check_password(password, hash):
        """Return True if ``password`` matches the bcrypt ``hash``."""
        # BUGFIX: previously returned the tuple
        # (bcrypt.checkpw, encoded_password, encoded_hash) instead of actually
        # invoking the check, so any caller would see a truthy value.
        return bcrypt.checkpw(str.encode(password), str.encode(hash))

    async def forcelogon(self, request):
        """Serve the logon page to an unauthenticated browser."""
        response = await self.logon_page(request)
        return response

    async def logon_response(self, request):
        """Handle the logon form POST; on success set the ``adcreds`` cookie."""
        try:
            data = await request.post()
            password = data["password"]
            if password == self.password:
                self.access.info("Successful logon from %s", request.host)
                hashed = bcrypt.hashpw(str.encode(self.password), bcrypt.gensalt(self.work_factor))
                if self.admin is not None:
                    response = await self._admin_page(request)
                else:
                    response = await self._list_dash(request)
                self.logger.debug("hashed=%s", hashed)
                # Set cookie to last for 1 year
                response.set_cookie("adcreds", hashed.decode("utf-8"), max_age=31536000)
            else:
                self.access.warning("Unsuccessful logon from %s", request.host)
                response = await self.logon_page(request)
            return response
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in logon_response()")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return self.get_response(request, 500, "Server error in logon_response()")

    # noinspection PyUnusedLocal
    @secure
    async def list_dash(self, request):
        """Secured wrapper around _list_dash()."""
        return await self._list_dash(request)

    async def _list_dash(self, request):
        """Render the list of available dashboards (no auth check)."""
        response = await utils.run_in_executor(self, self.dashboard_obj.get_dashboard_list)
        return web.Response(text=response, content_type="text/html")

    @secure
    async def load_dash(self, request):
        """Render a single dashboard, optionally forcing a recompile."""
        name = request.match_info.get('name', "Anonymous")
        params = request.query
        skin = params.get("skin", "default")
        recompile = params.get("recompile", False)
        if recompile == '1':
            recompile = True
        response = await utils.run_in_executor(self, self.dashboard_obj.get_dashboard, name, skin, recompile)
        return web.Response(text=response, content_type="text/html")

    async def update_rss(self):
        """Background task: poll configured RSS feeds and publish them as state."""
        # Grab RSS Feeds
        if self.rss_feeds is not None and self.rss_update is not None:
            while not self.stopping:
                try:
                    if self.rss_last_update is None or (self.rss_last_update + self.rss_update) <= time.time():
                        self.rss_last_update = time.time()
                        for feed_data in self.rss_feeds:
                            feed = await utils.run_in_executor(self, feedparser.parse, feed_data["feed"])
                            if "bozo_exception" in feed:
                                self.logger.warning("Error in RSS feed %s: %s", feed_data["feed"], feed["bozo_exception"])
                            else:
                                new_state = {"feed": feed}
                                # RSS Feeds always live in the admin namespace
                                await self.AD.state.set_state("rss", "admin", feed_data["target"], state=new_state)
                    await asyncio.sleep(1)
                except Exception:
                    self.logger.warning('-' * 60)
                    self.logger.warning("Unexpected error in update_rss()")
                    self.logger.warning('-' * 60)
                    self.logger.warning(traceback.format_exc())
                    self.logger.warning('-' * 60)

    #
    # REST API
    #

    @securedata
    async def get_ad(self, request):
        """GET /api/appdaemon — liveness probe."""
        return web.json_response({"state": {"status": "active"}}, dumps=utils.convert_json)

    @securedata
    async def get_entity(self, request):
        """GET /api/appdaemon/state/{namespace}/{entity} — one entity's state."""
        namespace = None
        entity_id = None
        try:
            entity_id = request.match_info.get('entity')
            namespace = request.match_info.get('namespace')
            self.logger.debug("get_state() called, ns=%s, entity=%s", namespace, entity_id)
            state = self.AD.state.get_entity(namespace, entity_id)
            self.logger.debug("result = %s", state)
            return web.json_response({"state": state}, dumps=utils.convert_json)
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in get_entity()")
            self.logger.warning("Namespace: %s, entity: %s", namespace, entity_id)
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return self.get_response(request, 500, "Unexpected error in get_entity()")

    @securedata
    async def get_namespace(self, request):
        """GET /api/appdaemon/state/{namespace} — all entities in a namespace."""
        namespace = None
        try:
            namespace = request.match_info.get('namespace')
            self.logger.debug("get_namespace() called, ns=%s", namespace)
            state = self.AD.state.get_entity(namespace)
            self.logger.debug("result = %s", state)
            if state is None:
                return self.get_response(request, 404, "Namespace Not Found")
            return web.json_response({"state": state}, dumps=utils.convert_json)
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in get_namespace()")
            self.logger.warning("Namespace: %s", namespace)
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return self.get_response(request, 500, "Unexpected error in get_namespace()")

    @securedata
    async def get_namespace_entities(self, request):
        """GET /api/appdaemon/state/{namespace}/ — entity ids in a namespace."""
        namespace = None
        try:
            namespace = request.match_info.get('namespace')
            self.logger.debug("get_namespace_entities() called, ns=%s", namespace)
            state = self.AD.state.list_namespace_entities(namespace)
            self.logger.debug("result = %s", state)
            if state is None:
                return self.get_response(request, 404, "Namespace Not Found")
            return web.json_response({"state": state}, dumps=utils.convert_json)
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in get_namespace_entities()")
            self.logger.warning("Namespace: %s", namespace)
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return self.get_response(request, 500, "Unexpected error in get_namespace_entities()")

    @securedata
    async def get_namespaces(self, request):
        """GET /api/appdaemon/state/ — list of known namespaces."""
        try:
            self.logger.debug("get_namespaces() called)")
            state = await self.AD.state.list_namespaces()
            self.logger.debug("result = %s", state)
            return web.json_response({"state": state}, dumps=utils.convert_json)
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in get_namespaces()")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return self.get_response(request, 500, "Unexpected error in get_namespaces()")

    @securedata
    async def get_services(self, request):
        """GET /api/appdaemon/service/ — list of registered services."""
        try:
            self.logger.debug("get_services() called)")
            state = self.AD.services.list_services()
            self.logger.debug("result = %s", state)
            return web.json_response({"state": state}, dumps=utils.convert_json)
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in get_services()")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return self.get_response(request, 500, "Unexpected error in get_services()")

    @securedata
    async def get_state(self, request):
        """GET /api/appdaemon/state — full state across all namespaces."""
        try:
            self.logger.debug("get_state() called")
            state = self.AD.state.get_entity()
            if state is None:
                # BUGFIX: the 404 response was built but never returned.
                return self.get_response(request, 404, "State Not Found")
            self.logger.debug("result = %s", state)
            return web.json_response({"state": state}, dumps=utils.convert_json)
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in get_state()")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return self.get_response(request, 500, "Unexpected error in get_state()")

    @securedata
    async def get_logs(self, request):
        """GET /api/appdaemon/logs — admin log buffers."""
        try:
            self.logger.debug("get_logs() called")
            logs = await utils.run_in_executor(self, self.AD.logging.get_admin_logs)
            return web.json_response({"logs": logs}, dumps=utils.convert_json)
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in get_logs()")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return self.get_response(request, 500, "Unexpected error in get_logs()")

    # noinspection PyUnusedLocal
    @securedata
    async def call_service(self, request):
        """POST /api/appdaemon/service/{namespace}/{domain}/{service}."""
        try:
            try:
                data = await request.json()
            except json.decoder.JSONDecodeError:
                return self.get_response(request, 400, "JSON Decode Error")
            args = {}
            namespace = request.match_info.get('namespace')
            domain = request.match_info.get('domain')
            service = request.match_info.get('service')
            #
            # Some value munging for dashboard
            #
            for key in data:
                if key == "service":
                    pass
                elif key == "rgb_color":
                    # "r, g, b" string -> [r, g, b] list (values stay strings).
                    m = re.search(r'\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)', data[key])
                    if m:
                        r = m.group(1)
                        g = m.group(2)
                        b = m.group(3)
                        args["rgb_color"] = [r, g, b]
                elif key == "xy_color":
                    m = re.search(r'\s*(\d+\.\d+)\s*,\s*(\d+\.\d+)', data[key])
                    if m:
                        x = m.group(1)
                        y = m.group(2)
                        args["xy_color"] = [x, y]
                elif key == "json_args":
                    # Flatten a JSON-encoded dict of extra arguments.
                    json_args = json.loads(data[key])
                    for k in json_args.keys():
                        args[k] = json_args[k]
                else:
                    args[key] = data[key]
            self.logger.debug("call_service() args = %s", args)
            await self.AD.services.call_service(namespace, domain, service, args)
            return web.Response(status=200)
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in call_service()")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return web.Response(status=500)

    @securedata
    async def fire_event(self, request):
        """POST /api/appdaemon/event/{namespace}/{event}."""
        try:
            try:
                data = await request.json()
            except json.decoder.JSONDecodeError:
                return self.get_response(request, 400, "JSON Decode Error")
            args = {}
            namespace = request.match_info.get('namespace')
            event = request.match_info.get('event')
            #
            # Some value munging for dashboard
            #
            for key in data:
                if key == "event":
                    pass
                else:
                    args[key] = data[key]
            self.logger.debug("fire_event() args = %s", args)
            await self.AD.events.fire_event(namespace, event, **args)
            return web.Response(status=200)
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in fire_event()")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return web.Response(status=500)

    # noinspection PyUnusedLocal
    async def not_found(self, request):
        """Catch-all 404 handler."""
        return self.get_response(request, 404, "Not Found")

    # Stream Handling

    async def stream_update(self, namespace, data):
        """Forward a state/event update to the websocket/SSE stream."""
        #self.logger.debug("stream_update() %s:%s", namespace, data)
        data["namespace"] = namespace
        self.AD.thread_async.call_async_no_wait(self.stream.send_update, data)

    # Routes, Status and Templates

    def setup_api_routes(self):
        """Register the REST API routes on the aiohttp router."""
        self.app.router.add_post('/api/appdaemon/service/{namespace}/{domain}/{service}', self.call_service)
        self.app.router.add_post('/api/appdaemon/event/{namespace}/{event}', self.fire_event)
        self.app.router.add_get('/api/appdaemon/service/', self.get_services)
        self.app.router.add_get('/api/appdaemon/state/{namespace}/{entity}', self.get_entity)
        self.app.router.add_get('/api/appdaemon/state/{namespace}', self.get_namespace)
        self.app.router.add_get('/api/appdaemon/state/{namespace}/', self.get_namespace_entities)
        self.app.router.add_get('/api/appdaemon/state/', self.get_namespaces)
        self.app.router.add_get('/api/appdaemon/state', self.get_state)
        self.app.router.add_get('/api/appdaemon/logs', self.get_logs)
        self.app.router.add_post('/api/appdaemon/{app}', self.call_api)
        self.app.router.add_get('/api/appdaemon', self.get_ad)

    def setup_http_routes(self):
        """Register static assets, logon and root routes."""
        self.app.router.add_get('/favicon.ico', self.not_found)
        self.app.router.add_get('/{gfx}.png', self.not_found)
        self.app.router.add_post('/logon_response', self.logon_response)
        # Add static path for JavaScript
        self.app.router.add_static('/javascript', self.javascript_dir)
        # Add static path for fonts
        self.app.router.add_static('/fonts', self.fonts_dir)
        # Add static path for webfonts
        self.app.router.add_static('/webfonts', self.webfonts_dir)
        # Add static path for images
        self.app.router.add_static('/images', self.images_dir)
        # Add static path for css
        self.app.router.add_static('/css', self.css_dir)
        # Root route priority: admin page, then dashboard list, then error.
        if self.admin is not None:
            self.app.router.add_get('/', self.admin_page)
        elif self.dashboard is not None:
            self.app.router.add_get('/', self.list_dash)
        else:
            self.app.router.add_get('/', self.error_page)
        #
        # For App based Web Server
        #
        self.app.router.add_get('/app/{route}', self.app_webserver)

    def setup_dashboard_routes(self):
        """Register dashboard list/load routes and compiled asset paths."""
        self.app.router.add_get('/list', self.list_dash)
        self.app.router.add_get('/{name}', self.load_dash)
        # Setup Templates
        self.app.router.add_static('/compiled_javascript', self.dashboard_obj.compiled_javascript_dir)
        self.app.router.add_static('/compiled_css', self.dashboard_obj.compiled_css_dir)
        # Add path for custom_css if it exists
        custom_css = os.path.join(self.dashboard_obj.config_dir, "custom_css")
        if os.path.isdir(custom_css):
            self.app.router.add_static('/custom_css', custom_css)

    # API

    async def terminate_app(self, name):
        """Drop all endpoints and web routes registered by an app."""
        if name in self.endpoints:
            del self.endpoints[name]
        if name in self.app_routes:
            del self.app_routes[name]

    def get_response(self, request, code, error):
        """Build and log an HTML status response for an API call."""
        res = "<html><head><title>{} {}</title></head><body><h1>{} {}</h1>Error in API Call</body></html>".format(code, error, code, error)
        app = request.match_info.get('app', "system")
        if code == 200:
            self.access.info("API Call to %s: status: %s", app, code)
        else:
            self.access.warning("API Call to %s: status: %s, %s", app, code, error)
        # BUGFIX: use text= (body= requires bytes in aiohttp) and actually set
        # the HTTP status; int() tolerates legacy callers that pass "401".
        return web.Response(text=res, content_type="text/html", status=int(code))

    def get_web_response(self, request, code, error):
        """Build and log an HTML status response for an app web call."""
        res = "<html><head><title>{} {}</title></head><body><h1>{} {}</h1>Error in Web Service Call</body></html>".format(code, error, code, error)
        app = request.match_info.get('app', "system")
        if code == 200:
            self.access.info("Web Call to %s: status: %s", app, code)
        else:
            self.access.warning("Web Call to %s: status: %s, %s", app, code, error)
        # BUGFIX: error pages previously always returned HTTP 200.
        return web.Response(text=res, content_type="text/html", status=int(code))

    @securedata
    async def call_api(self, request):
        """POST /api/appdaemon/{app} — dispatch a call to an app endpoint."""
        code = 200
        ret = ""
        app = request.match_info.get('app')
        try:
            args = await request.json()
        except json.decoder.JSONDecodeError:
            return self.get_response(request, 400, "JSON Decode Error")
        try:
            ret, code = await self.dispatch_app_by_name(app, args)
        except Exception:
            self.access.warning('-' * 60)
            self.access.warning("Unexpected error during API call")
            self.access.warning('-' * 60)
            self.access.warning(traceback.format_exc())
            self.access.warning('-' * 60)
            # BUGFIX: a failed dispatch previously fell through and reported
            # 200 "OK"; surface the failure to the client instead.
            return self.get_response(request, 500, "Unexpected error during API call")
        if code == 404:
            return self.get_response(request, 404, "App Not Found")
        response = "OK"
        self.access.info("API Call to %s: status: %s %s", app, code, response)
        return web.json_response(ret, status=code, dumps=utils.convert_json)

    # Routes, Status and Templates

    async def register_endpoint(self, cb, name):
        """Register an app API endpoint callback; returns an opaque handle."""
        handle = uuid.uuid4().hex
        if name not in self.endpoints:
            self.endpoints[name] = {}
        self.endpoints[name][handle] = {"callback": cb, "name": name}
        return handle

    async def unregister_endpoint(self, handle, name):
        """Remove a previously registered endpoint, ignoring unknown handles."""
        if name in self.endpoints and handle in self.endpoints[name]:
            del self.endpoints[name][handle]

    async def dispatch_app_by_name(self, name, args):
        """Invoke the endpoint registered under ``name``; (result, 404) if none."""
        callback = None
        # NOTE: if several handles register the same name, the last one wins
        # (preserved historical behavior).
        for app in self.endpoints:
            for handle in self.endpoints[app]:
                if self.endpoints[app][handle]["name"] == name:
                    callback = self.endpoints[app][handle]["callback"]
        if callback is not None:
            if asyncio.iscoroutinefunction(callback):
                return await callback(args)
            else:
                return await utils.run_in_executor(self, callback, args)
        else:
            return '', 404

    #
    # App based Web Server
    #

    async def register_app_route(self, cb, route, name, **kwargs):
        """Register an async web handler for /app/{route}; returns a handle."""
        if not asyncio.iscoroutinefunction(cb):  # must be async function
            self.logger.warning("Could not Register Callback for %s, using Route %s as Web Server Route. Callback must be Async", name, route)
            return
        handle = uuid.uuid4().hex
        if name not in self.app_routes:
            self.app_routes[name] = {}
        token = kwargs.get("token")
        self.app_routes[name][handle] = {"callback": cb, "route": route, "token": token}
        return handle

    async def unregister_app_route(self, handle, name):
        """Remove a previously registered app web route."""
        if name in self.app_routes and handle in self.app_routes[name]:
            del self.app_routes[name][handle]

    @app_secure
    async def app_webserver(self, request):
        """GET /app/{route} — dispatch to the matching app-registered handler."""
        route = request.match_info.get('route')
        token = request.query.get("token")
        code = 404
        error = "Requested Server does not exist"
        callback = None
        for name in self.app_routes:
            if callback is not None:  # a callback has been collected
                break
            for handle in self.app_routes[name]:
                app_route = self.app_routes[name][handle]["route"]
                app_token = self.app_routes[name][handle]["token"]
                if app_route == route:
                    # Per-route token, if set, must match the query token.
                    if app_token is not None and app_token != token:
                        return self.get_web_response(request, 401, "Unauthorized")
                    callback = self.app_routes[name][handle]["callback"]
                    break
        if callback is not None:
            self.access.debug("Web Call to %s for %s", route, name)
            try:
                # Track the future so terminate_app() can cancel it.
                f = asyncio.ensure_future(callback(request))
                self.AD.futures.add_future(name, f)
                return await f
            except asyncio.CancelledError:
                code = 503
                error = "Request was Cancelled"
            except Exception:
                self.access.warning('-' * 60)
                self.access.warning("Unexpected error during Web call")
                self.access.warning('-' * 60)
                self.access.warning(traceback.format_exc())
                self.access.warning('-' * 60)
                code = 503
                error = "Request had an Error"
        return self.get_web_response(request, str(code), error)

    #
    # Admin
    #

    @secure
    async def admin_page(self, request):
        """Secured admin page."""
        return await self._admin_page(request)

    # Insecure version
    async def _admin_page(self, request):
        """Render the admin page without an auth check."""
        response = await self.admin_obj.admin_page(request.scheme, request.host)
        return web.Response(text=response, content_type="text/html")

    async def logon_page(self, request):
        """Render the logon page."""
        response = await utils.run_in_executor(self, self.generate_logon_page, request.scheme, request.host)
        return web.Response(text=response, content_type="text/html")

    async def error_page(self, request):
        """Render the generic error page."""
        response = await utils.run_in_executor(self, self.generate_error_page, request.scheme, request.host)
        return web.Response(text=response, content_type="text/html")

    def generate_logon_page(self, scheme, url):
        """Render logon.jinja2 to HTML; returns None on template failure."""
        try:
            params = {}
            env = Environment(
                loader=FileSystemLoader(self.template_dir),
                autoescape=select_autoescape(['html', 'xml'])
            )
            template = env.get_template("logon.jinja2")
            rendered_template = template.render(params)
            return rendered_template
        except Exception:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error creating logon page")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)

    def generate_error_page(self, scheme, url):
        """Render error.jinja2 to HTML; returns None on template failure."""
        try:
            params = {}
            env = Environment(
                loader=FileSystemLoader(self.template_dir),
                autoescape=select_autoescape(['html', 'xml'])
            )
            template = env.get_template("error.jinja2")
            rendered_template = template.render(params)
            return rendered_template
        except Exception:
            self.logger.warning('-' * 60)
            # BUGFIX: message previously said "logon page" (copy/paste).
            self.logger.warning("Unexpected error creating error page")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
Update http.py
import asyncio
import json
import os
import re
import time
import traceback
import concurrent.futures
from urllib.parse import urlparse
import feedparser
from aiohttp import web
import ssl
import bcrypt
import uuid
from jinja2 import Environment, FileSystemLoader, select_autoescape
import appdaemon.dashboard as addashboard
import appdaemon.utils as utils
import appdaemon.stream as stream
import appdaemon.admin as adadmin
from appdaemon.appdaemon import AppDaemon
def securedata(myfunc):
    """
    Decorator guarding stream and service-call handlers.

    Access is granted when any one of these holds:
      - no password is configured,
      - the bcrypt-hashed ``adcreds`` cookie matches the password,
      - the ``x-ad-access`` header equals the password,
      - the ``api_password`` query parameter equals the password.
    Otherwise a 401 response is returned.
    """
    async def wrapper(*args):
        self = args[0]
        request = args[1]
        # Authentication disabled entirely.
        if self.password is None:
            return await myfunc(*args)
        # Cookie credentials set by logon_response().
        # BUGFIX: a present-but-invalid cookie used to fall off the elif chain
        # and implicitly return None; now we fall through to the other checks.
        if "adcreds" in request.cookies:
            match = await utils.run_in_executor(
                self, bcrypt.checkpw,
                str.encode(self.password),
                str.encode(request.cookies["adcreds"]))
            if match:
                return await myfunc(*args)
        # Header-based access (used by API clients).
        if ("x-ad-access" in request.headers) and (request.headers["x-ad-access"] == self.password):
            return await myfunc(*args)
        # Query-string password (legacy clients).
        if "api_password" in request.query and request.query["api_password"] == self.password:
            return await myfunc(*args)
        # BUGFIX: pass the status as an int, not the string "401".
        return self.get_response(request, 401, "Unauthorized")
    return wrapper
def secure(myfunc):
    """
    Decorator guarding browser-facing pages (screen based security).

    With no password configured the handler runs unconditionally; otherwise
    the bcrypt-hashed ``adcreds`` cookie must match, or the logon page is
    served instead.
    """
    async def wrapper(*args):
        self = args[0]
        request = args[1]
        # Authentication disabled entirely.
        if self.password is None:
            return await myfunc(*args)
        cookie = request.cookies["adcreds"] if "adcreds" in request.cookies else None
        if cookie is not None:
            ok = await utils.run_in_executor(
                self, bcrypt.checkpw,
                str.encode(self.password),
                str.encode(cookie))
            if ok:
                return await myfunc(*args)
        # Missing or invalid cookie: redirect the browser to the logon page.
        return await self.forcelogon(request)
    return wrapper
def app_secure(myfunc):
    """
    Decorator guarding app-registered web routes.

    Access is granted when any one of these holds:
      - no password is configured or no tokens are registered,
      - the bcrypt-hashed ``adcreds`` cookie matches the password,
      - the ``token`` query parameter is one of ``self.valid_tokens``.
    Otherwise a 401 response is returned.
    """
    async def wrapper(*args):
        self = args[0]
        request = args[1]
        # Auth disabled, or no app tokens configured.
        if self.password is None or self.valid_tokens == []:
            return await myfunc(*args)
        # Cookie credentials set by logon_response().
        # BUGFIX: a present-but-invalid cookie used to fall off the elif chain
        # and implicitly return None; now we fall through to the token check.
        if "adcreds" in request.cookies:
            match = await utils.run_in_executor(
                self, bcrypt.checkpw,
                str.encode(self.password),
                str.encode(request.cookies["adcreds"]))
            if match:
                return await myfunc(*args)
        # Per-app token supplied on the query string.
        if "token" in request.query and request.query["token"] in self.valid_tokens:
            return await myfunc(*args)
        # BUGFIX: pass the status as an int, not the string "401".
        return self.get_response(request, 401, "Unauthorized")
    return wrapper
class HTTP:
def __init__(self, ad: AppDaemon, loop, logging, appdaemon, dashboard, admin, api, http):
self.AD = ad
self.logging = logging
self.logger = ad.logging.get_child("_http")
self.access = ad.logging.get_access()
self.appdaemon = appdaemon
self.dashboard = dashboard
self.dashboard_dir = None
self.admin = admin
self.http = http
self.api = api
self.template_dir = os.path.join(os.path.dirname(__file__), "assets", "templates")
self.password = None
self._process_arg("password", http)
self.valid_tokens = []
self._process_arg("tokens", http)
self.url = None
self._process_arg("url", http)
self.work_factor = 8
self._process_arg("work_factor", http)
self.ssl_certificate = None
self._process_arg("ssl_certificate", http)
self.ssl_key = None
self._process_arg("ssl_key", http)
self.transport = "ws"
self._process_arg("transport", http)
self.logger.info("Using '%s' for event stream", self.transport)
self.config_dir = None
self._process_arg("config_dir", dashboard)
self.stopping = False
self.endpoints = {}
self.app_routes = {}
self.dashboard_obj = None
self.admin_obj = None
self.install_dir = os.path.dirname(__file__)
self.javascript_dir = os.path.join(self.install_dir, "assets", "javascript")
self.template_dir = os.path.join(self.install_dir, "assets", "templates")
self.css_dir = os.path.join(self.install_dir, "assets", "css")
self.fonts_dir = os.path.join(self.install_dir, "assets", "fonts")
self.webfonts_dir = os.path.join(self.install_dir, "assets", "webfonts")
self.images_dir = os.path.join(self.install_dir, "assets", "images")
try:
url = urlparse(self.url)
net = url.netloc.split(":")
self.host = net[0]
try:
self.port = net[1]
except IndexError:
self.port = 80
if self.host == "":
raise ValueError("Invalid host for 'url'")
self.app = web.Application()
if "headers" in self.http:
self.app.on_response_prepare.append(self.add_response_headers)
# Setup event stream
self.stream = stream.ADStream(self.AD, self.app, self.transport)
self.loop = loop
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)
#TODO the `context` local varialble is never used after its initialization, maybe it can be removed
if self.ssl_certificate is not None and self.ssl_key is not None:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(self.ssl_certificate, self.ssl_key)
else:
context = None
self.setup_http_routes()
#
# API
#
if api is not None:
self.logger.info("Starting API")
self.setup_api_routes()
else:
self.logger.info("API is disabled")
#
# Admin
#
if admin is not None:
self.logger.info("Starting Admin Interface")
self.stats_update = "realtime"
self._process_arg("stats_update", admin)
self.admin_obj = adadmin.Admin(self.config_dir, logging, self.AD,
javascript_dir=self.javascript_dir,
template_dir=self.template_dir,
css_dir=self.css_dir,
fonts_dir=self.fonts_dir,
webfonts_dir=self.webfonts_dir,
images_dir=self.images_dir,
**admin
)
else:
self.logger.info("Admin Interface is disabled")
#
# Dashboards
#
if dashboard is not None:
self.logger.info("Starting Dashboards")
self._process_arg("dashboard_dir", dashboard)
self.compile_on_start = True
self._process_arg("compile_on_start", dashboard)
self.force_compile = False
self._process_arg("force_compile", dashboard)
self.profile_dashboard = False
self._process_arg("profile_dashboard", dashboard)
self.rss_feeds = None
self._process_arg("rss_feeds", dashboard)
self.fa4compatibility = False
self._process_arg("fa4compatibility", dashboard)
if "rss_feeds" in dashboard:
self.rss_feeds = []
for feed in dashboard["rss_feeds"]:
if feed["target"].count('.') != 1:
self.logger.warning("Invalid RSS feed target: %s", feed["target"])
else:
self.rss_feeds.append(feed)
self.rss_update = None
self._process_arg("rss_update", dashboard)
self.rss_last_update = None
# find dashboard dir
if self.dashboard_dir is None:
if self.config_dir is None:
self.dashboard_dir = utils.find_path("dashboards")
else:
self.dashboard_dir = os.path.join(self.config_dir, "dashboards")
self.javascript_dir = os.path.join(self.install_dir, "assets", "javascript")
self.template_dir = os.path.join(self.install_dir, "assets", "templates")
self.css_dir = os.path.join(self.install_dir, "assets", "css")
self.fonts_dir = os.path.join(self.install_dir, "assets", "fonts")
self.webfonts_dir = os.path.join(self.install_dir, "assets", "webfonts")
self.images_dir = os.path.join(self.install_dir, "assets", "images")
#
# Setup compile directories
#
if self.config_dir is None:
self.compile_dir = utils.find_path("compiled")
else:
self.compile_dir = os.path.join(self.config_dir, "compiled")
self.dashboard_obj = addashboard.Dashboard(self.config_dir, self.logging,
dash_compile_on_start=self.compile_on_start,
dash_force_compile=self.force_compile,
profile_dashboard=self.profile_dashboard,
dashboard_dir=self.dashboard_dir,
fa4compatibility=self.fa4compatibility,
transport=self.transport,
javascript_dir=self.javascript_dir,
template_dir=self.template_dir,
css_dir=self.css_dir,
fonts_dir=self.fonts_dir,
webfonts_dir=self.webfonts_dir,
images_dir=self.images_dir)
self.setup_dashboard_routes()
else:
self.logger.info("Dashboards Disabled")
#
# Finish up and start the server
#
#handler = self.app.make_handler()
#f = loop.create_server(handler, "0.0.0.0", int(self.port), ssl=context)
#loop.create_task(f)
if self.dashboard_obj is not None:
loop.create_task(self.update_rss())
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in HTTP module")
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
async def start_server(self):
self.logger.info("Running on port %s", self.port)
self.runner = web.AppRunner(self.app)
await self.runner.setup()
site = web.TCPSite(self.runner, '0.0.0.0', int(self.port))
await site.start()
async def stop_server(self):
self.logger.info("Shutting down webserver")
#
# We sjould do this nut it makes AD hang so ...
#
#await self.runner.cleanup()
async def add_response_headers(self, request, response):
for header, value in self.http['headers'].items():
response.headers[header] = value
    def stop(self):
        """Signal background loops (e.g. update_rss) to exit on their next check."""
        self.stopping = True
def _process_arg(self, arg, kwargs):
if kwargs:
if arg in kwargs:
setattr(self, arg, kwargs[arg])
@staticmethod
def check_password(password, hash):
return bcrypt.checkpw, str.encode(password), str.encode(hash)
async def forcelogon(self, request):
response = await self.logon_page(request)
return response
    async def logon_response(self, request):
        """Validate a submitted logon form and set the credential cookie.

        On a correct password, renders the admin page (if admin is enabled)
        or the dashboard list, and stores a bcrypt hash of the configured
        password in the ``adcreds`` cookie so later requests pass the
        ``@secure`` check. On a wrong password, re-renders the logon page.
        """
        try:
            data = await request.post()
            password = data["password"]
            if password == self.password:
                self.access.info("Succesful logon from %s", request.host)
                # The cookie carries a bcrypt hash of the password, never the
                # password itself; a fresh salt is generated per logon.
                hashed = bcrypt.hashpw(str.encode(self.password), bcrypt.gensalt(self.work_factor))
                if self.admin is not None:
                    response = await self._admin_page(request)
                else:
                    response = await self._list_dash(request)
                self.logger.debug("hashed=%s", hashed)
                # Set cookie to last for 1 year
                response.set_cookie("adcreds", hashed.decode("utf-8"), max_age=31536000)
            else:
                self.access.warning("Unsuccessful logon from %s", request.host)
                response = await self.logon_page(request)
            return response
        except:
            self.logger.warning('-' * 60)
            self.logger.warning("Unexpected error in logon_response()")
            self.logger.warning('-' * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning('-' * 60)
            return self.get_response(request, 500, "Server error in logon_response()")
# noinspection PyUnusedLocal
@secure
async def list_dash(self, request):
return await self._list_dash(request)
async def _list_dash(self, request):
response = await utils.run_in_executor(self, self.dashboard_obj.get_dashboard_list)
return web.Response(text=response, content_type="text/html")
@secure
async def load_dash(self, request):
name = request.match_info.get('name', "Anonymous")
params = request.query
skin = params.get("skin", "default")
recompile = params.get("recompile", False)
if recompile == '1':
recompile = True
response = await utils.run_in_executor(self, self.dashboard_obj.get_dashboard, name, skin, recompile)
return web.Response(text=response, content_type="text/html")
    async def update_rss(self):
        """Background task: periodically poll configured RSS feeds.

        Runs until ``self.stopping`` is set. Every ``rss_update`` seconds
        each feed is fetched (in an executor, since feedparser is blocking)
        and its parsed content is written into the ``rss`` entity in the
        ``admin`` namespace. Polls the stop flag once per second.
        """
        # Grab RSS Feeds
        if self.rss_feeds is not None and self.rss_update is not None:
            while not self.stopping:
                try:
                    # Only refresh when the update interval has elapsed.
                    if self.rss_last_update is None or (self.rss_last_update + self.rss_update) <= time.time():
                        self.rss_last_update = time.time()
                        for feed_data in self.rss_feeds:
                            feed = await utils.run_in_executor(self, feedparser.parse, feed_data["feed"])
                            # feedparser reports parse failures via "bozo_exception".
                            if "bozo_exception" in feed:
                                self.logger.warning("Error in RSS feed %s: %s", feed_data["feed"], feed["bozo_exception"])
                            else:
                                new_state = {"feed": feed}
                                # RSS Feeds always live in the admin namespace
                                await self.AD.state.set_state("rss", "admin", feed_data["target"], state=new_state)
                    # Short sleep so stop() is noticed promptly between updates.
                    await asyncio.sleep(1)
                except:
                    self.logger.warning('-' * 60)
                    self.logger.warning("Unexpected error in update_rss()")
                    self.logger.warning('-' * 60)
                    self.logger.warning(traceback.format_exc())
                    self.logger.warning('-' * 60)
    #
    # REST API
    #
    @securedata
    async def get_ad(self, request):
        """Liveness endpoint: always reports AppDaemon as active."""
        return web.json_response({"state": {"status": "active"}}, dumps=utils.convert_json)
@securedata
async def get_entity(self, request):
namespace = None
entity_id = None
try:
entity_id = request.match_info.get('entity')
namespace = request.match_info.get('namespace')
self.logger.debug("get_state() called, ns=%s, entity=%s", namespace, entity_id)
state = self.AD.state.get_entity(namespace, entity_id)
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in get_entity()")
self.logger.warning("Namespace: %s, entity: %s", namespace, entity_id)
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
return self.get_response(request, 500, "Unexpected error in get_entity()")
@securedata
async def get_namespace(self, request):
namespace = None
try:
namespace = request.match_info.get('namespace')
self.logger.debug("get_namespace() called, ns=%s", namespace)
state = self.AD.state.get_entity(namespace)
self.logger.debug("result = %s", state)
if state is None:
return self.get_response(request, 404, "Namespace Not Found")
return web.json_response({"state": state}, dumps=utils.convert_json)
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in get_namespace()")
self.logger.warning("Namespace: %s", namespace)
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
return self.get_response(request, 500, "Unexpected error in get_namespace()")
@securedata
async def get_namespace_entities(self, request):
namespace = None
try:
namespace = request.match_info.get('namespace')
self.logger.debug("get_namespace_entities() called, ns=%s", namespace)
state = self.AD.state.list_namespace_entities(namespace)
self.logger.debug("result = %s", state)
if state is None:
return self.get_response(request, 404, "Namespace Not Found")
return web.json_response({"state": state}, dumps=utils.convert_json)
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in get_namespace_entities()")
self.logger.warning("Namespace: %s", namespace)
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
return self.get_response(request, 500, "Unexpected error in get_namespace_entities()")
@securedata
async def get_namespaces(self, request):
try:
self.logger.debug("get_namespaces() called)")
state = await self.AD.state.list_namespaces()
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in get_namespaces()")
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
return self.get_response(request, 500, "Unexpected error in get_namespaces()")
@securedata
async def get_services(self, request):
try:
self.logger.debug("get_services() called)")
state = self.AD.services.list_services()
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in get_services()")
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
return self.get_response(request, 500, "Unexpected error in get_services()")
@securedata
async def get_state(self, request):
try:
self.logger.debug("get_state() called")
state = self.AD.state.get_entity()
if state is None:
self.get_response(request, 404, "State Not Found")
self.logger.debug("result = %s", state)
return web.json_response({"state": state}, dumps=utils.convert_json)
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in get_state()")
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
return self.get_response(request, 500, "Unexpected error in get_state()")
@securedata
async def get_logs(self, request):
try:
self.logger.debug("get_logs() called")
logs = await utils.run_in_executor(self, self.AD.logging.get_admin_logs)
return web.json_response({"logs": logs}, dumps=utils.convert_json)
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in get_logs()")
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
return self.get_response(request, 500, "Unexpected error in get_logs()")
# noinspection PyUnusedLocal
@securedata
async def call_service(self, request):
try:
try:
data = await request.json()
except json.decoder.JSONDecodeError:
return self.get_response(request, 400, "JSON Decode Error")
args = {}
namespace = request.match_info.get('namespace')
domain = request.match_info.get('domain')
service = request.match_info.get('service')
#
# Some value munging for dashboard
#
for key in data:
if key == "service":
pass
elif key == "rgb_color":
m = re.search('\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)', data[key])
if m:
r = m.group(1)
g = m.group(2)
b = m.group(3)
args["rgb_color"] = [r, g, b]
elif key == "xy_color":
m = re.search('\s*(\d+\.\d+)\s*,\s*(\d+\.\d+)', data[key])
if m:
x = m.group(1)
y = m.group(2)
args["xy_color"] = [x, y]
elif key == "json_args":
json_args = json.loads(data[key])
for k in json_args.keys():
args[k] = json_args[k]
else:
args[key] = data[key]
self.logger.debug("call_service() args = %s", args)
await self.AD.services.call_service(namespace, domain, service, args)
return web.Response(status=200)
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in call_service()")
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
return web.Response(status=500)
@securedata
async def fire_event(self, request):
try:
try:
data = await request.json()
except json.decoder.JSONDecodeError:
return self.get_response(request, 400, "JSON Decode Error")
args = {}
namespace = request.match_info.get('namespace')
event = request.match_info.get('event')
#
# Some value munging for dashboard
#
for key in data:
if key == "event":
pass
else:
args[key] = data[key]
self.logger.debug("fire_event() args = %s", args)
await self.AD.events.fire_event(namespace, event, **args)
return web.Response(status=200)
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error in fire_event()")
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
return web.Response(status=500)
# noinspection PyUnusedLocal
async def not_found(self, request):
return self.get_response(request, 404, "Not Found")
# Stream Handling
async def stream_update(self, namespace, data):
#self.logger.debug("stream_update() %s:%s", namespace, data)
data["namespace"] = namespace
self.AD.thread_async.call_async_no_wait(self.stream.send_update, data)
    # Routes, Status and Templates
    def setup_api_routes(self):
        """Register the REST API routes on the aiohttp application.

        NOTE: aiohttp matches routes in registration order, so the more
        specific paths are added before the generic catch-alls
        (``/api/appdaemon/{app}`` and ``/api/appdaemon``).
        """
        self.app.router.add_post('/api/appdaemon/service/{namespace}/{domain}/{service}', self.call_service)
        self.app.router.add_post('/api/appdaemon/event/{namespace}/{event}', self.fire_event)
        self.app.router.add_get('/api/appdaemon/service/', self.get_services)
        self.app.router.add_get('/api/appdaemon/state/{namespace}/{entity}', self.get_entity)
        self.app.router.add_get('/api/appdaemon/state/{namespace}', self.get_namespace)
        self.app.router.add_get('/api/appdaemon/state/{namespace}/', self.get_namespace_entities)
        self.app.router.add_get('/api/appdaemon/state/', self.get_namespaces)
        self.app.router.add_get('/api/appdaemon/state', self.get_state)
        self.app.router.add_get('/api/appdaemon/logs', self.get_logs)
        # Catch-all: any other POST under /api/appdaemon/ dispatches to an app.
        self.app.router.add_post('/api/appdaemon/{app}', self.call_api)
        self.app.router.add_get('/api/appdaemon', self.get_ad)
    def setup_http_routes(self):
        """Register the general HTTP routes (logon, static assets, root page).

        The root ("/") handler depends on configuration: admin page when
        admin is enabled, dashboard list when dashboards are enabled,
        otherwise an error page.
        """
        # Browsers request these automatically; answer 404 without logging noise.
        self.app.router.add_get('/favicon.ico', self.not_found)
        self.app.router.add_get('/{gfx}.png', self.not_found)
        self.app.router.add_post('/logon_response', self.logon_response)
        # Add static path for JavaScript
        self.app.router.add_static('/javascript', self.javascript_dir)
        # Add static path for fonts
        self.app.router.add_static('/fonts', self.fonts_dir)
        # Add static path for webfonts
        self.app.router.add_static('/webfonts', self.webfonts_dir)
        # Add static path for images
        self.app.router.add_static('/images', self.images_dir)
        # Add static path for css
        self.app.router.add_static('/css', self.css_dir)
        if self.admin is not None:
            self.app.router.add_get('/', self.admin_page)
        elif self.dashboard is not None:
            self.app.router.add_get('/', self.list_dash)
        else:
            self.app.router.add_get('/', self.error_page)
        #
        # For App based Web Server
        #
        self.app.router.add_get('/app/{route}', self.app_webserver)
    def setup_dashboard_routes(self):
        """Register the dashboard routes and compiled-asset static paths.

        Only called when dashboards are enabled (``self.dashboard_obj`` set).
        ``/{name}`` is a catch-all, so it must be registered after ``/list``.
        """
        self.app.router.add_get('/list', self.list_dash)
        self.app.router.add_get('/{name}', self.load_dash)
        # Setup Templates
        self.app.router.add_static('/compiled_javascript', self.dashboard_obj.compiled_javascript_dir)
        self.app.router.add_static('/compiled_css', self.dashboard_obj.compiled_css_dir)
        # Add path for custom_css if it exists
        custom_css = os.path.join(self.dashboard_obj.config_dir, "custom_css")
        if os.path.isdir(custom_css):
            self.app.router.add_static('/custom_css', custom_css)
# API
async def terminate_app(self, name):
if name in self.endpoints:
del self.endpoints[name]
if name in self.app_routes:
del self.app_routes[name]
def get_response(self, request, code, error):
res = "<html><head><title>{} {}</title></head><body><h1>{} {}</h1>Error in API Call</body></html>".format(code, error, code, error)
app = request.match_info.get('app', "system")
if code == 200:
self.access.info("API Call to %s: status: %s", app, code)
else:
self.access.warning("API Call to %s: status: %s, %s", app, code, error)
return web.Response(body=res, status=code)
def get_web_response(self, request, code, error):
res = "<html><head><title>{} {}</title></head><body><h1>{} {}</h1>Error in Web Service Call</body></html>".format(code, error, code, error)
app = request.match_info.get('app', "system")
if code == 200:
self.access.info("Web Call to %s: status: %s", app, code)
else:
self.access.warning("Web Call to %s: status: %s, %s", app, code, error)
return web.Response(text=res, content_type="text/html")
    @securedata
    async def call_api(self, request):
        """Dispatch POST /api/appdaemon/{app} to the app's registered endpoint.

        Returns 400 on malformed JSON, 404 when no endpoint matches the app
        name, otherwise the (result, code) pair returned by the endpoint.
        """
        code = 200
        ret = ""
        app = request.match_info.get('app')
        try:
            args = await request.json()
        except json.decoder.JSONDecodeError:
            return self.get_response(request, 400, "JSON Decode Error")
        try:
            ret, code = await self.dispatch_app_by_name(app, args)
        except:
            self.logger.error('-' * 60)
            self.logger.error("Unexpected error during API call")
            self.logger.error('-' * 60)
            self.logger.error(traceback.format_exc())
            self.logger.error('-' * 60)
            # NOTE(review): on dispatch failure, code/ret keep their defaults
            # (200, ""), so the client still receives a 200 — confirm intended.
        if code == 404:
            return self.get_response(request, 404, "App Not Found")
        response = "OK"
        self.access.info("API Call to %s: status: %s %s", app, code, response)
        return web.json_response(ret, status=code, dumps=utils.convert_json)
# Routes, Status and Templates
async def register_endpoint(self, cb, name):
handle = uuid.uuid4().hex
if name not in self.endpoints:
self.endpoints[name] = {}
self.endpoints[name][handle] = {"callback": cb, "name": name}
return handle
async def unregister_endpoint(self, handle, name):
if name in self.endpoints and handle in self.endpoints[name]:
del self.endpoints[name][handle]
async def dispatch_app_by_name(self, name, args):
callback = None
for app in self.endpoints:
for handle in self.endpoints[app]:
if self.endpoints[app][handle]["name"] == name:
callback = self.endpoints[app][handle]["callback"]
if callback is not None:
if asyncio.iscoroutinefunction(callback):
return await callback(args)
else:
return await utils.run_in_executor(self, callback, args)
else:
return '', 404
#
# App based Web Server
#
async def register_app_route(self, cb, route, name, **kwargs):
if not asyncio.iscoroutinefunction(cb): # must be async function
self.logger.warning("Could not Register Callback for %s, using Route %s as Web Server Route. Callback must be Async", name, route)
return
handle = uuid.uuid4().hex
if name not in self.app_routes:
self.app_routes[name] = {}
token = kwargs.get("token")
self.app_routes[name][handle] = {"callback": cb, "route": route, "token": token}
return handle
async def unregister_app_route(self, handle, name):
if name in self.app_routes and handle in self.app_routes[name]:
del self.app_routes[name][handle]
    @app_secure
    async def app_webserver(self, request):
        """Serve GET /app/{route} by dispatching to a registered app callback.

        Scans all registered app routes for a matching route; a per-route
        token (if set) must match the ``?token=`` query parameter, else 401.
        The callback runs as a tracked future so the app's futures registry
        can cancel it. Returns 404 when no route matches, 503 on
        cancellation or callback error.
        """
        route = request.match_info.get('route')
        token = request.query.get("token")
        code = 404
        error = "Requested Server does not exist"
        callback = None
        for name in self.app_routes:
            if callback != None:  # a callback has been collected
                break
            for handle in self.app_routes[name]:
                app_route = self.app_routes[name][handle]["route"]
                app_token = self.app_routes[name][handle]["token"]
                if app_route == route :
                    # Route matched — enforce its token (if any) before accepting.
                    if app_token != None and app_token != token:
                        return self.get_web_response(request, "401", "Unauthorized")
                    callback = self.app_routes[name][handle]["callback"]
                    break
        if callback is not None:
            # `name` here is the app whose loop iteration found the callback.
            self.access.debug("Web Call to %s for %s", route, name)
            try:
                f = asyncio.ensure_future(callback(request))
                # Track the future so it can be cancelled when the app stops.
                self.AD.futures.add_future(name, f)
                return await f
            except asyncio.CancelledError:
                code = 503
                error = "Request was Cancelled"
            except:
                self.access.warning('-' * 60)
                self.access.warning("Unexpected error during Web call")
                self.access.warning('-' * 60)
                self.access.warning(traceback.format_exc())
                self.access.warning('-' * 60)
                code = 503
                error = "Request had an Error"
        return self.get_web_response(request, str(code), error)
#
# Admin
#
@secure
async def admin_page(self, request):
return await self._admin_page(request)
# Insecure version
async def _admin_page(self, request):
response = await self.admin_obj.admin_page(request.scheme, request.host)
return web.Response(text=response, content_type="text/html")
async def logon_page(self, request):
response = await utils.run_in_executor(self, self.generate_logon_page, request.scheme, request.host)
return web.Response(text=response, content_type="text/html")
async def error_page(self, request):
response = await utils.run_in_executor(self, self.generate_error_page, request.scheme, request.host)
return web.Response(text=response, content_type="text/html")
def generate_logon_page(self, scheme, url):
try:
params = {}
env = Environment(
loader=FileSystemLoader(self.template_dir),
autoescape=select_autoescape(['html', 'xml'])
)
template = env.get_template("logon.jinja2")
rendered_template = template.render(params)
return rendered_template
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error creating logon page")
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
def generate_error_page(self, scheme, url):
try:
params = {}
env = Environment(
loader=FileSystemLoader(self.template_dir),
autoescape=select_autoescape(['html', 'xml'])
)
template = env.get_template("error.jinja2")
rendered_template = template.render(params)
return rendered_template
except:
self.logger.warning('-' * 60)
self.logger.warning("Unexpected error creating logon page")
self.logger.warning('-' * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning('-' * 60)
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pfor and for_loop."""
# pylint: disable=g-direct-tensorflow-import
import functools
import sys
import time
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_list_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class PForTest(PForTestCase):
  """Core pfor / vectorized_map behavior tests."""

  def test_op_conversion_fallback_to_while_loop(self):
    # Note that we used top_k op for this test. If a converter gets defined for
    # it, we will need to find another op for which a converter has not been
    # defined.
    x = random_ops.random_uniform([3, 2, 4])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return nn.top_k(x_i)

    with self.assertRaisesRegex(ValueError, "No pfor vectorization"):
      self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=False)
    self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=True)

  def test_nested_defun(self):

    def loop_fn(a):
      range(array_ops.constant(5))
      return 1 + 1

    @def_function.function
    def f():
      return self._test_loop_fn(loop_fn, 2)

    # Fix: the original assertion had an unbalanced closing parenthesis
    # ("f()))"), which was a SyntaxError.
    self.assertAllEqual(2, f())

  def test_parallel_iterations(self):
    for parallel_iterations in [2, 3, 8, 10]:
      x = random_ops.random_uniform([8, 3])

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        return array_ops.gather(x, i)

      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 8, parallel_iterations=parallel_iterations)
      self._test_loop_fn(
          loop_fn,
          4 * constant_op.constant(2),
          parallel_iterations=parallel_iterations)

  def test_parallel_iterations_preserves_static_shape(self):
    for parallel_iterations in [2, 3, 8, 10]:
      x = pfor_control_flow_ops.pfor(
          lambda _: random_ops.random_uniform([2, 3]),
          8,
          parallel_iterations=parallel_iterations)
      self.assertAllEqual(x.shape, [8, 2, 3])

  def test_parallel_iterations_zero(self):
    with self.assertRaisesRegex(ValueError, "positive integer"):
      pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=0)
    with self.assertRaisesRegex(TypeError, "positive integer"):
      pfor_control_flow_ops.for_loop(
          lambda i: 1, dtypes.int32, 8, parallel_iterations=0)

  def test_parallel_iterations_one(self):
    with self.assertRaisesRegex(ValueError, "Use `for_loop` instead"):
      pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=1)

  def test_vectorized_map(self):

    def compute(x):
      return math_ops.reduce_mean(x, axis=0, keepdims=True)

    result = pfor_control_flow_ops.vectorized_map(compute,
                                                  array_ops.ones((10, 5, 3)))
    self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))

  def test_vectorized_map_with_dynamic_shape(self):

    def compute(x):
      return math_ops.reduce_mean(x, axis=0, keepdims=True)

    x = array_ops.placeholder_with_default(
        array_ops.ones((10, 5, 3)), shape=None)
    result = pfor_control_flow_ops.vectorized_map(compute, x)
    self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))

  def test_where_shape(self):

    @def_function.function
    def f():
      a = constant_op.constant([[1.], [1.]])
      b = constant_op.constant([1.])
      result = pfor_control_flow_ops.vectorized_map(
          lambda x: array_ops.where(x > 0, x, b), a)
      return result.shape

    self.assertAllEqual([2, 1], f())

  def test_vectorized_map_broadcasts_unit_dimensions(self):
    convert_with_static_shape = ops.convert_to_tensor
    convert_with_dynamic_shape = (
        lambda x: array_ops.placeholder_with_default(x, shape=None))
    for convert in (convert_with_static_shape, convert_with_dynamic_shape):
      a = convert([3.1])
      b = convert([-2., 6., 9.])

      # One elem with leading unit dimension.
      a_plus_1 = pfor_control_flow_ops.vectorized_map(lambda a: a + 1, a)
      self.assertAllEqual(*self.evaluate((a_plus_1, a + 1)))

      # Two elems, both with leading unit dimension.
      a_plus_a = pfor_control_flow_ops.vectorized_map(sum, (a, a))
      self.assertAllEqual(*self.evaluate((a_plus_a, a + a)))

      # Elem w/ unit dimension broadcast against elem with batch dim.
      a_plus_b = pfor_control_flow_ops.vectorized_map(sum, (a, b))
      self.assertAllEqual(*self.evaluate((a_plus_b, a + b)))

  def test_vectorized_map_example_1(self):

    def outer_product(a):
      return math_ops.tensordot(a, a, 0)

    batch_size = 100
    a = array_ops.ones((batch_size, 32, 32))
    c = pfor_control_flow_ops.vectorized_map(outer_product, a)
    self.assertAllEqual((batch_size, 32, 32, 32, 32), c.shape)

  def test_disable_tf_function(self):
    def_function.run_functions_eagerly(True)
    # vectorized_map should ignore disabling tf.functions
    self.assertTrue(def_function.functions_run_eagerly())
    self.assertAllEqual([0, 1, 4, 9],
                        pfor_control_flow_ops.vectorized_map(
                            lambda x: x * x, math_ops.range(4)))
    self.assertTrue(def_function.functions_run_eagerly())
    def_function.run_functions_eagerly(False)
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesTest(PForTestCase):
  """Vectorization tests for loop functions returning IndexedSlices."""

  def test_indexed_slices(self):

    def loop_fn(idx):
      return indexed_slices.IndexedSlices(
          indices=idx, values=array_ops.reshape(idx, [1]), dense_shape=[3, 1])

    self._test_loop_fn(loop_fn, 2)

  def test_indexed_slices_components(self):

    def loop_fn(idx):
      slices = indexed_slices.IndexedSlices(
          indices=idx, values=array_ops.reshape(idx, [1]), dense_shape=[3, 1])
      # Note that returning the components inside the slice avoids
      # densification, which may be more efficient.
      return slices.values, slices.indices

    self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class ReductionTest(PForTestCase):
  """Tests for pfor_config.reduce* cross-iteration reductions."""

  def test_reduce(self):

    def reduce_fn(p, q):
      return math_ops.reduce_mean(p + q, axis=0)

    x = random_ops.random_uniform([4, 3, 2])
    y = random_ops.random_uniform([4, 3, 2])

    def loop_fn(i, pfor_config):
      x_i = array_ops.gather(x, i)
      y_i = array_ops.gather(y, i)
      reduced = pfor_config.reduce(reduce_fn, x_i, y_i)
      return reduced + x_i

    output = pfor_control_flow_ops.pfor(loop_fn, 4)
    # The reduction inside the loop sees the stacked values, so the expected
    # result is the reduction over the full tensors plus the per-iteration x.
    ans = reduce_fn(x, y) + x
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)

  def test_reduce_concat(self):
    x = random_ops.random_uniform([8, 3])

    def loop_fn(i, pfor_config):
      x_i = array_ops.gather(x, i)
      # reduce_concat exposes the vectorized (stacked) value to each iteration.
      vectorized_value = pfor_config.reduce_concat(x_i)
      mean_value = math_ops.reduce_mean(vectorized_value, axis=0)
      return x_i - mean_value

    output = pfor_control_flow_ops.pfor(loop_fn, 8)
    ans = x - math_ops.reduce_mean(x, axis=0)
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)

  def test_reduce_mean(self):
    x = random_ops.random_uniform([8, 3])

    def loop_fn(i, pfor_config):
      x_i = array_ops.gather(x, i)
      return x_i - pfor_config.reduce_mean(x_i)

    output = pfor_control_flow_ops.pfor(loop_fn, 8)
    ans = x - math_ops.reduce_mean(x, axis=0)
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)

  def test_reduce_sum(self):
    x = random_ops.random_uniform([8, 3])

    def loop_fn(i, pfor_config):
      x_i = array_ops.gather(x, i)
      return x_i - pfor_config.reduce_sum(x_i)

    output = pfor_control_flow_ops.pfor(loop_fn, 8)
    ans = x - math_ops.reduce_sum(x, axis=0)
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)

  def test_reduce_class(self):
    # A callable class (not a plain function) must also receive pfor_config.
    x = random_ops.random_uniform([8, 3])

    class LoopFn:

      def __init__(self):
        pass

      def __call__(self, i, pfor_config):
        x_i = array_ops.gather(x, i)
        return x_i - pfor_config.reduce_mean(x_i)

    output = pfor_control_flow_ops.pfor(LoopFn(), 8)
    ans = x - math_ops.reduce_mean(x, axis=0)
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)

  def test_reduce_functools_partial(self):
    # A functools.partial wrapping the loop fn must also receive pfor_config.
    x = random_ops.random_uniform([8, 3])

    def fn(i, pfor_config, dummy=None):
      del dummy
      x_i = array_ops.gather(x, i)
      return x_i - pfor_config.reduce_mean(x_i)

    loop_fn = functools.partial(fn, dummy=1)
    output = pfor_control_flow_ops.pfor(loop_fn, 8)
    ans = x - math_ops.reduce_mean(x, axis=0)
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)

  def test_parallel_iterations(self):
    # Reductions are incompatible with splitting the loop into chunks.
    x = random_ops.random_uniform([8, 3])

    def loop_fn(i, pfor_config):
      x_i = array_ops.gather(x, i)
      return pfor_config.reduce_sum(x_i)

    with self.assertRaisesRegex(ValueError,
                                "`parallel_iterations` currently unsupported"):
      pfor_control_flow_ops.pfor(loop_fn, 8, parallel_iterations=2)

  def test_var_loop_len(self):
    if context.executing_eagerly():
      self.skipTest("Variable length not possible under eager execution.")
    x = random_ops.random_uniform([8, 3])

    def loop_fn(i, pfor_config):
      return pfor_config.reduce_sum(array_ops.gather(x, i))

    num_iters = array_ops.placeholder(dtypes.int32)
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    with self.cached_session() as sess:
      sess.run(pfor, feed_dict={num_iters: 8})
@test_util.run_all_in_graph_and_eager_modes
class BitwiseTest(PForTestCase):
  """Vectorization tests for elementwise bitwise ops."""

  def test_unary_cwise(self):
    for op in [bitwise_ops.invert]:
      x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x1 = array_ops.gather(x, i)
        return op(x1)

      # pylint: enable=cell-var-from-loop

      self._test_loop_fn(loop_fn, 3)

  def test_binary_cwise(self):
    binary_ops = [
        bitwise_ops.bitwise_and,
        bitwise_ops.bitwise_or,
        bitwise_ops.bitwise_xor,
        bitwise_ops.left_shift,
        bitwise_ops.right_shift,
    ]
    for op in binary_ops:
      x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
      y = random_ops.random_uniform([3, 5], maxval=10, dtype=dtypes.int32)
      output_dtypes = []

      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x1 = array_ops.gather(x, i)
        y1 = array_ops.gather(y, i)
        # Mix loop-variant (x1, y1) and loop-invariant (x, y) operands to
        # exercise both stacked and unstacked conversion paths.
        outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
        del output_dtypes[:]
        output_dtypes.extend(t.dtype for t in outputs)
        return outputs

      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 3)
@test_util.run_all_in_graph_and_eager_modes
class ImageTest(PForTestCase):
  """Vectorization tests for per-image adjustment ops."""
  def test_adjust_contrast(self):
    batch = random_ops.random_uniform([3, 2, 4, 4, 3])
    def adjust(i):
      return image_ops.adjust_contrast(array_ops.gather(batch, i), 2.0)
    self._test_loop_fn(adjust, 3)
  def test_adjust_hue(self):
    batch = random_ops.random_uniform([3, 2, 4, 4, 3])
    def adjust(i):
      return image_ops.adjust_hue(array_ops.gather(batch, i), .25)
    self._test_loop_fn(adjust, 3)
  def test_adjust_saturation(self):
    batch = random_ops.random_uniform([3, 2, 4, 4, 3])
    def adjust(i):
      return image_ops.adjust_saturation(array_ops.gather(batch, i), 0.1)
    self._test_loop_fn(adjust, 3)
@test_util.run_all_in_graph_and_eager_modes
class NNTest(PForTestCase):
  """Vectorization tests for nn ops (conv, pooling, batch norm, softmax)."""
  def test_conv2d(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    filt = random_ops.random_uniform([3, 3, 3, 7])
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return nn.conv2d(
          x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_conv2d_backprop_input(self):
    x_shape = [2, 12, 12, 3]
    filt = random_ops.random_uniform([3, 3, 3, 7])
    grad = random_ops.random_uniform([3, 2, 5, 5, 7])
    def loop_fn(i):
      grad1 = array_ops.gather(grad, i)
      return nn.conv2d_backprop_input(
          x_shape,
          filt,
          grad1,
          strides=[1, 2, 2, 1],
          padding="VALID",
          data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_conv2d_backprop_filter(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    # x_0 is loop-invariant; tests the unstacked-input path alongside x_i.
    x_0 = array_ops.gather(x, 0)
    filter_sizes = [3, 3, 3, 7]
    grad = random_ops.random_uniform([3, 2, 5, 5, 7])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      grad_i = array_ops.gather(grad, i)
      return [
          nn.conv2d_backprop_filter(
              inp,
              filter_sizes,
              grad_i,
              strides=[1, 2, 2, 1],
              padding="VALID",
              data_format="NHWC") for inp in [x_i, x_0]
      ]
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    filt = random_ops.random_uniform([3, 3, 3, 3, 2])
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      filt1 = array_ops.gather(filt, i)
      return nn.depthwise_conv2d_native(
          x1, filt1, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native_backprop_input(self):
    x_shape = [2, 12, 12, 3]
    filt = random_ops.random_uniform([3, 3, 3, 3, 2])
    grad = random_ops.random_uniform([3, 2, 5, 5, 6])
    def loop_fn(i):
      grad1 = array_ops.gather(grad, i)
      filt1 = array_ops.gather(filt, i)
      return nn.depthwise_conv2d_native_backprop_input(
          x_shape,
          filt1,
          grad1,
          strides=[1, 2, 2, 1],
          padding="VALID",
          data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native_backprop_filter(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    filter_sizes = [3, 3, 3, 2]
    grad = random_ops.random_uniform([3, 2, 5, 5, 6])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      grad_i = array_ops.gather(grad, i)
      return nn.depthwise_conv2d_native_backprop_filter(
          x_i,
          filter_sizes,
          grad_i,
          strides=[1, 2, 2, 1],
          padding="VALID",
          data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native_nchw(self):
    if not test_util.is_gpu_available():
      self.skipTest("NCHW only works on GPU")
    x = random_ops.random_uniform([3, 2, 3, 12, 12])
    filt = random_ops.random_uniform([3, 3, 3, 3, 2])
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      filt1 = array_ops.gather(filt, i)
      return nn.depthwise_conv2d_native(
          x1, filt1, strides=[1, 1, 2, 2], padding="VALID", data_format="NCHW")
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native_backprop_input_nchw(self):
    if not test_util.is_gpu_available():
      self.skipTest("NCHW only works on GPU")
    x_shape = [2, 3, 12, 12]
    filt = random_ops.random_uniform([3, 3, 3, 3, 2])
    grad = random_ops.random_uniform([3, 2, 6, 5, 5])
    def loop_fn(i):
      grad1 = array_ops.gather(grad, i)
      filt1 = array_ops.gather(filt, i)
      return nn.depthwise_conv2d_native_backprop_input(
          x_shape,
          filt1,
          grad1,
          strides=[1, 1, 2, 2],
          padding="VALID",
          data_format="NCHW")
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native_backprop_filter_nchw(self):
    if not test_util.is_gpu_available():
      self.skipTest("NCHW only works on GPU")
    x = random_ops.random_uniform([3, 2, 3, 12, 12])
    filter_sizes = [3, 3, 3, 2]
    grad = random_ops.random_uniform([3, 2, 6, 5, 5])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      grad_i = array_ops.gather(grad, i)
      return nn.depthwise_conv2d_native_backprop_filter(
          x_i,
          filter_sizes,
          grad_i,
          strides=[1, 1, 2, 2],
          padding="VALID",
          data_format="NCHW")
    self._test_loop_fn(loop_fn, 3)
  def test_roll(self):
    x = random_ops.random_uniform([3, 6, 7])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return manip_ops.roll(x_i, 3, axis=1)
    self._test_loop_fn(loop_fn, 3)
  def test_ensure_shape(self):
    x = random_ops.random_uniform([3, 6, 7])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.ensure_shape(x_i, [6, 7])
    self._test_loop_fn(loop_fn, 3)
  def test_loop_variant_roll_shift(self):
    # Shift amounts depend on the loop index (loop-variant), including a
    # repeated axis (2 appears twice).
    x = random_ops.random_uniform([3, 5, 6, 7])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return manip_ops.roll(x_i, [i - 2, -1, i], axis=[1, 2, 2])
    self._test_loop_fn(loop_fn, 3)
  def test_loop_variant_roll_scalar_shift(self):
    x = random_ops.random_uniform([5, 5, 6])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return manip_ops.roll(x_i, i, axis=0)
    self._test_loop_fn(loop_fn, 5)
  def test_avg_pool(self):
    # persistent tape: gradients are taken inside loop_fn per iteration.
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.avg_pool(
            x1,
            ksize,
            strides=[1, 2, 2, 1],
            padding="VALID",
            data_format="NHWC")
        loss = nn.l2_loss(output)
      return output, g.gradient(loss, x1)
    self._test_loop_fn(loop_fn, 3)
  def test_avg_pool3d(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([5, 3, 7, 6, 6, 5])
      g.watch(x)
      ksize = [1, 2, 2, 2, 1]
      strides = [1, 2, 2, 2, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.avg_pool3d(
            x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
        loss = nn.l2_loss(output)
      return output, g.gradient(loss, x1)
    self._test_loop_fn(loop_fn, 3)
  def test_max_pool(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]
      strides = [1, 2, 2, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.max_pool(
            x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
        loss = nn.l2_loss(output)
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      # Second-order gradient exercises MaxPoolGradGrad vectorization.
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad
    self._test_loop_fn(loop_fn, 3)
  def test_max_pool_v2(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]
      strides = [1, 2, 2, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = gen_nn_ops.max_pool_v2(
            x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
        loss = nn.l2_loss(output)
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad
    self._test_loop_fn(loop_fn, 3)
  def test_max_pool3d(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 1, 3, 3, 1]
      strides = [1, 1, 2, 2, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.max_pool3d(
            x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
        loss = nn.l2_loss(output)
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad
    self._test_loop_fn(loop_fn, 3)
  def test_fused_batch_norm(self):
    data_formats = ["NHWC"]
    if test.is_gpu_available():
      data_formats.append("NCHW")
    for is_training in (True, False):
      for data_format in data_formats:
        with backprop.GradientTape(persistent=True) as g:
          if data_format == "NCHW":
            x = random_ops.random_uniform([3, 1, 2, 5, 5])
          else:
            x = random_ops.random_uniform([3, 1, 5, 5, 2])
          g.watch(x)
          scale = random_ops.random_uniform([2])
          g.watch(scale)
          offset = random_ops.random_uniform([2])
          g.watch(offset)
          # mean/variance are only supplied in inference mode.
          mean = None if is_training else random_ops.random_uniform([2])
          variance = None if is_training else random_ops.random_uniform([2])
        # pylint: disable=cell-var-from-loop
        def loop_fn(i):
          with g:
            x1 = array_ops.gather(x, i)
            outputs = nn.fused_batch_norm(
                x1,
                scale,
                offset,
                mean=mean,
                variance=variance,
                epsilon=0.01,
                data_format=data_format,
                is_training=is_training)
            outputs = list(outputs)
            # We only test the first value of outputs when is_training is
            # False. It looks like CPU and GPU have different outputs for
            # batch_mean and batch_variance for this case.
            if not is_training:
              outputs[1] = constant_op.constant(0.)
              outputs[2] = constant_op.constant(0.)
            loss = nn.l2_loss(outputs[0])
          if is_training:
            gradients = g.gradient(loss, [x1, scale, offset])
          else:
            gradients = [constant_op.constant(0.)] * 3
          return outputs + gradients
        # pylint: enable=cell-var-from-loop
        self._test_loop_fn(loop_fn, 3)
  def test_log_softmax(self):
    logits = random_ops.random_uniform([3, 2, 4])
    def loop_fn(i):
      logits_i = array_ops.gather(logits, i)
      # Cover default, explicit leading, and explicit trailing axis.
      return (nn.log_softmax(logits_i), nn.log_softmax(logits_i, axis=0),
              nn.log_softmax(logits_i, axis=-1))
    self._test_loop_fn(loop_fn, 3)
  def test_softmax(self):
    logits = random_ops.random_uniform([3, 2, 4])
    def loop_fn(i):
      logits_i = array_ops.gather(logits, i)
      return (nn.softmax(logits_i), nn.softmax(logits_i, axis=0),
              nn.softmax(logits_i, axis=-1))
    self._test_loop_fn(loop_fn, 3)
  def test_softmax_cross_entropy_with_logits(self):
    with backprop.GradientTape(persistent=True) as g:
      logits = random_ops.random_uniform([3, 2, 4])
      g.watch(logits)
      labels = random_ops.random_uniform([3, 2, 4])
      # Normalize labels to valid per-class probability distributions.
      labels /= math_ops.reduce_sum(labels, axis=[2], keepdims=True)
    def loop_fn(i):
      with g:
        logits_i = array_ops.gather(logits, i)
        labels_i = array_ops.gather(labels, i)
        loss = nn.softmax_cross_entropy_with_logits(
            labels=labels_i, logits=logits_i)
        total_loss = math_ops.reduce_sum(loss)
      return loss, g.gradient(total_loss, logits_i)
    self._test_loop_fn(loop_fn, 3)
  def test_sparse_softmax_cross_entropy_with_logits(self):
    logits = random_ops.random_uniform([3, 2, 4])
    labels = random_ops.random_uniform(
        shape=[3, 2], maxval=4, dtype=dtypes.int32)
    def loop_fn(i):
      logits_i = array_ops.gather(logits, i)
      labels_i = array_ops.gather(labels, i)
      loss = nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels_i, logits=logits_i)
      return loss
    self._test_loop_fn(loop_fn, 3)
class RandomTest(PForTestCase):
  """Vectorization tests for stateful random ops (shape-only checks)."""
  # The random values generated in the two implementations are not guaranteed to
  # match. So we only check the returned shapes.
  def run_and_assert_equal(self, targets1, targets2, rtol=1e-4, atol=1e-5):
    # Overrides the base comparison: assert shape equality only, since values
    # from the vectorized and per-iteration runs use independent random state.
    outputs = self._run_targets(targets1, targets2)
    n = len(outputs) // 2
    for i in range(n):
      self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)
  def test_random_uniform(self):
    def loop_fn(_):
      return random_ops.random_uniform([3])
    self._test_loop_fn(loop_fn, 5)
  def test_random_uniform_int(self):
    def loop_fn(_):
      return random_ops.random_uniform([3], maxval=1, dtype=dtypes.int32)
    self._test_loop_fn(loop_fn, 5)
  def test_random_standard_normal(self):
    def loop_fn(_):
      return random_ops.random_normal([3])
    self._test_loop_fn(loop_fn, 5)
  def test_truncated_normal(self):
    def loop_fn(_):
      return random_ops.truncated_normal([3])
    self._test_loop_fn(loop_fn, 5)
  def test_random_gamma_invariant_alpha(self):
    def loop_fn(_):
      return random_ops.random_gamma([3], alpha=[0.5])
    self._test_loop_fn(loop_fn, 5)
  def test_random_gamma_varying_alpha(self):
    alphas = math_ops.exp(random_ops.random_normal([5, 3, 2]))
    def loop_fn(i):
      alphas_i = array_ops.gather(alphas, i)
      # Test both scalar and non-scalar params and shapes.
      return (random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[]),
              random_ops.random_gamma(alpha=alphas_i, shape=[]),
              random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[3]),
              random_ops.random_gamma(alpha=alphas_i, shape=[3]))
    self._test_loop_fn(loop_fn, 5)
  def test_random_poisson_v2_invariant_rate(self):
    def loop_fn(_):
      return random_ops.random_poisson(lam=[1.3], shape=[3])
    self._test_loop_fn(loop_fn, 5)
  def test_random_poisson_v2_varying_rate(self):
    rates = math_ops.exp(random_ops.random_normal([5, 3, 2]))
    def loop_fn(i):
      rates_i = array_ops.gather(rates, i)
      # Test both scalar and non-scalar params and shapes.
      return (random_ops.random_poisson(lam=rates_i[0, 0], shape=[]),
              random_ops.random_poisson(lam=rates_i, shape=[]),
              random_ops.random_poisson(lam=rates_i[0, 0], shape=[3]),
              random_ops.random_poisson(lam=rates_i, shape=[3]))
    self._test_loop_fn(loop_fn, 5)
  def test_random_multinomial_invariant_logits(self):
    def loop_fn(_):
      return random_ops.categorical(logits=[[1., -1.]], num_samples=3)
    self._test_loop_fn(loop_fn, 5)
  def test_random_multinomial_varying_logits(self):
    logits = random_ops.random_normal([5, 3, 2])
    def loop_fn(i):
      logits_i = array_ops.gather(logits, i)
      return random_ops.categorical(logits_i, num_samples=3)
    self._test_loop_fn(loop_fn, 5)
class StatelessRandomTest(PForTestCase):
  """Vectorization tests for stateless random ops (shape-only checks)."""
  # This test currently only tests that the vectorized and non-vectorized
  # outputs have same shapes. This is needed since under XLA compilation,
  # stateless random numbers can generate different random numbers.
  # TODO(agarwal): switch to checking for actual values matching once
  # b/149402339 is resolved.
  def run_and_assert_equal(self, targets1, targets2, rtol=1e-4, atol=1e-5):
    # Shape-only comparison; see class comment for why values may differ.
    outputs = self._run_targets(targets1, targets2)
    n = len(outputs) // 2
    for i in range(n):
      self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)
  # TODO(agarwal): add tests for other random functions
  def test_multinomial(self):
    seeds = [[1, 2], [3, 4]]
    logits = random_ops.random_uniform([2, 3, 4])
    def loop_fn(i):
      # Exercise all combinations of loop-variant/invariant logits and seeds.
      logits_0 = array_ops.gather(logits, 0)
      logits_i = array_ops.gather(logits, i)
      seeds_0 = array_ops.gather(seeds, 0)
      seeds_i = array_ops.gather(seeds, i)
      return (stateless_random_ops.stateless_categorical(
          logits=logits_i, num_samples=3, seed=seeds_i),
              stateless_random_ops.stateless_categorical(
                  logits=logits_i, num_samples=3, seed=seeds_0),
              stateless_random_ops.stateless_categorical(
                  logits=logits_0, num_samples=3, seed=seeds_i),
              stateless_random_ops.stateless_categorical(
                  logits=logits_0, num_samples=3, seed=seeds_0))
    self._test_loop_fn(loop_fn, 2)
class LoggingTest(PForTestCase):
  """Vectorization tests for logging/assert ops."""
  def test_print(self):
    x = random_ops.random_uniform([3, 5])
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return logging_ops.Print(
          x1, [x1, "x1", array_ops.shape(x1)], summarize=10)
    self._test_loop_fn(loop_fn, 3)
  def test_print_v2(self):
    x = constant_op.constant([1, 2, 3])
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      # print_v2 has no data output; trigger it via a control dependency.
      with ops.control_dependencies([
          logging_ops.print_v2(
              x1, "x1", array_ops.shape(x1), summarize=10)]):
        return array_ops.identity(x1)
    self._test_loop_fn(loop_fn, 3)
    # Also verify what the vectorized print actually writes to stderr.
    with self.captureWritesToStream(sys.stderr) as printed:
      self.evaluate(pfor_control_flow_ops.pfor(loop_fn, 3))
    self.assertIn("[1 2 3] x1 []", printed.contents())
  def test_assert(self):
    def loop_fn(i):
      return control_flow_ops.Assert(i < 10, [i, [10], [i + 1]])
    # TODO(agarwal): make this work with for_loop.
    with session.Session() as sess:
      sess.run(pfor_control_flow_ops.pfor(loop_fn, 3))
      sess.run(pfor_control_flow_ops.pfor(
          lambda i, pfor_config: loop_fn(i), 3))
class TensorArrayTest(PForTestCase):
  """Vectorization tests for TensorArray ops (control flow v1 only)."""
  def setUp(self):
    # These tests target the v1 TensorArray lowering, so force control flow
    # v2 off for the duration of each test and restore it in tearDown.
    self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
    control_flow_v2_toggles.disable_control_flow_v2()
    super(TensorArrayTest, self).setUp()
  def tearDown(self):
    if self._enabled:
      control_flow_v2_toggles.enable_control_flow_v2()
    super(TensorArrayTest, self).tearDown()
  @test_util.run_v1_only("b/122612051")
  def test_create_outside_and_read(self):
    ta = tensor_array_ops.TensorArray(
        dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
    def loop_fn(i):
      # Loop-variant and loop-invariant reads of an external TensorArray.
      return ta.read(i), ta.read(0)
    self._test_loop_fn(loop_fn, 2)
  @test_util.run_v1_only("b/122612051")
  def test_create_outside_and_gather(self):
    ta = tensor_array_ops.TensorArray(
        dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
    def loop_fn(i):
      return ta.gather([i]), ta.gather([0, 1])
    self._test_loop_fn(loop_fn, 2)
  @test_util.run_v1_only("b/122612051")
  def test_create_outside_and_write_and_scatter(self):
    t = tensor_array_ops.TensorArray(dtypes.int32, 10, clear_after_read=False)
    handle = t.handle
    def loop_fn(i):
      ta = t.write(i + 2, 2 * i).write(i, 5)
      ta = ta.scatter([4 + i], [4]).scatter([6 + i, 8 + i], [6 + i, 8 + i])
      return ta.flow
    # Compare pfor against the reference for_loop implementation by stacking
    # the TensorArray contents reached through the final flow value.
    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
    out1 = tensor_array_ops.TensorArray(
        dtypes.int32, handle=handle, flow=t1[-1]).stack()
    output1 = self._run_targets(out1)
    t2 = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, iters=2)
    out2 = tensor_array_ops.TensorArray(
        dtypes.int32, handle=handle, flow=t2[-1]).stack()
    output2 = self._run_targets(out2)
    self.assertAllClose(output2, output1)
  @test_util.run_v1_only("b/122612051")
  def test_create_inside_and_write(self):
    def loop_fn(i):
      # TODO(agarwal): switching the order of writes to ta1 does not work.
      ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).write(0,
                                                                i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(dtypes.int32, 1).write(0, 1)
      return ta1.stack(), ta2.stack()
    self._test_loop_fn(loop_fn, 3)
  @test_util.run_v1_only("b/122612051")
  def test_create_inside_and_scatter(self):
    def loop_fn(i):
      # TODO(agarwal): switching the order of scatter to ta1 does not work.
      ta1 = tensor_array_ops.TensorArray(dtypes.int32,
                                         2).scatter([0],
                                                    [[i, 2]]).scatter([1],
                                                                      [[1, 2]])
      ta2 = tensor_array_ops.TensorArray(dtypes.int32,
                                         2).scatter([0], [3]).scatter([1], [4])
      return ta1.stack(), ta2.stack()
    self._test_loop_fn(loop_fn, 3)
  @test_util.run_v1_only("b/122612051")
  def test_create_inside_and_read(self):
    def loop_fn(i):
      ta1 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
      # TODO(agarwal): ta1.read(i) currently is not supported.
      return ta1.read(0), ta2.read(0), ta2.read(i)
    self._test_loop_fn(loop_fn, 2)
  @test_util.run_v1_only("b/122612051")
  def test_create_inside_and_gather(self):
    def loop_fn(i):
      ta1 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
      # TODO(agarwal): ta1.read(i) currently is not supported.
      return ta1.gather([0, 1]), ta2.gather([0, 1]), ta2.gather([i])
    self._test_loop_fn(loop_fn, 2)
  @test_util.run_v1_only("b/122612051")
  def test_grad(self):
    x = random_ops.random_uniform([3, 2])
    ta = tensor_array_ops.TensorArray(
        dtypes.float32, 3, clear_after_read=False).unstack(x)
    y = math_ops.square(ta.stack())
    def loop_fn(i):
      y_i = array_ops.gather(y, i)
      grad = gradient_ops.gradients(y_i, x)[0]
      return array_ops.gather(grad, i)
    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=3)
    # y = x * x. Hence dy/dx = 2 * x.
    actual_grad = 2.0 * x
    with session.Session() as sess:
      # Fix: the original unpacked these in swapped order, binding the pfor
      # result to a variable named `actual_grad` and the analytic gradient to
      # `computed_grad`. Same values, but the names were misleading.
      computed_grad, expected_grad = sess.run([t1, actual_grad])
      self.assertAllClose(expected_grad, computed_grad)
@test_util.run_all_in_graph_and_eager_modes
class TensorListTest(PForTestCase):
  """Vectorization tests for TensorList (variant) ops."""
  def test_create_outside_and_write(self):
    handle1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
    handle2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
    def loop_fn(i):
      # handle1 receives a loop-variant write; handle2 only invariant ones.
      h1 = list_ops.tensor_list_set_item(handle1, 0, i)
      h1 = list_ops.tensor_list_set_item(h1, 1, 1)
      h2 = list_ops.tensor_list_set_item(handle2, 0, 1)
      return (list_ops.tensor_list_stack(h1, dtypes.int32),
              list_ops.tensor_list_stack(h2, dtypes.int32))
    self._test_loop_fn(loop_fn, 3)
  def _make_graph_def(self, text):
    # Helper: parse a text-format GraphDef.
    ret = graph_pb2.GraphDef()
    text_format.Parse(text, ret)
    return ret
  def test_no_fallback_with_internal_stacking(self):
    # Create an op (really a function) that pfor definitely does not have a
    # converter for. Assumes pfor does not start looking up function definitions
    # for op-type-is-function-name calls.
    @def_function.function
    def opaque_list_fetch(x):
      array_ops.identity(x)
      return list_ops.tensor_list_get_item(x, 0, dtypes.int32)
    external_handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
    opaque_list_fetch_concrete = opaque_list_fetch.get_concrete_function(
        external_handle)
    opaque_list_fetch_name = opaque_list_fetch_concrete.name
    def loop_fn(i):
      h1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      h1 = list_ops.tensor_list_set_item(h1, 0, i)
      opaque_list_fetch_concrete.add_to_graph()
      # Splice a call to the opaque function in via import_graph_def so pfor
      # sees an op type it has no converter for.
      graph_def = self._make_graph_def("""
          node { name: 'x' op: 'Placeholder'
            attr { key: 'dtype' value { type: DT_FLOAT } }}
          node { name: 'fn' op: '""" + opaque_list_fetch_name.decode()
                                       + """' input: 'x:0' }""")
      return importer.import_graph_def(
          graph_def,
          input_map={"x:0": h1},
          return_elements=["fn"],
          name="import")[0].outputs[0]
    # The while_loop fallback cannot help here either, since the list handle
    # is internally stacked; both settings should fail with the same error.
    with self.assertRaisesRegex(ValueError, "No pfor vectorization"):
      self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=False)
    with self.assertRaisesRegex(ValueError, "No pfor vectorization"):
      self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=True)
  def test_create_inside_and_write(self):
    def loop_fn(i):
      h1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      h1 = list_ops.tensor_list_set_item(h1, 0, i)
      h1 = list_ops.tensor_list_set_item(h1, 1, 1)
      h2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      h2 = list_ops.tensor_list_set_item(h2, 0, 1)
      return (list_ops.tensor_list_stack(h1, dtypes.int32),
              list_ops.tensor_list_stack(h2, dtypes.int32))
    self._test_loop_fn(loop_fn, 3)
  def test_create_outside_and_read(self):
    handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
    handle = list_ops.tensor_list_set_item(handle, 0, 0)
    handle = list_ops.tensor_list_set_item(handle, 1, 1)
    def loop_fn(i):
      return (list_ops.tensor_list_get_item(handle, i, dtypes.int32),
              list_ops.tensor_list_get_item(handle, 0, dtypes.int32),
              list_ops.tensor_list_length(handle),
              list_ops.tensor_list_element_shape(handle, dtypes.int32),
              list_ops.tensor_list_element_shape(handle, dtypes.int64))
    self._test_loop_fn(loop_fn, 2)
  def test_create_inside_and_read(self):
    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      handle = list_ops.tensor_list_set_item(handle, 0, i)
      handle = list_ops.tensor_list_set_item(handle, 1, 1)
      return (list_ops.tensor_list_get_item(handle, 0, dtypes.int32),
              list_ops.tensor_list_get_item(handle, i, dtypes.int32),
              list_ops.tensor_list_length(handle),
              list_ops.tensor_list_element_shape(handle, dtypes.int32),
              list_ops.tensor_list_element_shape(handle, dtypes.int64))
    self._test_loop_fn(loop_fn, 2)
  def test_create_outside_and_push_back(self):
    h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
    def loop_fn(i):
      handle = list_ops.tensor_list_push_back(h, [i, 2])
      handle = list_ops.tensor_list_push_back(handle, [1, 2])
      handle = list_ops.tensor_list_push_back(handle, [1, 2])
      return list_ops.tensor_list_stack(handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 3)
  def test_create_inside_and_push_back(self):
    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
      handle = list_ops.tensor_list_push_back(handle, [i, 2])
      handle = list_ops.tensor_list_push_back(handle, [1, 2])
      return list_ops.tensor_list_stack(handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 3)
  def test_pop_back_no_shape(self):
    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
      handle = list_ops.tensor_list_push_back(handle, [1, 2])
      handle = list_ops.tensor_list_push_back(handle, [i, 2])
      handle, tensor = list_ops.tensor_list_pop_back(handle, dtypes.int32)
      return tensor, list_ops.tensor_list_stack(handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 3)
  def test_pop_back_no_shape_capture(self):
    h = list_ops.tensor_list_reserve([2], 1, dtypes.int32)
    h = list_ops.tensor_list_push_back(h, [1, 2])
    def loop_fn(i):
      handle, tensor = list_ops.tensor_list_pop_back(h, dtypes.int32)
      handle = list_ops.tensor_list_push_back(handle, [1, i])
      return tensor, list_ops.tensor_list_stack(handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 3)
  def test_pop_back_with_shape(self):
    @def_function.function
    def loop_fn(i):
      with backprop.GradientTape() as tape:
        handle = list_ops.tensor_list_reserve(None, 1, dtypes.float32)
        x = math_ops.cast(i, dtypes.float32)[None]
        tape.watch(x)
        handle = list_ops.tensor_list_push_back(handle, x)
        stacked = list_ops.tensor_list_stack(handle, dtypes.float32)
      list_grad = tape.gradient(stacked, x, x)
      # The gradient of push_back/stack is implemented via TensorListPopBack;
      # check that op and its shape input survive vectorization.
      self.assertEqual("TensorListPopBack", list_grad.op.type)
      return list_grad, stacked, list_grad.op.inputs[1]
    self._test_loop_fn(loop_fn, 3)
  def test_create_outside_and_scatter(self):
    h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
    def loop_fn(i):
      handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=h)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      return list_ops.tensor_list_stack(handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 3)
  def test_create_inside_and_scatter(self):
    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
      handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      return list_ops.tensor_list_stack(handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 3)
  def test_loop_variant_scatter_indices(self):
    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 10, dtypes.int32)
      # Both the scattered values and the indices depend on the loop index.
      handle = list_ops.tensor_list_scatter(
          [[1, i], [i + 1, 2]],
          [i, i + 5], input_handle=handle)
      return list_ops.tensor_list_stack(handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 5)
  def test_loop_variant_scatter_duplicate_indices(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")
    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 10, dtypes.int32)
      # Index i appears twice; later scatter entries win for duplicates.
      handle = list_ops.tensor_list_scatter(
          [[1, i], [1, i + 1], [i + 2, 3]],
          [i, i, i + 2], input_handle=handle)
      return list_ops.tensor_list_stack(handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 5)
  def test_create_outside_and_gather(self):
    handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
    handle = list_ops.tensor_list_scatter([[2, 3]], [0], input_handle=handle)
    handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
    def loop_fn(i):
      return (list_ops.tensor_list_gather(handle, [0, 1], dtypes.int32),
              list_ops.tensor_list_gather(handle, [i], dtypes.int32))
    self._test_loop_fn(loop_fn, 2)
  def test_create_inside_and_gather(self):
    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
      handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      return (list_ops.tensor_list_gather(handle, [0, 1], dtypes.int32),
              list_ops.tensor_list_gather(handle, [i], dtypes.int32))
    self._test_loop_fn(loop_fn, 2)
  def test_create_inside_and_concat(self):
    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
      handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      return gen_list_ops.tensor_list_concat_v2(
          handle,
          element_dtype=dtypes.int32,
          element_shape=[2],
          leading_dims=[])
    # concat_v2 returns (values, lengths); check both against known results.
    output = pfor_control_flow_ops.pfor(loop_fn, 2)
    self.assertAllClose([[0, 2, 1, 2], [1, 2, 1, 2]], output[0])
    self.assertAllClose([[2, 2], [2, 2]], output[1])
  def test_create_outside_and_concat(self):
    h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
    def loop_fn(i):
      handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=h)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      return gen_list_ops.tensor_list_concat_v2(
          handle,
          element_dtype=dtypes.int32,
          element_shape=[2],
          leading_dims=[])
    output = pfor_control_flow_ops.pfor(loop_fn, 2)
    self.assertAllClose([[0, 2, 1, 2], [1, 2, 1, 2]], output[0])
    self.assertAllClose([[2, 2], [2, 2]], output[1])
  def test_tensor_list_from_tensor(self):
    t = random_ops.random_uniform([2, 3, 4])
    def loop_fn(i):
      handle = list_ops.tensor_list_from_tensor(array_ops.gather(t, i), [4])
      return list_ops.tensor_list_stack(handle, t.dtype)
    self._test_loop_fn(loop_fn, 2)
  @test_util.enable_control_flow_v2
  def test_tensor_list_reserve_while_loop(self):
    # Here a loop invariant TensorList is captured by a while_loop, which then
    # performs loop dependent operations on it, resulting in a loop variant
    # output. This forces stacking of the variant handle captured by the
    # while_loop.
    # We handle this particular case by forcing vectorization of
    # TensorListReserve operation.
    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      _, out_handle = control_flow_ops.while_loop(
          lambda j, _: j < 2, lambda j, h:
          (j + 1, list_ops.tensor_list_set_item(h, j, i)), (0, handle))
      return list_ops.tensor_list_stack(out_handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 2)
  @test_util.enable_control_flow_v2
  def test_tensor_list_while_loop_stacked_cond_stacked_list(self):
    def loop_fn(i):
      # Both the list contents and the while_loop condition depend on i.
      handle = list_ops.tensor_list_from_tensor([20, 21, 22, 23, i], [])
      _, out_handle = control_flow_ops.while_loop(
          lambda j, _: j < i,
          lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, i)),
          (0, handle))
      return list_ops.tensor_list_stack(out_handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 5)
  @test_util.enable_control_flow_v2
  def test_tensor_list_while_loop_stacked_cond_stacked_list_unknown_shape(self):
    def loop_fn(i):
      # element_shape=None: the list's element shape is unknown until written.
      handle = list_ops.tensor_list_reserve(None, 5, dtypes.int32)
      _, handle = control_flow_ops.while_loop(
          lambda j, _: j < 5,
          lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, 0)),
          (0, handle))
      _, out_handle = control_flow_ops.while_loop(
          lambda j, _: j < i,
          lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, i)),
          (0, handle))
      return list_ops.tensor_list_stack(out_handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 5)
  @test_util.enable_control_flow_v2
  def test_tensor_list_while_loop_stacked_cond_unstacked_list(self):
    def loop_fn(i):
      # The list itself is loop-invariant; only the condition depends on i.
      handle = list_ops.tensor_list_from_tensor([20, 21, 22, 23, 24], [])
      _, out_handle = control_flow_ops.while_loop(
          lambda j, _: j < i,
          lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, i)),
          (0, handle))
      return list_ops.tensor_list_stack(out_handle, dtypes.int32)
    self._test_loop_fn(loop_fn, 5)
  def test_tensor_list_addn_already_stacked(self):
    def loop_fn(i):
      l1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      l1 = list_ops.tensor_list_set_item(l1, 0, i)
      l2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      l2 = list_ops.tensor_list_set_item(l2, 1, i)
      return list_ops.tensor_list_stack(math_ops.add_n([l1, l2]), dtypes.int32)
    self._test_loop_fn(loop_fn, 2)
  def test_tensor_list_addn_stacking_required(self):
    # l1 is built outside the loop, so AddN must first stack it to match l2.
    l1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
    l1 = list_ops.tensor_list_set_item(l1, 1, 1)
    def loop_fn(i):
      l2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      l2 = list_ops.tensor_list_set_item(l2, 1, i)
      return list_ops.tensor_list_stack(
          math_ops.add_n([l1, l2]), dtypes.int32)
    self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class TensorTest(PForTestCase):
  """Vectorization tests for tensor_scatter_nd_update with loop-variant args."""
  def test_loop_variant_scatter_update_no_shape(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")
    # Wrap the op in a tf.function with fully unknown shapes so pfor cannot
    # rely on static shape information.
    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
    ])
    def shapeless_func(tensor, indices, updates):
      return array_ops.tensor_scatter_nd_update(tensor, indices, updates)
    def loop_fn(i):
      tensor = [0, 0, 0, 0, 0, 0, 0, 0]
      indices = [[i], [i + 1], [i + 3], [i + 2]]
      updates = [i, i - 10, i + 11, 12]
      return shapeless_func(tensor, indices, updates)
    self._test_loop_fn(loop_fn, 5)
  def test_loop_variant_scatter_update_singles(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")
    def loop_fn(i):
      # Scalar updates at loop-variant indices.
      tensor = [0, 0, 0, 0, 0, 0, 0, 0]
      indices = [[i], [i+1], [i+3], [i+2]]
      updates = [i, i-10, i+11, 12]
      return array_ops.tensor_scatter_nd_update(tensor, indices, updates)
    self._test_loop_fn(loop_fn, 5)
  def test_loop_variant_scatter_update_slices(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")
    def loop_fn(i):
      # Row (slice) updates instead of scalar updates.
      tensor = array_ops.zeros([10, 3], dtype=dtypes.int32)
      indices = [[i+2], [4]]
      updates = [[1, i*2, 3], [i+4, i-5, 6]]
      return array_ops.tensor_scatter_nd_update(tensor, indices, updates)
    self._test_loop_fn(loop_fn, 5)
  def test_loop_variant_scatter_update_multi_dim_index(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")
    def loop_fn(i):
      # Indices address individual elements of a rank-2 tensor.
      tensor = array_ops.zeros([10, 3], dtype=dtypes.int32)
      indices = [[i+2, 1], [4, 2]]
      updates = [i, 5]
      return array_ops.tensor_scatter_nd_update(tensor, indices, updates)
    self._test_loop_fn(loop_fn, 5)
  def test_loop_variant_scatter_update_folded_indices(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")
    def loop_fn(i):
      # Indices carry an extra leading (batch) dimension that must be folded.
      tensor = array_ops.zeros([5, 5])
      indices = [
          [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]],
          [[0, 4], [1, 3], [2, 2], [3, 1], [4, 0]],
      ]
      updates = [
          [1, i, 1, 1, 1],
          [1, 1, i+2, 1, i-5],
      ]
      return array_ops.tensor_scatter_nd_update(tensor, indices, updates)
    self._test_loop_fn(loop_fn, 5)
class OptionalTest(PForTestCase):
  """Tests pfor conversion of tf.data Optional ops."""

  def test_optional_from_value(self):
    def loop_fn(i):
      # Optional holds a mix of loop-variant (i, i + 1) and loop-invariant
      # (constant) components.
      o = gen_dataset_ops.optional_from_value(
          [i, i + 1, constant_op.constant(3)])
      # Result intentionally discarded; exercises conversion of OptionalNone.
      gen_dataset_ops.optional_none()
      return gen_dataset_ops.optional_get_value(
          o, [dtypes.int32, dtypes.int32, dtypes.int32],
          [[], [], []])

    self._test_loop_fn(loop_fn, 2)
class StackTest(PForTestCase):
  """Tests pfor conversion of v2 stack ops (push/pop with explicit ordering)."""

  @test_util.run_v1_only("b/122612051")
  def test_stack_inside_loop_invariant(self):
    def loop_fn(_):
      # Stack created inside the loop; all pushed values are loop invariant.
      # Control dependencies force push(1), push(2), pop -> 2, pop -> 1.
      s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
      op1 = data_flow_ops.stack_push_v2(s, 1)
      with ops.control_dependencies([op1]):
        op2 = data_flow_ops.stack_push_v2(s, 2)
      with ops.control_dependencies([op2]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e2]):
        e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    self._test_loop_fn(loop_fn, 2)

  @test_util.run_v1_only("b/122612051")
  def test_stack_inside_push_loop_dependent(self):
    def loop_fn(i):
      # Same as above but the first pushed value depends on the pfor index.
      s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
      op1 = data_flow_ops.stack_push_v2(s, i)
      with ops.control_dependencies([op1]):
        op2 = data_flow_ops.stack_push_v2(s, 2)
      with ops.control_dependencies([op2]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e2]):
        e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    self._test_loop_fn(loop_fn, 2)

  @test_util.run_v1_only("b/122612051")
  def test_stack_outside_pop(self):
    # Stack created outside the loop; pushes 5, 6, 7 happen before pfor, so
    # each pop inside the loop reads the externally pushed values.
    s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
    op = data_flow_ops.stack_push_v2(s, 5)
    with ops.control_dependencies([op]):
      op = data_flow_ops.stack_push_v2(s, 6)
    with ops.control_dependencies([op]):
      op = data_flow_ops.stack_push_v2(s, 7)

    def loop_fn(_):
      e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e1]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    with ops.control_dependencies([op]):
      e1, e2 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
    with ops.control_dependencies([e1, e2]):
      e3 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
    v1, v2, v3 = self._run_targets([e1, e2, e3], run_init=False)
    # Both iterations observe the same top-of-stack values (7 then 6); the
    # final outside pop sees the remaining 5.
    self.assertAllEqual([7, 7], v1)
    self.assertAllEqual([6, 6], v2)
    self.assertAllEqual(5, v3)

  @test_util.run_v1_only("b/122612051")
  def test_stack_outside_push(self):
    # Pushing to a stack created outside the loop is rejected by conversion.
    s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)

    def loop_fn(_):
      return data_flow_ops.stack_push_v2(s, 7)

    with self.assertRaisesRegex(ValueError, "StackPushV2 not allowed.*"):
      pfor_control_flow_ops.pfor(loop_fn, iters=2)
# TODO(agarwal): test nested while_loops. This currently requires converting a
# tf.cond.
class WhileV1Test(PForTestCase):
  """Tests pfor conversion of v1 while_loop (control flow v2 disabled)."""

  def setUp(self):
    # Remember the current toggle so tearDown can restore it.
    self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
    control_flow_v2_toggles.disable_control_flow_v2()
    super(WhileV1Test, self).setUp()

  def tearDown(self):
    if self._enabled:
      control_flow_v2_toggles.enable_control_flow_v2()
    super(WhileV1Test, self).tearDown()

  def test_while_outside_loop(self):
    # while_loop constructed outside loop_fn: its output is loop invariant.
    x = control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])

    def loop_fn(i):
      return x + i

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_invariant_while(self):
    def loop_fn(_):
      return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_invariant_while_with_control_dependency(self):
    def loop_fn(i):
      # Control dependency on i ties the invariant loop to each iteration.
      with ops.control_dependencies([i]):
        return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
                                           [0])

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_while_with_stateful_ops(self):
    def loop_fn(_):
      # random_uniform is stateful; only the (deterministic) counter is
      # returned.
      return control_flow_ops.while_loop(
          lambda j, x: j < 4, lambda j, x:
          (j + 1, x + random_ops.random_uniform([])), [0, 0.])[0]

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_while_unstacked_condition(self):
    def loop_fn(i):
      # Condition is loop invariant; the body accumulates the variant i.
      return control_flow_ops.while_loop(lambda j, x: j < 4, lambda j, x:
                                         (j + 1, x + i), [0, 0])

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_while(self):
    x = random_ops.random_uniform([3, 5])
    lengths = constant_op.constant([4, 0, 2])

    def loop_fn(i):
      # Both the data and the termination condition are loop variant.
      x_i = array_ops.gather(x, i)
      lengths_i = array_ops.gather(lengths, i)
      _, total = control_flow_ops.while_loop(
          lambda j, _: j < lengths_i, lambda j, t:
          (j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
      return total

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_while_jacobian(self):
    x = random_ops.random_uniform([1, 3])
    y = random_ops.random_uniform([3, 3])
    # out = x @ y @ y @ y @ y, where @ is matmul operator.
    _, out = control_flow_ops.while_loop(
        lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
        [0, x])

    def loop_fn(i):
      out_i = array_ops.gather(out, i, axis=1)
      return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])

    out = pfor_control_flow_ops.pfor(loop_fn, iters=3)
    # The above code does not work with tf.while_loop instead of pfor. So we
    # manually compute the expected output here.
    # Note that gradient of output w.r.t. x is (y @ y @ y @ y)^T.
    expected_output = y
    for _ in range(3):
      expected_output = math_ops.matmul(expected_output, y)
    expected_output = array_ops.transpose(expected_output, [1, 0])
    with session.Session() as sess:
      out, expected = sess.run([out, expected_output])
      self.assertAllClose(expected, out)

  @test_util.run_v1_only("b/122612051")
  def test_tensor_array_as_loop_variable(self):
    def loop_fn(i):

      def body(j, ta):
        ta = ta.write(j, i + j * j)
        return j + 1, ta

      _, ta = control_flow_ops.while_loop(
          lambda j, _: j < 4, body,
          (0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))
      return ta.stack()

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_read_tensor_array_partitioned_indices(self):
    # Note that tensor array values are pfor loop dependent, and the while loop
    # termination condition is also dependent on pfor iteration.
    def loop_fn(i):
      ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)
      ta = ta.unstack(i + list(range(5)))

      def body(j, s):
        return j + 1, s + ta.read(j)

      _, s = control_flow_ops.while_loop(lambda j, _: j < i, body, (0, 0))
      return s

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_external_while_loop_grad(self):
    # Here we test that external while_loops that are extended from inside pfor
    # (due to gradient calls) are not actually converted. If the below was
    # converted all pfor iterations would write to the same tensor array
    # indices.
    x = constant_op.constant(1.)

    def body(j, ta):
      ta = ta.write(j, x)
      return j + 1, ta

    _, ta = control_flow_ops.while_loop(
        lambda j, _: j < 4, body,
        (0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))
    out = ta.stack()

    def loop_fn(i):
      out_i = array_ops.gather(out, i)
      return gradient_ops.gradients(out_i, x)[0]

    with session.Session() as sess:
      # out is [x, x, x]. Hence the gradients should be [1, 1, 1].
      self.assertAllEqual([1, 1, 1],
                          sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))

  @test_util.run_v1_only("b/122612051")
  def test_tensor_array_grad(self):
    inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
    ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
    ta = ta.unstack(inp)

    def loop_fn(i):

      def body(j, x):
        value = ta.gather([j])
        value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
        return j + 1, x + value

      _, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
                                           (0, array_ops.zeros([2])))
      out = math_ops.reduce_prod(out)
      return out, gradient_ops.gradients(out, inp)[0]

    pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
    # Note that tf.while_loop does not work in the setup above. So we manually
    # construct the equivalent computation of the above loops here.
    real_out = math_ops.reduce_sum(inp, axis=[0])
    real_out = math_ops.reduce_prod(real_out, axis=[1])
    # Note that gradients of real_out will accumulate the gradients across the
    # output value. Hence we do the same aggregation on pfor_out_grad.
    real_out_grad = gradient_ops.gradients(real_out, inp)[0]
    sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])

    with session.Session() as sess:
      v1, v2, v1_grad, v2_grad = sess.run(
          [pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
      self.assertAllClose(v1, v2)
      self.assertAllClose(v1_grad, v2_grad)
def dynamic_lstm_input_fn(batch_size, state_size, max_steps):
  """Returns constant (inputs, sequence_length) tensors for the LSTM tests."""
  # Constants (rather than random TF ops) so that multiple session.run calls
  # produce the same result.
  input_values = np.random.rand(batch_size, max_steps, state_size)
  inputs = constant_op.constant(input_values, dtype=dtypes.float32)
  length_values = np.random.randint(0, high=max_steps + 1, size=[batch_size])
  sequence_length = constant_op.constant(length_values, dtype=dtypes.int32)
  return inputs, sequence_length
def create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):
  """Builds a pfor-based RNN over `cell_fn` plus the matching dynamic_rnn.

  Returns a pair (pfor_output, tf_output) whose values should agree, so tests
  can compare the pfor conversion against rnn.dynamic_rnn.
  """
  cell = cell_fn(state_size)
  inputs, sequence_length = dynamic_lstm_input_fn(batch_size, state_size,
                                                  max_steps)
  inputs_ta = tensor_array_ops.TensorArray(
      dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])
  # Time-major layout so inputs_ta.read(t) yields the batch at step t.
  inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])
  inputs_ta = inputs_ta.unstack(inputs_time_major)
  zeros = array_ops.zeros([state_size])

  def loop_fn(i):
    # One pfor iteration processes batch element i through all time steps.
    sequence_length_i = array_ops.gather(sequence_length, i)

    def body_fn(t, state, ta):
      inputs_t = array_ops.expand_dims(
          array_ops.gather(inputs_ta.read(t), i), 0)
      output, new_state = cell(inputs_t, state)
      output = array_ops.reshape(output, [-1])
      # TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
      # array_ops.where when t < min(sequence_length). Doing that requires
      # supporting tf.cond pfor conversion.
      done = t >= sequence_length_i
      # Past the sequence end: emit zeros and carry the old state forward.
      output = array_ops.where(done, zeros, output)
      ta = ta.write(t, output)
      new_state = [
          array_ops.where(done, s, ns)
          for s, ns in zip(nest.flatten(state), nest.flatten(new_state))
      ]
      new_state = nest.pack_sequence_as(state, new_state)
      return t + 1, new_state, ta

    def condition_fn(t, _, unused):
      del unused
      return t < max_steps

    initial_state = cell.zero_state(1, dtypes.float32)
    _, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
        0, initial_state,
        tensor_array_ops.TensorArray(dtypes.float32, max_steps)
    ])
    new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
    new_state = nest.pack_sequence_as(initial_state, new_state)
    return ta.stack(), new_state

  pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)
  tf_output = rnn.dynamic_rnn(
      cell,
      inputs,
      sequence_length=sequence_length,
      initial_state=cell.zero_state(batch_size, dtypes.float32))
  return pfor_output, tf_output
@test_util.run_all_in_graph_and_eager_modes
class WhileV2Test(PForTestCase):
  """Tests pfor conversion of v2 while_loop (control flow v2 enabled)."""

  def setUp(self):
    # Remember the current toggle so tearDown can restore it.
    self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
    control_flow_v2_toggles.enable_control_flow_v2()
    super(WhileV2Test, self).setUp()

  def tearDown(self):
    if not self._enabled:
      control_flow_v2_toggles.disable_control_flow_v2()
    super(WhileV2Test, self).tearDown()

  def test_while_outside_loop(self):
    def _f():
      return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])

    def loop_fn(i):
      return _f() + i

    self._test_loop_fn(loop_fn, 3)

  def test_invariant_while(self):
    def loop_fn(_):
      return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])

    self._test_loop_fn(loop_fn, 3)

  def test_invariant_while_with_control_dependency(self):
    def loop_fn(i):
      with ops.control_dependencies([i]):
        return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
                                           [0])

    self._test_loop_fn(loop_fn, 3)

  def test_while_with_stateful_ops(self):
    def loop_fn(_):
      # random_uniform is stateful; only the deterministic counter is returned.
      j, _ = control_flow_ops.while_loop(
          lambda j, x: j < 4, lambda j, x:
          (j + 1, x + random_ops.random_uniform([])), [0, 0.])
      return j

    self._test_loop_fn(loop_fn, 3)

  def test_while_with_variable(self):
    v = resource_variable_ops.ResourceVariable(5.)

    def loop_fn(_):
      _, output = control_flow_ops.while_loop(lambda j, x: j < 4, lambda j, x:
                                              (j + 1, x + v), [0, 0.])
      return output

    self._test_loop_fn(loop_fn, 3)

  def test_while_unstacked_condition(self):
    def loop_fn(i):
      return control_flow_ops.while_loop(lambda j, x: j < 4, lambda j, x:
                                         (j + 1, x + i), [0, 0])

    self._test_loop_fn(loop_fn, 3)

  def test_while(self):
    x = random_ops.random_uniform([3, 5])
    lengths = constant_op.constant([4, 0, 2])

    def loop_fn(i):
      # Both the data and the termination condition are loop variant.
      x_i = array_ops.gather(x, i)
      lengths_i = array_ops.gather(lengths, i)
      return control_flow_ops.while_loop(
          lambda j, _: j < lengths_i, lambda j, t:
          (j + 1, t + array_ops.gather(x_i, j)), [0, 0.])

    self._test_loop_fn(loop_fn, 3)

  def test_while_change_input_invariance(self):
    # This tests cases where a loop invariant input to while has loop dependent
    # operations applied to it inside the while body.
    # It also test inputs that are passed through.
    def loop_fn(i):
      return control_flow_ops.while_loop(
          lambda j, *_: j < i, lambda j, x, y, z, w:
          (j + 1, x + i, y + x, z, w), [
              0,
              constant_op.constant(0),
              constant_op.constant(1), i,
              constant_op.constant(2)
          ])

    self._test_loop_fn(loop_fn, 3)

  def test_while_shape_invariants(self):
    def loop_fn(i):
      return control_flow_ops.while_loop(
          lambda j, *_: j < 4,
          lambda j, x, y: (j + 1, x + i, y + 1),
          [0, constant_op.constant([0, 1]),
           constant_op.constant([2, 3])],
          shape_invariants=[
              None,
              tensor_shape.TensorShape([2]),
              tensor_shape.TensorShape([2])
          ])

    self._test_loop_fn(loop_fn, 3)

  def test_while_jacobian(self):
    # Note that we wrap the code below in a tf.function since we don't want the
    # while_loop call to be evaluated eagerly using a python loop.
    @def_function.function
    def _f(x, y, use_pfor):
      # out = x @ y @ y @ y @ y, where @ is matmul operator.
      _, out = control_flow_ops.while_loop(
          lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
          [0, x])

      def loop_fn(i):
        out_i = array_ops.gather(out, i, axis=1)
        grad = gradient_ops.gradients(out_i, x)
        return array_ops.reshape(grad[0], [-1])

      if use_pfor:
        return pfor_control_flow_ops.pfor(loop_fn, iters=3)
      else:
        return pfor_control_flow_ops.for_loop(
            loop_fn, iters=3, loop_fn_dtypes=out.dtype)

    x = constant_op.constant(np.random.uniform(size=(1, 3)))
    y = constant_op.constant(np.random.uniform(size=(3, 3)))
    # pfor and the for_loop fallback must agree.
    self.assertAllClose(_f(x, y, True), _f(x, y, False))

  def test_scan(self):
    np.random.seed(seed=42)
    data = np.random.randn(3).astype(np.float32)

    def log_prob(x):
      return math_ops.reduce_sum(functional_ops.scan_v2(
          lambda _, yi: (x - yi)**2,
          elems=data,
          initializer=constant_op.constant(0.)))

    x = variables.Variable(array_ops.ones([2]))
    self.evaluate(x.initializer)
    v_log_prob = lambda x: pfor_control_flow_ops.vectorized_map(log_prob, x)
    # Compare symbolic vs numeric gradients through the vectorized scan.
    theoretical, numerical = gradient_checker_v2.compute_gradient(
        v_log_prob, (x,), delta=1e-3)
    self.assertAllClose(theoretical, numerical, rtol=1e-2)

  def test_scan_captured_variable(self):
    if not context.executing_eagerly():
      self.skipTest("Test only written for 2.x")
    v = variables.Variable(math_ops.range(10, dtype=dtypes.float32))

    def loop_fn(idx):
      del idx
      return functional_ops.scan_v2(lambda _, i: array_ops.gather(v, i),
                                    elems=math_ops.range(v.shape[0]),
                                    initializer=0.0)

    with backprop.GradientTape() as tape:
      result = pfor_control_flow_ops.pfor(loop_fn, 2)
    # Each of the 2 iterations gathers every element of v once, so the
    # gradient for each element is 2.
    self.assertAllClose([2.] * 10, tape.gradient(result, v))
@test_util.run_all_in_graph_and_eager_modes
class NestedControlFlowTest(PForTestCase):
  """Tests pfor conversion of cond/while nested inside each other."""

  def setUp(self):
    # Remember the current toggle so tearDown can restore it.
    self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
    control_flow_v2_toggles.enable_control_flow_v2()
    super(NestedControlFlowTest, self).setUp()

  def tearDown(self):
    if not self._enabled:
      control_flow_v2_toggles.disable_control_flow_v2()
    super(NestedControlFlowTest, self).tearDown()

  def _cond(self, f=None, split=0):
    # Wraps `f` in a cond: apply f only when y > split, else (x + 1, y).
    if f is None:
      f = lambda x, y: (x, y)

    def _f(x, y):
      return control_flow_ops.cond(y > split, lambda: f(x, y), lambda:
                                   (x + 1., y))

    return _f

  def _while(self, f=None):
    # Wraps `f` in a while loop accumulating gathered elements of f(x, y)[0].
    if f is None:
      f = lambda x, y: (x, y)

    def _f(x, y):
      return control_flow_ops.while_loop(
          lambda j, _: j < y, lambda j, t:
          (j + 1, t + array_ops.gather(f(x, y)[0], j)), [0, x])[1], y

    return _f

  def _test_helper(self, f):
    x = random_ops.random_uniform([5, 5])
    # Mix of positive and negative lengths exercises both cond branches and
    # zero-trip while loops.
    y = constant_op.constant([4, -1, 2, -2, 2])

    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      y_i = array_ops.gather(y, i)
      return f(x_i, y_i)

    self._test_loop_fn(loop_fn, 5)

  def test_cond_while(self):
    self._test_helper(self._cond(self._while()))

  def test_while_cond(self):
    self._test_helper(self._while(self._cond()))

  def test_while_while(self):
    self._test_helper(self._while(self._while()))

  def test_cond_cond(self):
    self._test_helper(self._cond(self._cond()))
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class StatelessIfTest(PForTestCase):
  """Tests pfor conversion of stateless cond_v2."""

  def test_loop_variant_cond(self):
    x = [1, 2, 3, 4, 5.]
    y = 2.5

    @def_function.function
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      # Note that the output has a combination of then and else branches being
      # loop variant / invariant.
      return cond_v2.cond_v2(x_i < y, lambda: (y - x_i, y, 1., 2.), lambda:
                             (x_i - y, 0., y, 3.))

    self._test_loop_fn(loop_fn, iters=5)

  def test_loop_invariant_cond(self):
    x = [1, 2, 3, 4, 5.]
    y = 0.5
    # Predicate depends on z, which is independent of the pfor iteration.
    z = random_ops.random_uniform([])

    @def_function.function
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      # Note that the output has a combination of then and else branches being
      # loop variant / invariant.
      return cond_v2.cond_v2(z < y, lambda: (y - x_i, y, 1., 2.), lambda:
                             (x_i - y, 0., y, 3.))

    self._test_loop_fn(loop_fn, iters=5)

  def test_empty_branch(self):
    x = [1, 2, 3, 4, 5.]
    y = 6.  # Larger than all x values, so the else branch never runs.

    @def_function.function
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return cond_v2.cond_v2(
          x_i < y,  # Note that else branch is empty.
          lambda: (y - x_i, y, 1., 2.),
          lambda: (x_i - y, 0., y, 3.))

    self._test_loop_fn(loop_fn, iters=5)
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class IfTest(PForTestCase):
  """Tests pfor conversion of cond_v2 with stateful (variable-reading) branches."""

  def test_read_var(self):
    self.skipTest("b/156438918")  # Flaky
    x = [1, 2, 3, 4, 5.]
    y = 2.5
    z = resource_variable_ops.ResourceVariable(5.)

    @def_function.function
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      # Both branches read the resource variable z.
      return cond_v2.cond_v2(x_i < y, lambda: z - x_i, lambda: z + x_i)

    self._test_loop_fn(loop_fn, iters=5)
class RNNTest(PForTestCase):
  """Compares pfor-built RNNs against rnn.dynamic_rnn (see create_dynamic_lstm)."""

  @test_util.run_v1_only("b/122612051")
  def test_dynamic_rnn(self):
    pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell, 3, 5,
                                                   7)
    self.run_and_assert_equal(pfor_outputs, tf_outputs)

  @test_util.run_v1_only("b/122612051")
  def test_dynamic_lstm(self):
    pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicLSTMCell, 3, 5,
                                                   7)
    self.run_and_assert_equal(pfor_outputs, tf_outputs)
# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop
# conversion don't look good. Some of it seems like lot of copies between host
# and device. Optimize that.
class Benchmarks(test.Benchmark):
  """Benchmarks comparing pfor, for_loop, and manual implementations."""

  def _run(self, targets, iters, name=None):
    """Times `iters` runs of `targets` and reports average wall time in ms."""

    def _done(t):
      # Note that we don't use tf.control_dependencies since that will not make
      # sure that the computation on GPU has actually finished. So we fetch the
      # first element of the output, and assume that this will not be called on
      # empty tensors.
      return array_ops.gather(array_ops.reshape(t, [-1]), 0)

    targets = [_done(x) for x in nest.flatten(targets)]
    sess = session.Session()
    with sess:
      init = variables.global_variables_initializer()
      sess.run(init)
      run_fn = sess.make_callable(targets)
      run_fn()  # Warm up
      begin = time.time()
      for _ in range(iters):
        run_fn()
      end = time.time()
      avg_time_ms = 1000 * (end - begin) / iters
      self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
      return avg_time_ms

  def benchmark_sess_run_overhead(self):
    # Baseline: cost of a session.run call on a trivial constant.
    with ops.Graph().as_default():
      x = constant_op.constant(1.0)
      self._run(x, 10000, name="session_run_overhead")

  def benchmark_add(self):
    with ops.Graph().as_default():
      n = 256
      params = 1000
      x = random_ops.random_normal([n, params])
      y = random_ops.random_normal([n, params])

      def loop_fn(i):
        x_i = array_ops.gather(x, i)
        y_i = array_ops.gather(y, i)
        return x_i + y_i

      pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
      while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
      manual = x + y
      self._run(manual, 1000, name="manual_add")
      self._run(pfor_outputs, 1000, name="pfor_add")
      self._run(while_outputs, 100, name="while_add")

  def benchmark_matmul(self):
    with ops.Graph().as_default():
      n = 1024
      params = 1000
      x = random_ops.random_normal([n, params])
      y = random_ops.random_normal([params, params])

      def loop_fn(i):
        x_i = array_ops.expand_dims(array_ops.gather(x, i), 0)
        return math_ops.matmul(x_i, y)

      pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
      while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
      manual = math_ops.matmul(x, y)
      self._run(manual, 1000, name="manual_matmul")
      self._run(pfor_outputs, 1000, name="pfor_matmul")
      self._run(while_outputs, 100, name="while_matmul")

  def benchmark_map_fn(self):
    with ops.Graph().as_default():
      b = 256
      params = 1000
      inp = random_ops.random_normal((b, params))
      fn = lambda x: x * x

      def pfor_map_fn(f, x):
        # pfor-based equivalent of map_fn over the leading dimension.
        return pfor_control_flow_ops.pfor(lambda i: f(array_ops.gather(x, i)),
                                          array_ops.shape(x)[0])

      map_output = map_fn.map_fn(fn, inp)
      pfor_output = pfor_map_fn(fn, inp)
      self._run(map_output, 100, name="tf_map_fn")
      self._run(pfor_output, 100, name="pfor_map_fn")

  def benchmark_basic_while(self):
    with ops.Graph().as_default():

      def loop_fn(i):
        _, s = control_flow_ops.while_loop(lambda t, x: t < i, lambda t, x:
                                           (t + 1, x + i), [0, 0])
        return s

      iters = 50
      pfor_output = pfor_control_flow_ops.pfor(loop_fn, iters)
      for_loop_output = pfor_control_flow_ops.for_loop(loop_fn, dtypes.int32,
                                                       iters)
      self._run(pfor_output, 100, name="pfor_basic")
      self._run(for_loop_output, 100, name="for_loop_basic")

  def benchmark_dynamic_rnn(self):
    with ops.Graph().as_default():
      pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell, 128,
                                                     512, 16)
      self._run(pfor_outputs, 100, name="pfor_rnn")
      self._run(tf_outputs, 100, name="tf_rnn")

  def benchmark_reduction(self):
    n = 1024
    with ops.Graph().as_default():
      x = random_ops.random_uniform([n, n])
      w = random_ops.random_uniform([n, n])

      def loop_fn(i, pfor_config):
        x_i = array_ops.gather(x, i)
        return math_ops.reduce_sum(
            math_ops.matmul(pfor_config.reduce_concat(x_i), w))

      # Note that output_reduction will be tiled, so there may be some minor
      # overheads compared to output_no_reduction.
      output_reduction = pfor_control_flow_ops.pfor(loop_fn, n)
      output_no_reduction = math_ops.reduce_sum(math_ops.matmul(x, w))
      # Benchmark to test that reduction does not add overhead and its output is
      # treated as loop invariant.
      self._run(output_reduction, 30, name="matmul_reduction")
      self._run(output_no_reduction, 30, name="matmul_no_reduction")
class SparseTest(PForTestCase):
  """Tests pfor conversion of loop_fns that return SparseTensors."""

  @test_util.run_v1_only("b/122612051")
  def test_var_loop_len(self):
    # Number of pfor iterations fed at session.run time.
    num_iters = array_ops.placeholder(dtypes.int32)

    def loop_fn(_):
      return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # [0, 2, 0]

    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    with self.cached_session() as sess:
      sess.run(pfor, feed_dict={num_iters: 3})

  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_none_stacked(self):
    num_iters = 10

    def loop_fn(_):
      # Fully loop-invariant sparse result.
      return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # [0, 2, 0]

    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)

    indices = [[i, j] for i in range(num_iters) for j in range(3)]
    values = [4, 5, 6] * num_iters
    dense_shapes = [num_iters, 3]
    # Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]
    manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)
    self.run_and_assert_equal(pfor, manual)

  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_all_stacked(self):
    num_iters = 10

    def loop_fn(i):
      # Indices, values and shape all depend on the pfor index.
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      indices = array_ops.expand_dims(i, 0)
      return sparse_tensor.SparseTensor(indices, i, i + 1)  # [0, ..., 0, i]

    # Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
                                        list(range(num_iters)),
                                        (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_indices_stacked(self):
    num_iters = 10

    def loop_fn(i):
      # Only the indices depend on the pfor index.
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      indices = array_ops.expand_dims(i, 0)
      return sparse_tensor.SparseTensor(indices, [1], [num_iters])

    # Expected result: identity matrix size num_iters * num_iters
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
                                        [1] * num_iters, (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_values_stacked(self):
    num_iters = 10

    def loop_fn(i):
      # Only the values depend on the pfor index.
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      return sparse_tensor.SparseTensor([[0]], i, [num_iters])  # [i, 0, ..., 0]

    # Expected result: [[1, 0, ...], [2, 0, ...], [3, 0, ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
                                        list(range(num_iters)),
                                        (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_shapes_stacked(self):
    num_iters = 10

    def loop_fn(i):
      # Only the dense shape depends on the pfor index.
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      return sparse_tensor.SparseTensor([[0]], [1], i + 1)  # [1, 0, ..., 0]

    # Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
                                        [1] * num_iters, (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)

  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_shapes_stacked_2D(self):
    num_iters = 10

    def loop_fn(i):
      # 2-D sparse result whose dense shape grows with the pfor index.
      i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)
      shape = array_ops.concat([i, i], 0)
      return sparse_tensor.SparseTensor([[0, 0]], [1], shape)  # [1, 0, ..., 0]

    # Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],
                                        [1] * num_iters,
                                        (num_iters, num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
# Dummy CompositeTensor to test CompositeTensor support.
class Particle(composite_tensor.CompositeTensor):
  """A (batch of) particles each defined by a mass and a scalar velocity."""

  def __init__(self, mass, velocity):
    self.mass = ops.convert_to_tensor(mass)
    self.velocity = ops.convert_to_tensor(velocity)
    # Static shape of the batch: broadcast of the two component shapes.
    self.shape = array_ops.broadcast_static_shape(
        self.mass.shape, self.velocity.shape)

  @property
  def _type_spec(self):
    mass_spec = type_spec.type_spec_from_value(self.mass)
    velocity_spec = type_spec.type_spec_from_value(self.velocity)
    return ParticleSpec(mass_spec, velocity_spec)
class ParticleSpec(type_spec.BatchableTypeSpec):
  """TypeSpec for `Particle`; supports batching/unbatching of its components."""

  def __init__(self, mass, velocity):
    # `mass` and `velocity` are the component TypeSpecs (TensorSpec-like).
    self.shape = array_ops.broadcast_static_shape(
        mass.shape, velocity.shape)
    self.mass = mass
    self.velocity = velocity

  def _serialize(self):
    return (self.mass, self.velocity)

  @property
  def value_type(self):
    return Particle

  @property
  def _component_specs(self):
    return (self.mass, self.velocity)

  def _to_components(self, value):
    return (value.mass, value.velocity)

  def _from_components(self, components):
    return Particle(*components)

  def _pad_shape_to_full_rank(self, s):
    """Pad component shapes with 1's so all components have the same rank."""
    return tensor_shape.TensorShape(
        [1] * (self.shape.ndims - s.ndims)).concatenate(s)

  def _batch(self, batch_size):
    # Prepend the batch dimension to each rank-padded component shape.
    return ParticleSpec(
        mass=tensor_spec.TensorSpec(
            dtype=self.mass.dtype,
            shape=tensor_shape.TensorShape([batch_size]).concatenate(
                self._pad_shape_to_full_rank(self.mass.shape))),
        velocity=tensor_spec.TensorSpec(
            dtype=self.velocity.dtype,
            shape=tensor_shape.TensorShape([batch_size]).concatenate(
                self._pad_shape_to_full_rank(self.velocity.shape))))

  def _unbatch(self):
    # Drop the leading (batch) dimension from each component spec.
    return ParticleSpec(
        tensor_spec.TensorSpec(dtype=self.mass.dtype,
                               shape=self.mass.shape[1:]),
        tensor_spec.TensorSpec(dtype=self.velocity.dtype,
                               shape=self.velocity.shape[1:]))

  def _to_tensor_list(self, value):
    # Reshape components to full rank so they stack/batch consistently.
    return [array_ops.reshape(
                value.mass,
                self._pad_shape_to_full_rank(value.mass.shape)),
            array_ops.reshape(
                value.velocity,
                self._pad_shape_to_full_rank(value.velocity.shape))]
class CompositeTensorTest(PForTestCase, parameterized.TestCase):
  """Tests pfor/vectorized_map support for CompositeTensor values."""

  @parameterized.parameters((None,), (3,))
  def test_create_composite_inside_loop(self, parallel_iterations):
    num_particles = 10
    velocities = random_ops.random_uniform([num_particles])
    particles = pfor_control_flow_ops.pfor(
        # Build a batch of particles all with the same mass.
        lambda i: Particle(mass=4., velocity=array_ops.gather(velocities, i)),
        num_particles,
        parallel_iterations=parallel_iterations)
    particles_mass, particles_velocity, velocities = self.evaluate(
        (particles.mass, particles.velocity, velocities))
    self.assertAllEqual(particles_mass, 4. * np.ones([num_particles]))
    self.assertAllEqual(particles_velocity, velocities)

  @parameterized.parameters((None,), (3,))
  def test_composite_is_converted_to_batched_tensor(
      self, parallel_iterations):
    particles = pfor_control_flow_ops.pfor(
        lambda _: Particle(mass=random_ops.random_uniform([3]),  # pylint: disable=g-long-lambda
                           velocity=random_ops.random_uniform([5, 3])),
        4,
        parallel_iterations=parallel_iterations)
    # Naively batching the component shapes would give `[4, 3]` and `[4, 5, 3]`
    # which have no consistent broadcast shape.
    self.assertEqual(particles.mass.shape, [4, 1, 3])
    self.assertAllEqual(particles.velocity.shape, [4, 5, 3])

  def test_vectorized_map_gathers_composite_tensors(self):
    particles = Particle(mass=[1., 2., 3., 4., 5.],
                         velocity=[1., 2., 3., 4., 5.])
    self.assertAllEqual(
        pfor_control_flow_ops.vectorized_map(
            lambda x: x.mass * x.velocity, particles),
        particles.mass * particles.velocity)

  def test_vectorized_map_of_ragged_tensors(self):
    # Vmap should be able to handle ragged Tensors as long as they're not
    # *actually* ragged.
    ragged = ragged_tensor.RaggedTensor.from_uniform_row_length(
        ragged_tensor.RaggedTensor.from_row_lengths(
            values=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
            row_lengths=[3, 3, 3, 3]),
        uniform_row_length=2)  # Overall shape [2, 2, 3].
    self.assertAllEqual(
        pfor_control_flow_ops.vectorized_map(
            lambda x: x.to_tensor(shape=[2, 3]), ragged),
        ragged.to_tensor(shape=[2, 2, 3]))
class ParsingTest(PForTestCase):
  """Tests pfor conversion of parsing ops (decode_csv, parse_single_example)."""

  def test_decode_csv(self):
    csv_tensor = constant_op.constant([["1:2:3"], ["::"], ["7:8:9"]])
    # The middle row is empty fields only, so record_defaults are used there.
    kwargs = {"record_defaults": [[10], [20], [30]], "field_delim": ":"}

    def loop_fn(i):
      line = array_ops.gather(csv_tensor, i)
      return parsing_ops.decode_csv(line, **kwargs)

    self._test_loop_fn(loop_fn, iters=3)

  @test_util.run_v1_only("b/122612051")
  def test_parse_single_example(self):

    def _int64_feature(*values):
      return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))

    def _bytes_feature(*values):
      return feature_pb2.Feature(
          bytes_list=feature_pb2.BytesList(
              value=[v.encode("utf-8") for v in values]))

    # 10 serialized Examples with both fixed-length and variable-length
    # features whose contents vary per example.
    examples = constant_op.constant([
        example_pb2.Example(
            features=feature_pb2.Features(
                feature={
                    "dense_int": _int64_feature(i),
                    "dense_str": _bytes_feature(str(i)),
                    "sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
                    "sparse_str": _bytes_feature(*["abc"] * i)
                })).SerializeToString() for i in range(10)
    ])
    features = {
        "dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
        "dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
        "sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
        "sparse_str": parsing_ops.VarLenFeature(dtypes.string),
    }

    def loop_fn(i):
      example_proto = array_ops.gather(examples, i)
      f = parsing_ops.parse_single_example(example_proto, features)
      return f

    # pfor over parse_single_example should match a batched parse_example.
    pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)
    manual = parsing_ops.parse_example(examples, features)
    self.run_and_assert_equal(pfor, manual)
class PartitionedCallTest(PForTestCase):
  """Tests vectorization of (stateful) partitioned function calls."""
  def test_simple(self):
    @def_function.function
    def f(x):
      return math_ops.square(x) + 1
    z = random_ops.random_uniform([4])
    def loop_fn(i):
      return f(array_ops.gather(z, i))
    self._test_loop_fn(loop_fn, 4)
  def test_nested_calls(self):
    @def_function.function
    def inner(x):
      return math_ops.square(x)
    @def_function.function
    def outer(y):
      return math_ops.reduce_sum(inner(y)) + 2
    z = random_ops.random_uniform([4, 2])
    def loop_fn(i):
      return outer(array_ops.gather(z, i))
    self._test_loop_fn(loop_fn, 4)
  def test_nested_definition(self):
    @def_function.function
    def outer(y):
      @def_function.function
      def inner(x):
        return math_ops.square(x) + 1
      return math_ops.reduce_sum(inner(y)) + 2
    z = random_ops.random_uniform([4, 2])
    def loop_fn(i):
      return outer(array_ops.gather(z, i))
    self._test_loop_fn(loop_fn, 4)
  def test_gradients(self):
    @def_function.function
    def f(x):
      return math_ops.square(x) + 1
    z = random_ops.random_uniform([4, 2])
    def loop_fn(i):
      z_i = array_ops.gather(z, i)
      with backprop.GradientTape() as g:
        g.watch(z_i)
        out = f(z_i)
      return out, g.gradient(out, z_i)
    self._test_loop_fn(loop_fn, 4)
  def test_stateful_with_gradients(self):
    z = random_ops.random_uniform([4, 2])
    v = variables.Variable(z[0])
    @def_function.function
    def f(x):
      return math_ops.square(x) + v + 1
    def loop_fn(i):
      z_i = array_ops.gather(z, i)
      with backprop.GradientTape() as g:
        g.watch(z_i)
        out = f(z_i)
      return out, g.gradient(out, z_i)
    self._test_loop_fn(loop_fn, 4)
class SpectralTest(PForTestCase, parameterized.TestCase):
  """Tests vectorization of FFT ops (complex, real, and inverse-real)."""
  @parameterized.parameters(
      (fft_ops.fft,),
      (fft_ops.fft2d,),
      (fft_ops.fft3d,),
      (fft_ops.ifft,),
      (fft_ops.ifft2d,),
      (fft_ops.ifft3d,),
  )
  def test_fft(self, op_func):
    shape = [2, 3, 4, 3, 4]
    x = np.random.uniform(size=shape) + 1j * np.random.uniform(size=shape)
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return op_func(x_i)
    self._test_loop_fn(loop_fn, 2)
  @parameterized.parameters(
      (fft_ops.rfft,),
      (fft_ops.rfft2d,),
      (fft_ops.rfft3d,),
  )
  @test.disable_with_predicate(
      pred=test.is_built_with_rocm,
      skip_message="Disable subtest on ROCm due to rocfft issues")
  def test_rfft(self, op_func):
    for dtype in (dtypes.float32, dtypes.float64):
      x = random_ops.random_uniform([2, 3, 4, 3, 4], dtype=dtype)
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x_i = array_ops.gather(x, i)
        return op_func(x_i)
      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 2)
  @parameterized.parameters(
      (fft_ops.irfft,),
      (fft_ops.irfft2d,),
      (fft_ops.irfft3d,),
  )
  @test.disable_with_predicate(
      pred=test.is_built_with_rocm,
      skip_message="Disable subtest on ROCm due to rocfft issues")
  def test_irfft(self, op_func):
    if config.list_physical_devices("GPU"):
      # TODO(b/149957923): The test is flaky
      self.skipTest("b/149957923: irfft vectorization flaky")
    for dtype in (dtypes.complex64, dtypes.complex128):
      shape = [2, 3, 4, 3, 4]
      x = np.random.uniform(size=shape) + 1j * np.random.uniform(size=shape)
      x = math_ops.cast(x, dtype=dtype)
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x_i = array_ops.gather(x, i)
        return op_func(x_i)
      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 2)
class VariableTest(PForTestCase):
  """Tests variable creation and access inside vectorized functions."""
  def test_create_variable_once(self):
    # Creating the variable only on the first trace (singleton pattern) is
    # supported.
    x = array_ops.ones(shape=(3, 2, 2), dtype=dtypes.float32)
    y = array_ops.ones(shape=(2, 3), dtype=dtypes.float32)
    a_var = []
    def f(z):
      if not a_var:
        a_var.append(variables.Variable(lambda: y, name="a"))
      return math_ops.matmul(z, a_var[0] / 16)
    pfor_control_flow_ops.vectorized_map(f, x)
  @test_util.run_v2_only
  def test_create_variable_repeated(self):
    # Creating a new variable on every call is an error under v2 behavior.
    x = array_ops.ones(shape=(3, 2, 2), dtype=dtypes.float32)
    y = array_ops.ones(shape=(2, 3), dtype=dtypes.float32)
    def f(z):
      a_var = variables.Variable(lambda: y, name="a") / 4
      return math_ops.matmul(z, a_var / 16)
    # Note that this error is only raised under v2 behavior.
    with self.assertRaisesRegex(
        ValueError, "singleton tf.Variable.*on the first call"):
      pfor_control_flow_ops.vectorized_map(f, x)
  @test_util.run_all_in_graph_and_eager_modes
  def test_variable_shape(self):
    v = resource_variable_ops.ResourceVariable([1, 2])
    def loop_fn(_):
      return resource_variable_ops.variable_shape(v.handle)
    self._test_loop_fn(loop_fn, 2)
if __name__ == "__main__":
  # Run all tests in this module.
  test.main()
# Update control_flow_ops_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pfor and for_loop."""
# pylint: disable=g-direct-tensorflow-import
import functools
import sys
import time
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_list_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class PForTest(PForTestCase):
  """Core tests for `pfor`/`vectorized_map` behavior and options."""

  def test_op_conversion_fallback_to_while_loop(self):
    # Note that we used top_k op for this test. If a converter gets defined for
    # it, we will need to find another op for which a converter has not been
    # defined.
    x = random_ops.random_uniform([3, 2, 4])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return nn.top_k(x_i)
    with self.assertRaisesRegex(ValueError, "No pfor vectorization"):
      self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=False)
    self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=True)

  def test_nested_defun(self):
    """Vectorization should work when invoked from inside a tf.function."""
    def loop_fn(i):
      del i  # The loop body is intentionally loop-invariant.
      return constant_op.constant(2)

    @def_function.function
    def f():
      return self._test_loop_fn(loop_fn, 2)

    # Fix: the original called `self.assert(2, f())`, which is a syntax error
    # (`assert` is a keyword). `_test_loop_fn` performs its own comparisons,
    # so simply invoking the traced function exercises the behavior.
    f()

  def test_parallel_iterations(self):
    for parallel_iterations in [2, 3, 8, 10]:
      x = random_ops.random_uniform([8, 3])
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        return array_ops.gather(x, i)
      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 8, parallel_iterations=parallel_iterations)
      self._test_loop_fn(
          loop_fn,
          4 * constant_op.constant(2),
          parallel_iterations=parallel_iterations)

  def test_parallel_iterations_preserves_static_shape(self):
    for parallel_iterations in [2, 3, 8, 10]:
      x = pfor_control_flow_ops.pfor(
          lambda _: random_ops.random_uniform([2, 3]),
          8,
          parallel_iterations=parallel_iterations)
      self.assertAllEqual(x.shape, [8, 2, 3])

  def test_parallel_iterations_zero(self):
    # parallel_iterations=0 is rejected by both pfor and for_loop.
    with self.assertRaisesRegex(ValueError, "positive integer"):
      pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=0)
    with self.assertRaisesRegex(TypeError, "positive integer"):
      pfor_control_flow_ops.for_loop(
          lambda i: 1, dtypes.int32, 8, parallel_iterations=0)

  def test_parallel_iterations_one(self):
    # parallel_iterations=1 is pointless for pfor; users should use for_loop.
    with self.assertRaisesRegex(ValueError, "Use `for_loop` instead"):
      pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=1)

  def test_vectorized_map(self):
    def compute(x):
      return math_ops.reduce_mean(x, axis=0, keepdims=True)
    result = pfor_control_flow_ops.vectorized_map(compute,
                                                  array_ops.ones((10, 5, 3)))
    self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))

  def test_vectorized_map_with_dynamic_shape(self):
    def compute(x):
      return math_ops.reduce_mean(x, axis=0, keepdims=True)
    # Placeholder with unknown shape exercises the dynamic-shape path.
    x = array_ops.placeholder_with_default(
        array_ops.ones((10, 5, 3)), shape=None)
    result = pfor_control_flow_ops.vectorized_map(compute, x)
    self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))

  def test_where_shape(self):
    @def_function.function
    def f():
      a = constant_op.constant([[1.], [1.]])
      b = constant_op.constant([1.])
      result = pfor_control_flow_ops.vectorized_map(
          lambda x: array_ops.where(x > 0, x, b), a)
      return result.shape
    self.assertAllEqual([2, 1], f())

  def test_vectorized_map_broadcasts_unit_dimensions(self):
    convert_with_static_shape = ops.convert_to_tensor
    convert_with_dynamic_shape = (
        lambda x: array_ops.placeholder_with_default(x, shape=None))
    for convert in (convert_with_static_shape, convert_with_dynamic_shape):
      a = convert([3.1])
      b = convert([-2., 6., 9.])
      # One elem with leading unit dimension.
      a_plus_1 = pfor_control_flow_ops.vectorized_map(lambda a: a + 1, a)
      self.assertAllEqual(*self.evaluate((a_plus_1, a + 1)))
      # Two elems, both with leading unit dimension.
      a_plus_a = pfor_control_flow_ops.vectorized_map(sum, (a, a))
      self.assertAllEqual(*self.evaluate((a_plus_a, a + a)))
      # Elem w/ unit dimension broadcast against elem with batch dim.
      a_plus_b = pfor_control_flow_ops.vectorized_map(sum, (a, b))
      self.assertAllEqual(*self.evaluate((a_plus_b, a + b)))

  def test_vectorized_map_example_1(self):
    def outer_product(a):
      return math_ops.tensordot(a, a, 0)
    batch_size = 100
    a = array_ops.ones((batch_size, 32, 32))
    c = pfor_control_flow_ops.vectorized_map(outer_product, a)
    self.assertAllEqual((batch_size, 32, 32, 32, 32), c.shape)

  def test_disable_tf_function(self):
    def_function.run_functions_eagerly(True)
    # vectorized_map should ignore disabling tf.functions
    self.assertTrue(def_function.functions_run_eagerly())
    self.assertAllEqual([0, 1, 4, 9],
                        pfor_control_flow_ops.vectorized_map(
                            lambda x: x * x, math_ops.range(4)))
    self.assertTrue(def_function.functions_run_eagerly())
    def_function.run_functions_eagerly(False)
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesTest(PForTestCase):
  """Tests vectorization of loop bodies that return IndexedSlices."""
  def test_indexed_slices(self):
    def loop_fn(i):
      return indexed_slices.IndexedSlices(
          indices=i, values=array_ops.reshape(i, [1]), dense_shape=[3, 1])
    self._test_loop_fn(loop_fn, 2)
  def test_indexed_slices_components(self):
    def loop_fn(i):
      slices = indexed_slices.IndexedSlices(
          indices=i, values=array_ops.reshape(i, [1]), dense_shape=[3, 1])
      # Note that returning the components inside the slice avoids
      # densification, which may be more efficient.
      return slices.values, slices.indices
    self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class ReductionTest(PForTestCase):
  """Tests cross-iteration reductions via `pfor_config.reduce*`."""
  def test_reduce(self):
    def reduce_fn(p, q):
      return math_ops.reduce_mean(p + q, axis=0)
    x = random_ops.random_uniform([4, 3, 2])
    y = random_ops.random_uniform([4, 3, 2])
    def loop_fn(i, pfor_config):
      x_i = array_ops.gather(x, i)
      y_i = array_ops.gather(y, i)
      reduced = pfor_config.reduce(reduce_fn, x_i, y_i)
      return reduced + x_i
    output = pfor_control_flow_ops.pfor(loop_fn, 4)
    ans = reduce_fn(x, y) + x
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)
  def test_reduce_concat(self):
    x = random_ops.random_uniform([8, 3])
    def loop_fn(i, pfor_config):
      x_i = array_ops.gather(x, i)
      vectorized_value = pfor_config.reduce_concat(x_i)
      mean_value = math_ops.reduce_mean(vectorized_value, axis=0)
      return x_i - mean_value
    output = pfor_control_flow_ops.pfor(loop_fn, 8)
    ans = x - math_ops.reduce_mean(x, axis=0)
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)
  def test_reduce_mean(self):
    x = random_ops.random_uniform([8, 3])
    def loop_fn(i, pfor_config):
      x_i = array_ops.gather(x, i)
      return x_i - pfor_config.reduce_mean(x_i)
    output = pfor_control_flow_ops.pfor(loop_fn, 8)
    ans = x - math_ops.reduce_mean(x, axis=0)
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)
  def test_reduce_sum(self):
    x = random_ops.random_uniform([8, 3])
    def loop_fn(i, pfor_config):
      x_i = array_ops.gather(x, i)
      return x_i - pfor_config.reduce_sum(x_i)
    output = pfor_control_flow_ops.pfor(loop_fn, 8)
    ans = x - math_ops.reduce_sum(x, axis=0)
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)
  def test_reduce_class(self):
    # A callable class is accepted as the loop body.
    x = random_ops.random_uniform([8, 3])
    class LoopFn:
      def __init__(self):
        pass
      def __call__(self, i, pfor_config):
        x_i = array_ops.gather(x, i)
        return x_i - pfor_config.reduce_mean(x_i)
    output = pfor_control_flow_ops.pfor(LoopFn(), 8)
    ans = x - math_ops.reduce_mean(x, axis=0)
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)
  def test_reduce_functools_partial(self):
    # A functools.partial is accepted as the loop body.
    x = random_ops.random_uniform([8, 3])
    def fn(i, pfor_config, dummy=None):
      del dummy
      x_i = array_ops.gather(x, i)
      return x_i - pfor_config.reduce_mean(x_i)
    loop_fn = functools.partial(fn, dummy=1)
    output = pfor_control_flow_ops.pfor(loop_fn, 8)
    ans = x - math_ops.reduce_mean(x, axis=0)
    output_val, ans_val = self.evaluate([output, ans])
    self.assertAllClose(ans_val, output_val)
  def test_parallel_iterations(self):
    # Reductions are incompatible with parallel_iterations.
    x = random_ops.random_uniform([8, 3])
    def loop_fn(i, pfor_config):
      x_i = array_ops.gather(x, i)
      return pfor_config.reduce_sum(x_i)
    with self.assertRaisesRegex(ValueError,
                                "`parallel_iterations` currently unsupported"):
      pfor_control_flow_ops.pfor(loop_fn, 8, parallel_iterations=2)
  def test_var_loop_len(self):
    if context.executing_eagerly():
      self.skipTest("Variable length not possible under eager execution.")
    x = random_ops.random_uniform([8, 3])
    def loop_fn(i, pfor_config):
      return pfor_config.reduce_sum(array_ops.gather(x, i))
    num_iters = array_ops.placeholder(dtypes.int32)
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    with self.cached_session() as sess:
      sess.run(pfor, feed_dict={num_iters: 8})
@test_util.run_all_in_graph_and_eager_modes
class BitwiseTest(PForTestCase):
  """Tests vectorization of bitwise unary and binary ops."""
  def test_unary_cwise(self):
    for op in [bitwise_ops.invert]:
      x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x1 = array_ops.gather(x, i)
        return op(x1)
      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 3)
  def test_binary_cwise(self):
    binary_ops = [
        bitwise_ops.bitwise_and,
        bitwise_ops.bitwise_or,
        bitwise_ops.bitwise_xor,
        bitwise_ops.left_shift,
        bitwise_ops.right_shift,
    ]
    for op in binary_ops:
      x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
      y = random_ops.random_uniform([3, 5], maxval=10, dtype=dtypes.int32)
      output_dtypes = []
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        # Exercise all combinations of loop-variant and loop-invariant args.
        x1 = array_ops.gather(x, i)
        y1 = array_ops.gather(y, i)
        outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
        del output_dtypes[:]
        output_dtypes.extend(t.dtype for t in outputs)
        return outputs
      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 3)
@test_util.run_all_in_graph_and_eager_modes
class ImageTest(PForTestCase):
  """Tests vectorization of image adjustment ops."""
  def test_adjust_contrast(self):
    images = random_ops.random_uniform([3, 2, 4, 4, 3])
    def loop_fn(i):
      image = array_ops.gather(images, i)
      return image_ops.adjust_contrast(image, 2.0)
    self._test_loop_fn(loop_fn, 3)
  def test_adjust_hue(self):
    images = random_ops.random_uniform([3, 2, 4, 4, 3])
    def loop_fn(i):
      image = array_ops.gather(images, i)
      return image_ops.adjust_hue(image, .25)
    self._test_loop_fn(loop_fn, 3)
  def test_adjust_saturation(self):
    images = random_ops.random_uniform([3, 2, 4, 4, 3])
    def loop_fn(i):
      image = array_ops.gather(images, i)
      return image_ops.adjust_saturation(image, 0.1)
    self._test_loop_fn(loop_fn, 3)
@test_util.run_all_in_graph_and_eager_modes
class NNTest(PForTestCase):
  """Tests vectorization of neural-network ops (conv, pool, norm, softmax)."""
  def test_conv2d(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    filt = random_ops.random_uniform([3, 3, 3, 7])
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return nn.conv2d(
          x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_conv2d_backprop_input(self):
    x_shape = [2, 12, 12, 3]
    filt = random_ops.random_uniform([3, 3, 3, 7])
    grad = random_ops.random_uniform([3, 2, 5, 5, 7])
    def loop_fn(i):
      grad1 = array_ops.gather(grad, i)
      return nn.conv2d_backprop_input(
          x_shape,
          filt,
          grad1,
          strides=[1, 2, 2, 1],
          padding="VALID",
          data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_conv2d_backprop_filter(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    x_0 = array_ops.gather(x, 0)
    filter_sizes = [3, 3, 3, 7]
    grad = random_ops.random_uniform([3, 2, 5, 5, 7])
    def loop_fn(i):
      # Test with both a loop-variant and a loop-invariant input.
      x_i = array_ops.gather(x, i)
      grad_i = array_ops.gather(grad, i)
      return [
          nn.conv2d_backprop_filter(
              inp,
              filter_sizes,
              grad_i,
              strides=[1, 2, 2, 1],
              padding="VALID",
              data_format="NHWC") for inp in [x_i, x_0]
      ]
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    filt = random_ops.random_uniform([3, 3, 3, 3, 2])
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      filt1 = array_ops.gather(filt, i)
      return nn.depthwise_conv2d_native(
          x1, filt1, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native_backprop_input(self):
    x_shape = [2, 12, 12, 3]
    filt = random_ops.random_uniform([3, 3, 3, 3, 2])
    grad = random_ops.random_uniform([3, 2, 5, 5, 6])
    def loop_fn(i):
      grad1 = array_ops.gather(grad, i)
      filt1 = array_ops.gather(filt, i)
      return nn.depthwise_conv2d_native_backprop_input(
          x_shape,
          filt1,
          grad1,
          strides=[1, 2, 2, 1],
          padding="VALID",
          data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native_backprop_filter(self):
    x = random_ops.random_uniform([3, 2, 12, 12, 3])
    filter_sizes = [3, 3, 3, 2]
    grad = random_ops.random_uniform([3, 2, 5, 5, 6])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      grad_i = array_ops.gather(grad, i)
      return nn.depthwise_conv2d_native_backprop_filter(
          x_i,
          filter_sizes,
          grad_i,
          strides=[1, 2, 2, 1],
          padding="VALID",
          data_format="NHWC")
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native_nchw(self):
    if not test_util.is_gpu_available():
      self.skipTest("NCHW only works on GPU")
    x = random_ops.random_uniform([3, 2, 3, 12, 12])
    filt = random_ops.random_uniform([3, 3, 3, 3, 2])
    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      filt1 = array_ops.gather(filt, i)
      return nn.depthwise_conv2d_native(
          x1, filt1, strides=[1, 1, 2, 2], padding="VALID", data_format="NCHW")
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native_backprop_input_nchw(self):
    if not test_util.is_gpu_available():
      self.skipTest("NCHW only works on GPU")
    x_shape = [2, 3, 12, 12]
    filt = random_ops.random_uniform([3, 3, 3, 3, 2])
    grad = random_ops.random_uniform([3, 2, 6, 5, 5])
    def loop_fn(i):
      grad1 = array_ops.gather(grad, i)
      filt1 = array_ops.gather(filt, i)
      return nn.depthwise_conv2d_native_backprop_input(
          x_shape,
          filt1,
          grad1,
          strides=[1, 1, 2, 2],
          padding="VALID",
          data_format="NCHW")
    self._test_loop_fn(loop_fn, 3)
  def test_depthwise_conv2d_native_backprop_filter_nchw(self):
    if not test_util.is_gpu_available():
      self.skipTest("NCHW only works on GPU")
    x = random_ops.random_uniform([3, 2, 3, 12, 12])
    filter_sizes = [3, 3, 3, 2]
    grad = random_ops.random_uniform([3, 2, 6, 5, 5])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      grad_i = array_ops.gather(grad, i)
      return nn.depthwise_conv2d_native_backprop_filter(
          x_i,
          filter_sizes,
          grad_i,
          strides=[1, 1, 2, 2],
          padding="VALID",
          data_format="NCHW")
    self._test_loop_fn(loop_fn, 3)
  def test_roll(self):
    x = random_ops.random_uniform([3, 6, 7])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return manip_ops.roll(x_i, 3, axis=1)
    self._test_loop_fn(loop_fn, 3)
  def test_ensure_shape(self):
    x = random_ops.random_uniform([3, 6, 7])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return array_ops.ensure_shape(x_i, [6, 7])
    self._test_loop_fn(loop_fn, 3)
  def test_loop_variant_roll_shift(self):
    # The roll shift depends on the loop index (loop-variant).
    x = random_ops.random_uniform([3, 5, 6, 7])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return manip_ops.roll(x_i, [i - 2, -1, i], axis=[1, 2, 2])
    self._test_loop_fn(loop_fn, 3)
  def test_loop_variant_roll_scalar_shift(self):
    x = random_ops.random_uniform([5, 5, 6])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return manip_ops.roll(x_i, i, axis=0)
    self._test_loop_fn(loop_fn, 5)
  def test_avg_pool(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.avg_pool(
            x1,
            ksize,
            strides=[1, 2, 2, 1],
            padding="VALID",
            data_format="NHWC")
        loss = nn.l2_loss(output)
      return output, g.gradient(loss, x1)
    self._test_loop_fn(loop_fn, 3)
  def test_avg_pool3d(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([5, 3, 7, 6, 6, 5])
      g.watch(x)
      ksize = [1, 2, 2, 2, 1]
      strides = [1, 2, 2, 2, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.avg_pool3d(
            x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
        loss = nn.l2_loss(output)
      return output, g.gradient(loss, x1)
    self._test_loop_fn(loop_fn, 3)
  def test_max_pool(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]
      strides = [1, 2, 2, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.max_pool(
            x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
        loss = nn.l2_loss(output)
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      # Second-order gradient w.r.t. the output_gradients.
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad
    self._test_loop_fn(loop_fn, 3)
  def test_max_pool_v2(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 3, 3, 1]
      strides = [1, 2, 2, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = gen_nn_ops.max_pool_v2(
            x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
        loss = nn.l2_loss(output)
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad
    self._test_loop_fn(loop_fn, 3)
  def test_max_pool3d(self):
    with backprop.GradientTape(persistent=True) as g:
      x = random_ops.random_uniform([3, 3, 2, 12, 12, 3])
      g.watch(x)
      ksize = [1, 1, 3, 3, 1]
      strides = [1, 1, 2, 2, 1]
    def loop_fn(i):
      with g:
        x1 = array_ops.gather(x, i)
        output = nn.max_pool3d(
            x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
        loss = nn.l2_loss(output)
        ones = array_ops.ones_like(output)
        g.watch(ones)
        grad = g.gradient(loss, x1, output_gradients=ones)
      grad_grad = g.gradient(grad, ones)
      return output, grad, grad_grad
    self._test_loop_fn(loop_fn, 3)
  def test_fused_batch_norm(self):
    data_formats = ["NHWC"]
    if test.is_gpu_available():
      data_formats.append("NCHW")
    for is_training in (True, False):
      for data_format in data_formats:
        with backprop.GradientTape(persistent=True) as g:
          if data_format == "NCHW":
            x = random_ops.random_uniform([3, 1, 2, 5, 5])
          else:
            x = random_ops.random_uniform([3, 1, 5, 5, 2])
          g.watch(x)
          scale = random_ops.random_uniform([2])
          g.watch(scale)
          offset = random_ops.random_uniform([2])
          g.watch(offset)
          mean = None if is_training else random_ops.random_uniform([2])
          variance = None if is_training else random_ops.random_uniform([2])
        # pylint: disable=cell-var-from-loop
        def loop_fn(i):
          with g:
            x1 = array_ops.gather(x, i)
            outputs = nn.fused_batch_norm(
                x1,
                scale,
                offset,
                mean=mean,
                variance=variance,
                epsilon=0.01,
                data_format=data_format,
                is_training=is_training)
            outputs = list(outputs)
            # We only test the first value of outputs when is_training is
            # False. It looks like CPU and GPU have different outputs for
            # batch_mean and batch_variance for this case.
            if not is_training:
              outputs[1] = constant_op.constant(0.)
              outputs[2] = constant_op.constant(0.)
            loss = nn.l2_loss(outputs[0])
          if is_training:
            gradients = g.gradient(loss, [x1, scale, offset])
          else:
            gradients = [constant_op.constant(0.)] * 3
          return outputs + gradients
        # pylint: enable=cell-var-from-loop
        self._test_loop_fn(loop_fn, 3)
  def test_log_softmax(self):
    logits = random_ops.random_uniform([3, 2, 4])
    def loop_fn(i):
      logits_i = array_ops.gather(logits, i)
      return (nn.log_softmax(logits_i), nn.log_softmax(logits_i, axis=0),
              nn.log_softmax(logits_i, axis=-1))
    self._test_loop_fn(loop_fn, 3)
  def test_softmax(self):
    logits = random_ops.random_uniform([3, 2, 4])
    def loop_fn(i):
      logits_i = array_ops.gather(logits, i)
      return (nn.softmax(logits_i), nn.softmax(logits_i, axis=0),
              nn.softmax(logits_i, axis=-1))
    self._test_loop_fn(loop_fn, 3)
  def test_softmax_cross_entropy_with_logits(self):
    with backprop.GradientTape(persistent=True) as g:
      logits = random_ops.random_uniform([3, 2, 4])
      g.watch(logits)
      labels = random_ops.random_uniform([3, 2, 4])
      # Normalize so each label row forms a probability distribution.
      labels /= math_ops.reduce_sum(labels, axis=[2], keepdims=True)
    def loop_fn(i):
      with g:
        logits_i = array_ops.gather(logits, i)
        labels_i = array_ops.gather(labels, i)
        loss = nn.softmax_cross_entropy_with_logits(
            labels=labels_i, logits=logits_i)
        total_loss = math_ops.reduce_sum(loss)
      return loss, g.gradient(total_loss, logits_i)
    self._test_loop_fn(loop_fn, 3)
  def test_sparse_softmax_cross_entropy_with_logits(self):
    logits = random_ops.random_uniform([3, 2, 4])
    labels = random_ops.random_uniform(
        shape=[3, 2], maxval=4, dtype=dtypes.int32)
    def loop_fn(i):
      logits_i = array_ops.gather(logits, i)
      labels_i = array_ops.gather(labels, i)
      loss = nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels_i, logits=logits_i)
      return loss
    self._test_loop_fn(loop_fn, 3)
class RandomTest(PForTestCase):
  """Tests vectorization of stateful random ops (shape-only checks)."""
  # The random values generated in the two implementations are not guaranteed to
  # match. So we only check the returned shapes.
  def run_and_assert_equal(self, targets1, targets2, rtol=1e-4, atol=1e-5):
    # Overrides the base-class check to compare shapes instead of values.
    outputs = self._run_targets(targets1, targets2)
    n = len(outputs) // 2
    for i in range(n):
      self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)
  def test_random_uniform(self):
    def loop_fn(_):
      return random_ops.random_uniform([3])
    self._test_loop_fn(loop_fn, 5)
  def test_random_uniform_int(self):
    def loop_fn(_):
      return random_ops.random_uniform([3], maxval=1, dtype=dtypes.int32)
    self._test_loop_fn(loop_fn, 5)
  def test_random_standard_normal(self):
    def loop_fn(_):
      return random_ops.random_normal([3])
    self._test_loop_fn(loop_fn, 5)
  def test_truncated_normal(self):
    def loop_fn(_):
      return random_ops.truncated_normal([3])
    self._test_loop_fn(loop_fn, 5)
  def test_random_gamma_invariant_alpha(self):
    def loop_fn(_):
      return random_ops.random_gamma([3], alpha=[0.5])
    self._test_loop_fn(loop_fn, 5)
  def test_random_gamma_varying_alpha(self):
    alphas = math_ops.exp(random_ops.random_normal([5, 3, 2]))
    def loop_fn(i):
      alphas_i = array_ops.gather(alphas, i)
      # Test both scalar and non-scalar params and shapes.
      return (random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[]),
              random_ops.random_gamma(alpha=alphas_i, shape=[]),
              random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[3]),
              random_ops.random_gamma(alpha=alphas_i, shape=[3]))
    self._test_loop_fn(loop_fn, 5)
  def test_random_poisson_v2_invariant_rate(self):
    def loop_fn(_):
      return random_ops.random_poisson(lam=[1.3], shape=[3])
    self._test_loop_fn(loop_fn, 5)
  def test_random_poisson_v2_varying_rate(self):
    rates = math_ops.exp(random_ops.random_normal([5, 3, 2]))
    def loop_fn(i):
      rates_i = array_ops.gather(rates, i)
      # Test both scalar and non-scalar params and shapes.
      return (random_ops.random_poisson(lam=rates_i[0, 0], shape=[]),
              random_ops.random_poisson(lam=rates_i, shape=[]),
              random_ops.random_poisson(lam=rates_i[0, 0], shape=[3]),
              random_ops.random_poisson(lam=rates_i, shape=[3]))
    self._test_loop_fn(loop_fn, 5)
  def test_random_multinomial_invariant_logits(self):
    def loop_fn(_):
      return random_ops.categorical(logits=[[1., -1.]], num_samples=3)
    self._test_loop_fn(loop_fn, 5)
  def test_random_multinomial_varying_logits(self):
    logits = random_ops.random_normal([5, 3, 2])
    def loop_fn(i):
      logits_i = array_ops.gather(logits, i)
      return random_ops.categorical(logits_i, num_samples=3)
    self._test_loop_fn(loop_fn, 5)
class StatelessRandomTest(PForTestCase):
  """Tests vectorization of stateless random ops (shape-only checks)."""
  # This test currently only tests that the vectorized and non-vectorized
  # outputs have same shapes. This is needed since under XLA compilation,
  # stateless random numbers can generate different random numbers.
  # TODO(agarwal): switch to checking for actual values matching once
  # b/149402339 is resolved.
  def run_and_assert_equal(self, targets1, targets2, rtol=1e-4, atol=1e-5):
    # Overrides the base-class check to compare shapes instead of values.
    outputs = self._run_targets(targets1, targets2)
    n = len(outputs) // 2
    for i in range(n):
      self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)
  # TODO(agarwal): add tests for other random functions
  def test_multinomial(self):
    seeds = [[1, 2], [3, 4]]
    logits = random_ops.random_uniform([2, 3, 4])
    def loop_fn(i):
      # Exercise all combinations of loop-variant/invariant logits and seeds.
      logits_0 = array_ops.gather(logits, 0)
      logits_i = array_ops.gather(logits, i)
      seeds_0 = array_ops.gather(seeds, 0)
      seeds_i = array_ops.gather(seeds, i)
      return (stateless_random_ops.stateless_categorical(
          logits=logits_i, num_samples=3, seed=seeds_i),
              stateless_random_ops.stateless_categorical(
                  logits=logits_i, num_samples=3, seed=seeds_0),
              stateless_random_ops.stateless_categorical(
                  logits=logits_0, num_samples=3, seed=seeds_i),
              stateless_random_ops.stateless_categorical(
                  logits=logits_0, num_samples=3, seed=seeds_0))
    self._test_loop_fn(loop_fn, 2)
class LoggingTest(PForTestCase):
  """Tests pfor conversion of logging and assertion ops."""

  def test_print(self):
    x = random_ops.random_uniform([3, 5])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      return logging_ops.Print(
          x1, [x1, "x1", array_ops.shape(x1)], summarize=10)

    self._test_loop_fn(loop_fn, 3)

  def test_print_v2(self):
    x = constant_op.constant([1, 2, 3])

    def loop_fn(i):
      x1 = array_ops.gather(x, i)
      # The print runs as a control dependency; the loop output is just x1.
      with ops.control_dependencies([
          logging_ops.print_v2(
              x1, "x1", array_ops.shape(x1), summarize=10)]):
        return array_ops.identity(x1)

    self._test_loop_fn(loop_fn, 3)
    # Also verify the actual text written to stderr by the vectorized print.
    with self.captureWritesToStream(sys.stderr) as printed:
      self.evaluate(pfor_control_flow_ops.pfor(loop_fn, 3))
    self.assertIn("[1 2 3] x1 []", printed.contents())

  def test_assert(self):

    def loop_fn(i):
      return control_flow_ops.Assert(i < 10, [i, [10], [i + 1]])

    # TODO(agarwal): make this work with for_loop.
    with session.Session() as sess:
      sess.run(pfor_control_flow_ops.pfor(loop_fn, 3))
      # Also exercise the variant where loop_fn takes a pfor_config argument.
      sess.run(pfor_control_flow_ops.pfor(
          lambda i, pfor_config: loop_fn(i), 3))
class TensorArrayTest(PForTestCase):
  """Tests pfor conversion of TensorArray ops (under control flow v1)."""

  def setUp(self):
    # These tests target v1 TensorArray behavior: force control flow v1 here
    # and restore the previous toggle in tearDown.
    self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
    control_flow_v2_toggles.disable_control_flow_v2()
    super(TensorArrayTest, self).setUp()

  def tearDown(self):
    if self._enabled:
      control_flow_v2_toggles.enable_control_flow_v2()
    super(TensorArrayTest, self).tearDown()

  @test_util.run_v1_only("b/122612051")
  def test_create_outside_and_read(self):
    ta = tensor_array_ops.TensorArray(
        dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)

    def loop_fn(i):
      # Read both a loop-dependent and a fixed index.
      return ta.read(i), ta.read(0)

    self._test_loop_fn(loop_fn, 2)

  @test_util.run_v1_only("b/122612051")
  def test_create_outside_and_gather(self):
    ta = tensor_array_ops.TensorArray(
        dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)

    def loop_fn(i):
      return ta.gather([i]), ta.gather([0, 1])

    self._test_loop_fn(loop_fn, 2)

  @test_util.run_v1_only("b/122612051")
  def test_create_outside_and_write_and_scatter(self):
    t = tensor_array_ops.TensorArray(dtypes.int32, 10, clear_after_read=False)
    handle = t.handle

    def loop_fn(i):
      ta = t.write(i + 2, 2 * i).write(i, 5)
      ta = ta.scatter([4 + i], [4]).scatter([6 + i, 8 + i], [6 + i, 8 + i])
      return ta.flow

    # Run the same writes via pfor and via for_loop against the shared
    # TensorArray handle and compare the resulting stacked contents.
    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
    out1 = tensor_array_ops.TensorArray(
        dtypes.int32, handle=handle, flow=t1[-1]).stack()
    output1 = self._run_targets(out1)
    t2 = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, iters=2)
    out2 = tensor_array_ops.TensorArray(
        dtypes.int32, handle=handle, flow=t2[-1]).stack()
    output2 = self._run_targets(out2)
    self.assertAllClose(output2, output1)

  @test_util.run_v1_only("b/122612051")
  def test_create_inside_and_write(self):

    def loop_fn(i):
      # TODO(agarwal): switching the order of writes to ta1 does not work.
      ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).write(0,
                                                                i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(dtypes.int32, 1).write(0, 1)
      return ta1.stack(), ta2.stack()

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_create_inside_and_scatter(self):

    def loop_fn(i):
      # TODO(agarwal): switching the order of scatter to ta1 does not work.
      ta1 = tensor_array_ops.TensorArray(dtypes.int32,
                                         2).scatter([0],
                                                    [[i, 2]]).scatter([1],
                                                                      [[1, 2]])
      ta2 = tensor_array_ops.TensorArray(dtypes.int32,
                                         2).scatter([0], [3]).scatter([1], [4])
      return ta1.stack(), ta2.stack()

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_create_inside_and_read(self):

    def loop_fn(i):
      ta1 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
      # TODO(agarwal): ta1.read(i) currently is not supported.
      return ta1.read(0), ta2.read(0), ta2.read(i)

    self._test_loop_fn(loop_fn, 2)

  @test_util.run_v1_only("b/122612051")
  def test_create_inside_and_gather(self):

    def loop_fn(i):
      ta1 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
      ta2 = tensor_array_ops.TensorArray(
          dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
      # TODO(agarwal): ta1.read(i) currently is not supported.
      return ta1.gather([0, 1]), ta2.gather([0, 1]), ta2.gather([i])

    self._test_loop_fn(loop_fn, 2)

  @test_util.run_v1_only("b/122612051")
  def test_grad(self):
    x = random_ops.random_uniform([3, 2])
    ta = tensor_array_ops.TensorArray(
        dtypes.float32, 3, clear_after_read=False).unstack(x)
    y = math_ops.square(ta.stack())

    def loop_fn(i):
      y_i = array_ops.gather(y, i)
      grad = gradient_ops.gradients(y_i, x)[0]
      return array_ops.gather(grad, i)

    t1 = pfor_control_flow_ops.pfor(loop_fn, iters=3)
    # y = x * x. Hence dy/dx = 2 * x.
    actual_grad = 2.0 * x
    with session.Session() as sess:
      # NOTE(review): the names below are swapped -- after this line
      # `actual_grad` holds the pfor-computed gradient and `computed_grad`
      # holds the analytic 2*x value. assertAllClose is symmetric, so the
      # check is still valid.
      actual_grad, computed_grad = sess.run([t1, actual_grad])
      self.assertAllClose(actual_grad, computed_grad)
@test_util.run_all_in_graph_and_eager_modes
class TensorListTest(PForTestCase):
  """Tests pfor conversion of TensorList ops."""

  def test_create_outside_and_write(self):
    handle1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
    handle2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)

    def loop_fn(i):
      h1 = list_ops.tensor_list_set_item(handle1, 0, i)
      h1 = list_ops.tensor_list_set_item(h1, 1, 1)
      h2 = list_ops.tensor_list_set_item(handle2, 0, 1)
      return (list_ops.tensor_list_stack(h1, dtypes.int32),
              list_ops.tensor_list_stack(h2, dtypes.int32))

    self._test_loop_fn(loop_fn, 3)

  def _make_graph_def(self, text):
    # Helper: parse a text-format GraphDef.
    ret = graph_pb2.GraphDef()
    text_format.Parse(text, ret)
    return ret

  def test_no_fallback_with_internal_stacking(self):
    # Create an op (really a function) that pfor definitely does not have a
    # converter for. Assumes pfor does not start looking up function definitions
    # for op-type-is-function-name calls.

    @def_function.function
    def opaque_list_fetch(x):
      array_ops.identity(x)
      return list_ops.tensor_list_get_item(x, 0, dtypes.int32)

    external_handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
    opaque_list_fetch_concrete = opaque_list_fetch.get_concrete_function(
        external_handle)
    opaque_list_fetch_name = opaque_list_fetch_concrete.name

    def loop_fn(i):
      h1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      h1 = list_ops.tensor_list_set_item(h1, 0, i)
      opaque_list_fetch_concrete.add_to_graph()
      # Build a tiny graph that routes the list handle through the opaque
      # function, then import it into the current graph.
      graph_def = self._make_graph_def("""
         node { name: 'x' op: 'Placeholder'
                attr { key: 'dtype' value { type: DT_FLOAT } }}
         node { name: 'fn' op: '""" + opaque_list_fetch_name.decode()
                                       + """' input: 'x:0' }""")
      return importer.import_graph_def(
          graph_def,
          input_map={"x:0": h1},
          return_elements=["fn"],
          name="import")[0].outputs[0]

    # Vectorization is rejected whether or not while_loop fallback is allowed.
    with self.assertRaisesRegex(ValueError, "No pfor vectorization"):
      self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=False)
    with self.assertRaisesRegex(ValueError, "No pfor vectorization"):
      self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=True)

  def test_create_inside_and_write(self):

    def loop_fn(i):
      h1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      h1 = list_ops.tensor_list_set_item(h1, 0, i)
      h1 = list_ops.tensor_list_set_item(h1, 1, 1)
      h2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      h2 = list_ops.tensor_list_set_item(h2, 0, 1)
      return (list_ops.tensor_list_stack(h1, dtypes.int32),
              list_ops.tensor_list_stack(h2, dtypes.int32))

    self._test_loop_fn(loop_fn, 3)

  def test_create_outside_and_read(self):
    handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
    handle = list_ops.tensor_list_set_item(handle, 0, 0)
    handle = list_ops.tensor_list_set_item(handle, 1, 1)

    def loop_fn(i):
      return (list_ops.tensor_list_get_item(handle, i, dtypes.int32),
              list_ops.tensor_list_get_item(handle, 0, dtypes.int32),
              list_ops.tensor_list_length(handle),
              list_ops.tensor_list_element_shape(handle, dtypes.int32),
              list_ops.tensor_list_element_shape(handle, dtypes.int64))

    self._test_loop_fn(loop_fn, 2)

  def test_create_inside_and_read(self):

    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      handle = list_ops.tensor_list_set_item(handle, 0, i)
      handle = list_ops.tensor_list_set_item(handle, 1, 1)
      return (list_ops.tensor_list_get_item(handle, 0, dtypes.int32),
              list_ops.tensor_list_get_item(handle, i, dtypes.int32),
              list_ops.tensor_list_length(handle),
              list_ops.tensor_list_element_shape(handle, dtypes.int32),
              list_ops.tensor_list_element_shape(handle, dtypes.int64))

    self._test_loop_fn(loop_fn, 2)

  def test_create_outside_and_push_back(self):
    h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)

    def loop_fn(i):
      handle = list_ops.tensor_list_push_back(h, [i, 2])
      handle = list_ops.tensor_list_push_back(handle, [1, 2])
      handle = list_ops.tensor_list_push_back(handle, [1, 2])
      return list_ops.tensor_list_stack(handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 3)

  def test_create_inside_and_push_back(self):

    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
      handle = list_ops.tensor_list_push_back(handle, [i, 2])
      handle = list_ops.tensor_list_push_back(handle, [1, 2])
      return list_ops.tensor_list_stack(handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 3)

  def test_pop_back_no_shape(self):

    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
      handle = list_ops.tensor_list_push_back(handle, [1, 2])
      handle = list_ops.tensor_list_push_back(handle, [i, 2])
      handle, tensor = list_ops.tensor_list_pop_back(handle, dtypes.int32)
      return tensor, list_ops.tensor_list_stack(handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 3)

  def test_pop_back_no_shape_capture(self):
    # Pop from a list handle captured from outside the loop.
    h = list_ops.tensor_list_reserve([2], 1, dtypes.int32)
    h = list_ops.tensor_list_push_back(h, [1, 2])

    def loop_fn(i):
      handle, tensor = list_ops.tensor_list_pop_back(h, dtypes.int32)
      handle = list_ops.tensor_list_push_back(handle, [1, i])
      return tensor, list_ops.tensor_list_stack(handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 3)

  def test_pop_back_with_shape(self):

    @def_function.function
    def loop_fn(i):
      with backprop.GradientTape() as tape:
        handle = list_ops.tensor_list_reserve(None, 1, dtypes.float32)
        x = math_ops.cast(i, dtypes.float32)[None]
        tape.watch(x)
        handle = list_ops.tensor_list_push_back(handle, x)
        stacked = list_ops.tensor_list_stack(handle, dtypes.float32)
      list_grad = tape.gradient(stacked, x, x)
      # The gradient of a push_back/stack pipeline goes through
      # TensorListPopBack; assert that converter path is taken.
      self.assertEqual("TensorListPopBack", list_grad.op.type)
      return list_grad, stacked, list_grad.op.inputs[1]

    self._test_loop_fn(loop_fn, 3)

  def test_create_outside_and_scatter(self):
    h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)

    def loop_fn(i):
      handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=h)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      return list_ops.tensor_list_stack(handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 3)

  def test_create_inside_and_scatter(self):

    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
      handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      return list_ops.tensor_list_stack(handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 3)

  def test_loop_variant_scatter_indices(self):
    # Scatter indices themselves depend on the pfor loop index.

    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 10, dtypes.int32)
      handle = list_ops.tensor_list_scatter(
          [[1, i], [i + 1, 2]],
          [i, i + 5], input_handle=handle)
      return list_ops.tensor_list_stack(handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 5)

  def test_loop_variant_scatter_duplicate_indices(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")

    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 10, dtypes.int32)
      # Index i appears twice in the scatter.
      handle = list_ops.tensor_list_scatter(
          [[1, i], [1, i + 1], [i + 2, 3]],
          [i, i, i + 2], input_handle=handle)
      return list_ops.tensor_list_stack(handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 5)

  def test_create_outside_and_gather(self):
    handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
    handle = list_ops.tensor_list_scatter([[2, 3]], [0], input_handle=handle)
    handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)

    def loop_fn(i):
      return (list_ops.tensor_list_gather(handle, [0, 1], dtypes.int32),
              list_ops.tensor_list_gather(handle, [i], dtypes.int32))

    self._test_loop_fn(loop_fn, 2)

  def test_create_inside_and_gather(self):

    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
      handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      return (list_ops.tensor_list_gather(handle, [0, 1], dtypes.int32),
              list_ops.tensor_list_gather(handle, [i], dtypes.int32))

    self._test_loop_fn(loop_fn, 2)

  def test_create_inside_and_concat(self):

    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
      handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      return gen_list_ops.tensor_list_concat_v2(
          handle,
          element_dtype=dtypes.int32,
          element_shape=[2],
          leading_dims=[])

    # Check the concatenated values and the per-element lengths directly.
    output = pfor_control_flow_ops.pfor(loop_fn, 2)
    self.assertAllClose([[0, 2, 1, 2], [1, 2, 1, 2]], output[0])
    self.assertAllClose([[2, 2], [2, 2]], output[1])

  def test_create_outside_and_concat(self):
    h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)

    def loop_fn(i):
      handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=h)
      handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
      return gen_list_ops.tensor_list_concat_v2(
          handle,
          element_dtype=dtypes.int32,
          element_shape=[2],
          leading_dims=[])

    output = pfor_control_flow_ops.pfor(loop_fn, 2)
    self.assertAllClose([[0, 2, 1, 2], [1, 2, 1, 2]], output[0])
    self.assertAllClose([[2, 2], [2, 2]], output[1])

  def test_tensor_list_from_tensor(self):
    t = random_ops.random_uniform([2, 3, 4])

    def loop_fn(i):
      handle = list_ops.tensor_list_from_tensor(array_ops.gather(t, i), [4])
      return list_ops.tensor_list_stack(handle, t.dtype)

    self._test_loop_fn(loop_fn, 2)

  @test_util.enable_control_flow_v2
  def test_tensor_list_reserve_while_loop(self):
    # Here a loop invariant TensorList is captured by a while_loop, which then
    # performs loop dependent operations on it, resulting in a loop variant
    # output. This forces stacking of the variant handle captured by the
    # while_loop.
    # We handle this particular case by forcing vectorization of
    # TensorListReserve operation.

    def loop_fn(i):
      handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      _, out_handle = control_flow_ops.while_loop(
          lambda j, _: j < 2, lambda j, h:
          (j + 1, list_ops.tensor_list_set_item(h, j, i)), (0, handle))
      return list_ops.tensor_list_stack(out_handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 2)

  @test_util.enable_control_flow_v2
  def test_tensor_list_while_loop_stacked_cond_stacked_list(self):
    # Both the while condition and the list contents depend on the pfor index.

    def loop_fn(i):
      handle = list_ops.tensor_list_from_tensor([20, 21, 22, 23, i], [])
      _, out_handle = control_flow_ops.while_loop(
          lambda j, _: j < i,
          lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, i)),
          (0, handle))
      return list_ops.tensor_list_stack(out_handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 5)

  @test_util.enable_control_flow_v2
  def test_tensor_list_while_loop_stacked_cond_stacked_list_unknown_shape(self):

    def loop_fn(i):
      # Element shape is unknown (None); the first loop fills the list so the
      # second (pfor-index-dependent) loop operates on a populated list.
      handle = list_ops.tensor_list_reserve(None, 5, dtypes.int32)
      _, handle = control_flow_ops.while_loop(
          lambda j, _: j < 5,
          lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, 0)),
          (0, handle))
      _, out_handle = control_flow_ops.while_loop(
          lambda j, _: j < i,
          lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, i)),
          (0, handle))
      return list_ops.tensor_list_stack(out_handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 5)

  @test_util.enable_control_flow_v2
  def test_tensor_list_while_loop_stacked_cond_unstacked_list(self):
    # Stacked condition but loop-invariant initial list contents.

    def loop_fn(i):
      handle = list_ops.tensor_list_from_tensor([20, 21, 22, 23, 24], [])
      _, out_handle = control_flow_ops.while_loop(
          lambda j, _: j < i,
          lambda j, h: (j + 1, list_ops.tensor_list_set_item(h, j, i)),
          (0, handle))
      return list_ops.tensor_list_stack(out_handle, dtypes.int32)

    self._test_loop_fn(loop_fn, 5)

  def test_tensor_list_addn_already_stacked(self):

    def loop_fn(i):
      l1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      l1 = list_ops.tensor_list_set_item(l1, 0, i)
      l2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      l2 = list_ops.tensor_list_set_item(l2, 1, i)
      return list_ops.tensor_list_stack(math_ops.add_n([l1, l2]), dtypes.int32)

    self._test_loop_fn(loop_fn, 2)

  def test_tensor_list_addn_stacking_required(self):
    # l1 is created outside the loop (unstacked); adding it to a stacked l2
    # requires the converter to stack l1 first.
    l1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
    l1 = list_ops.tensor_list_set_item(l1, 1, 1)

    def loop_fn(i):
      l2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
      l2 = list_ops.tensor_list_set_item(l2, 1, i)
      return list_ops.tensor_list_stack(
          math_ops.add_n([l1, l2]), dtypes.int32)

    self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class TensorTest(PForTestCase):
  """Tests pfor conversion of tensor_scatter_nd_update with variant inputs."""

  def test_loop_variant_scatter_update_no_shape(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")

    # Wrap the scatter in a tf.function whose signature has fully unknown
    # shapes, so the converter cannot rely on static shape information.
    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32),
        tensor_spec.TensorSpec(shape=None, dtype=dtypes.int32)
    ])
    def shapeless_func(tensor, indices, updates):
      return array_ops.tensor_scatter_nd_update(tensor, indices, updates)

    def loop_fn(i):
      tensor = [0, 0, 0, 0, 0, 0, 0, 0]
      indices = [[i], [i + 1], [i + 3], [i + 2]]
      updates = [i, i - 10, i + 11, 12]
      return shapeless_func(tensor, indices, updates)

    self._test_loop_fn(loop_fn, 5)

  def test_loop_variant_scatter_update_singles(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")

    def loop_fn(i):
      # Scalar updates at loop-dependent indices.
      tensor = [0, 0, 0, 0, 0, 0, 0, 0]
      indices = [[i], [i+1], [i+3], [i+2]]
      updates = [i, i-10, i+11, 12]
      return array_ops.tensor_scatter_nd_update(tensor, indices, updates)

    self._test_loop_fn(loop_fn, 5)

  def test_loop_variant_scatter_update_slices(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")

    def loop_fn(i):
      # Row (slice) updates into a rank-2 tensor.
      tensor = array_ops.zeros([10, 3], dtype=dtypes.int32)
      indices = [[i+2], [4]]
      updates = [[1, i*2, 3], [i+4, i-5, 6]]
      return array_ops.tensor_scatter_nd_update(tensor, indices, updates)

    self._test_loop_fn(loop_fn, 5)

  def test_loop_variant_scatter_update_multi_dim_index(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")

    def loop_fn(i):
      # Full (row, col) indices select individual scalars.
      tensor = array_ops.zeros([10, 3], dtype=dtypes.int32)
      indices = [[i+2, 1], [4, 2]]
      updates = [i, 5]
      return array_ops.tensor_scatter_nd_update(tensor, indices, updates)

    self._test_loop_fn(loop_fn, 5)

  def test_loop_variant_scatter_update_folded_indices(self):
    if test_util.is_gpu_available():
      self.skipTest(
          "Flaky in some GPU configurations due to TensorScatterNdUpdate "
          "nondeterminism.")

    def loop_fn(i):
      # Indices carry an extra leading (batch) dimension.
      tensor = array_ops.zeros([5, 5])
      indices = [
          [[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]],
          [[0, 4], [1, 3], [2, 2], [3, 1], [4, 0]],
      ]
      updates = [
          [1, i, 1, 1, 1],
          [1, 1, i+2, 1, i-5],
      ]
      return array_ops.tensor_scatter_nd_update(tensor, indices, updates)

    self._test_loop_fn(loop_fn, 5)
class OptionalTest(PForTestCase):
  """Tests pfor conversion of Optional ops."""

  def test_optional_from_value(self):

    def loop_fn(i):
      optional = gen_dataset_ops.optional_from_value(
          [i, i + 1, constant_op.constant(3)])
      # Also build an empty optional inside the loop (result discarded) --
      # presumably to exercise the OptionalNone conversion path as well.
      gen_dataset_ops.optional_none()
      component_dtypes = [dtypes.int32, dtypes.int32, dtypes.int32]
      component_shapes = [[], [], []]
      return gen_dataset_ops.optional_get_value(
          optional, component_dtypes, component_shapes)

    self._test_loop_fn(loop_fn, 2)
class StackTest(PForTestCase):
  """Tests pfor conversion of v2 stack ops."""

  @test_util.run_v1_only("b/122612051")
  def test_stack_inside_loop_invariant(self):

    def loop_fn(_):
      # Push 1 then 2, then pop twice; control dependencies enforce order.
      s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
      op1 = data_flow_ops.stack_push_v2(s, 1)
      with ops.control_dependencies([op1]):
        op2 = data_flow_ops.stack_push_v2(s, 2)
      with ops.control_dependencies([op2]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e2]):
        e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    self._test_loop_fn(loop_fn, 2)

  @test_util.run_v1_only("b/122612051")
  def test_stack_inside_push_loop_dependent(self):

    def loop_fn(i):
      # Same as above, but the first pushed value depends on the pfor index.
      s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
      op1 = data_flow_ops.stack_push_v2(s, i)
      with ops.control_dependencies([op1]):
        op2 = data_flow_ops.stack_push_v2(s, 2)
      with ops.control_dependencies([op2]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e2]):
        e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    self._test_loop_fn(loop_fn, 2)

  @test_util.run_v1_only("b/122612051")
  def test_stack_outside_pop(self):
    # Push 5, 6, 7 outside the loop, pop inside it.
    s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
    op = data_flow_ops.stack_push_v2(s, 5)
    with ops.control_dependencies([op]):
      op = data_flow_ops.stack_push_v2(s, 6)
    with ops.control_dependencies([op]):
      op = data_flow_ops.stack_push_v2(s, 7)

    def loop_fn(_):
      e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      with ops.control_dependencies([e1]):
        e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
      return e1, e2

    with ops.control_dependencies([op]):
      e1, e2 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
    with ops.control_dependencies([e1, e2]):
      e3 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
    v1, v2, v3 = self._run_targets([e1, e2, e3], run_init=False)
    # All pfor iterations observe the same pops (7 then 6), leaving 5 on the
    # stack for the final pop outside the loop.
    self.assertAllEqual([7, 7], v1)
    self.assertAllEqual([6, 6], v2)
    self.assertAllEqual(5, v3)

  @test_util.run_v1_only("b/122612051")
  def test_stack_outside_push(self):
    s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)

    def loop_fn(_):
      return data_flow_ops.stack_push_v2(s, 7)

    # Pushing to a stack created outside the loop is rejected by pfor.
    with self.assertRaisesRegex(ValueError, "StackPushV2 not allowed.*"):
      pfor_control_flow_ops.pfor(loop_fn, iters=2)
# TODO(agarwal): test nested while_loops. This currently requires converting a
# tf.cond.
class WhileV1Test(PForTestCase):
  """Tests pfor conversion of while_loop under control flow v1."""

  def setUp(self):
    # Force control flow v1 for every test in this class; the previous
    # setting is restored in tearDown.
    self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
    control_flow_v2_toggles.disable_control_flow_v2()
    super(WhileV1Test, self).setUp()

  def tearDown(self):
    if self._enabled:
      control_flow_v2_toggles.enable_control_flow_v2()
    super(WhileV1Test, self).tearDown()

  def test_while_outside_loop(self):
    x = control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])

    def loop_fn(i):
      return x + i

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_invariant_while(self):

    def loop_fn(_):
      return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_invariant_while_with_control_dependency(self):

    def loop_fn(i):
      with ops.control_dependencies([i]):
        return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
                                           [0])

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_while_with_stateful_ops(self):

    def loop_fn(_):
      # The random_uniform in the body is a stateful op.
      return control_flow_ops.while_loop(
          lambda j, x: j < 4, lambda j, x:
          (j + 1, x + random_ops.random_uniform([])), [0, 0.])[0]

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_while_unstacked_condition(self):

    def loop_fn(i):
      # Condition is loop-invariant; only the body depends on i.
      return control_flow_ops.while_loop(lambda j, x: j < 4, lambda j, x:
                                         (j + 1, x + i), [0, 0])

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_while(self):
    x = random_ops.random_uniform([3, 5])
    lengths = constant_op.constant([4, 0, 2])

    def loop_fn(i):
      # Sum a per-iteration-length prefix of the i-th row.
      x_i = array_ops.gather(x, i)
      lengths_i = array_ops.gather(lengths, i)
      _, total = control_flow_ops.while_loop(
          lambda j, _: j < lengths_i, lambda j, t:
          (j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
      return total

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_while_jacobian(self):
    x = random_ops.random_uniform([1, 3])
    y = random_ops.random_uniform([3, 3])
    # out = x @ y @ y @ y @ y, where @ is matmul operator.
    _, out = control_flow_ops.while_loop(
        lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
        [0, x])

    def loop_fn(i):
      out_i = array_ops.gather(out, i, axis=1)
      return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])

    out = pfor_control_flow_ops.pfor(loop_fn, iters=3)
    # The above code does not work with tf.while_loop instead of pfor. So we
    # manually compute the expected output here.
    # Note that gradient of output w.r.t is (y @ y @ y @ y)^T.
    expected_output = y
    for _ in range(3):
      expected_output = math_ops.matmul(expected_output, y)
    expected_output = array_ops.transpose(expected_output, [1, 0])
    with session.Session() as sess:
      out, expected = sess.run([out, expected_output])
      self.assertAllClose(expected, out)

  @test_util.run_v1_only("b/122612051")
  def test_tensor_array_as_loop_variable(self):

    def loop_fn(i):

      def body(j, ta):
        ta = ta.write(j, i + j * j)
        return j + 1, ta

      _, ta = control_flow_ops.while_loop(
          lambda j, _: j < 4, body,
          (0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))
      return ta.stack()

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_read_tensor_array_partitioned_indices(self):
    # Note that tensor array values are pfor loop dependent, and the while loop
    # termination condition is also dependent on pfor iteration.
    def loop_fn(i):
      ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)
      ta = ta.unstack(i + list(range(5)))

      def body(j, s):
        return j + 1, s + ta.read(j)

      _, s = control_flow_ops.while_loop(lambda j, _: j < i, body, (0, 0))
      return s

    self._test_loop_fn(loop_fn, 3)

  @test_util.run_v1_only("b/122612051")
  def test_external_while_loop_grad(self):
    # Here we test that external while_loops that are extended from inside pfor
    # (due to gradient calls) are not actually converted. If the below was
    # converted all pfor iterations would write to the same tensor array
    # indices.
    x = constant_op.constant(1.)

    def body(j, ta):
      ta = ta.write(j, x)
      return j + 1, ta

    _, ta = control_flow_ops.while_loop(
        lambda j, _: j < 4, body,
        (0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))
    out = ta.stack()

    def loop_fn(i):
      out_i = array_ops.gather(out, i)
      return gradient_ops.gradients(out_i, x)[0]

    with session.Session() as sess:
      # out is [x, x, x]. Hence the gradients should be [1, 1, 1].
      self.assertAllEqual([1, 1, 1],
                          sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))

  @test_util.run_v1_only("b/122612051")
  def test_tensor_array_grad(self):
    inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
    ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
    ta = ta.unstack(inp)

    def loop_fn(i):

      def body(j, x):
        value = ta.gather([j])
        value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
        return j + 1, x + value

      _, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
                                           (0, array_ops.zeros([2])))
      out = math_ops.reduce_prod(out)
      return out, gradient_ops.gradients(out, inp)[0]

    pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
    # Note that tf.while_loop does not work in the setup above. So we manually
    # construct the equivalent computation of the above loops here.
    real_out = math_ops.reduce_sum(inp, axis=[0])
    real_out = math_ops.reduce_prod(real_out, axis=[1])
    # Note that gradients of real_out will accumulate the gradients across the
    # output value. Hence we do the same aggregation on pfor_out_grad.
    real_out_grad = gradient_ops.gradients(real_out, inp)[0]
    sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])
    with session.Session() as sess:
      v1, v2, v1_grad, v2_grad = sess.run(
          [pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
      self.assertAllClose(v1, v2)
      self.assertAllClose(v1_grad, v2_grad)
def dynamic_lstm_input_fn(batch_size, state_size, max_steps):
  """Returns constant (inputs, sequence_length) tensors for LSTM tests.

  Both outputs are constants so that repeated session.run calls produce
  identical values.
  """
  inputs = constant_op.constant(
      np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
  # Per-example lengths drawn uniformly from [0, max_steps].
  lengths = np.random.randint(0, max_steps + 1, size=[batch_size])
  return inputs, constant_op.constant(lengths, dtype=dtypes.int32)
def create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):
  """Builds a pfor-based dynamic RNN alongside the equivalent rnn.dynamic_rnn.

  Args:
    cell_fn: callable mapping state_size to an RNN cell instance.
    batch_size: batch dimension of the generated inputs.
    state_size: state (and input feature) size passed to cell_fn.
    max_steps: maximum number of unrolled time steps.

  Returns:
    Tuple (pfor_output, tf_output) whose values should agree, so callers can
    compare the pfor implementation against rnn.dynamic_rnn.
  """
  cell = cell_fn(state_size)
  inputs, sequence_length = dynamic_lstm_input_fn(batch_size, state_size,
                                                  max_steps)
  inputs_ta = tensor_array_ops.TensorArray(
      dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])
  # Store inputs time-major so each TensorArray element is one time step.
  inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])
  inputs_ta = inputs_ta.unstack(inputs_time_major)
  zeros = array_ops.zeros([state_size])

  def loop_fn(i):
    # Each pfor iteration runs the RNN over one batch example.
    sequence_length_i = array_ops.gather(sequence_length, i)

    def body_fn(t, state, ta):
      inputs_t = array_ops.expand_dims(
          array_ops.gather(inputs_ta.read(t), i), 0)
      output, new_state = cell(inputs_t, state)
      output = array_ops.reshape(output, [-1])
      # TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
      # array_ops.where when t < min(sequence_length). Doing that requires
      # supporting tf.cond pfor conversion.
      done = t >= sequence_length_i
      # Past the sequence end: emit zeros and freeze the state.
      output = array_ops.where(done, zeros, output)
      ta = ta.write(t, output)
      new_state = [
          array_ops.where(done, s, ns)
          for s, ns in zip(nest.flatten(state), nest.flatten(new_state))
      ]
      new_state = nest.pack_sequence_as(state, new_state)
      return t + 1, new_state, ta

    def condition_fn(t, _, unused):
      del unused
      return t < max_steps

    initial_state = cell.zero_state(1, dtypes.float32)
    _, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
        0, initial_state,
        tensor_array_ops.TensorArray(dtypes.float32, max_steps)
    ])
    new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
    new_state = nest.pack_sequence_as(initial_state, new_state)
    return ta.stack(), new_state

  pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)
  tf_output = rnn.dynamic_rnn(
      cell,
      inputs,
      sequence_length=sequence_length,
      initial_state=cell.zero_state(batch_size, dtypes.float32))
  return pfor_output, tf_output
@test_util.run_all_in_graph_and_eager_modes
class WhileV2Test(PForTestCase):
  def setUp(self):
    # Force control flow v2 for every test in this class; the previous
    # setting is restored in tearDown via self._enabled.
    self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
    control_flow_v2_toggles.enable_control_flow_v2()
    super(WhileV2Test, self).setUp()
  def tearDown(self):
    # Restore the control flow v2 toggle captured in setUp.
    if not self._enabled:
      control_flow_v2_toggles.disable_control_flow_v2()
    super(WhileV2Test, self).tearDown()
def test_while_outside_loop(self):
def _f():
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
def loop_fn(i):
return _f() + i
self._test_loop_fn(loop_fn, 3)
def test_invariant_while(self):
def loop_fn(_):
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
self._test_loop_fn(loop_fn, 3)
def test_invariant_while_with_control_dependency(self):
def loop_fn(i):
with ops.control_dependencies([i]):
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
[0])
self._test_loop_fn(loop_fn, 3)
def test_while_with_stateful_ops(self):
def loop_fn(_):
j, _ = control_flow_ops.while_loop(
lambda j, x: j < 4, lambda j, x:
(j + 1, x + random_ops.random_uniform([])), [0, 0.])
return j
self._test_loop_fn(loop_fn, 3)
def test_while_with_variable(self):
v = resource_variable_ops.ResourceVariable(5.)
def loop_fn(_):
_, output = control_flow_ops.while_loop(lambda j, x: j < 4, lambda j, x:
(j + 1, x + v), [0, 0.])
return output
self._test_loop_fn(loop_fn, 3)
def test_while_unstacked_condition(self):
def loop_fn(i):
return control_flow_ops.while_loop(lambda j, x: j < 4, lambda j, x:
(j + 1, x + i), [0, 0])
self._test_loop_fn(loop_fn, 3)
def test_while(self):
x = random_ops.random_uniform([3, 5])
lengths = constant_op.constant([4, 0, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
lengths_i = array_ops.gather(lengths, i)
return control_flow_ops.while_loop(
lambda j, _: j < lengths_i, lambda j, t:
(j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
self._test_loop_fn(loop_fn, 3)
def test_while_change_input_invariance(self):
# This tests cases where a loop invariant input to while has loop dependent
# operations applied to it inside the while body.
# It also test inputs that are passed through.
def loop_fn(i):
return control_flow_ops.while_loop(
lambda j, *_: j < i, lambda j, x, y, z, w:
(j + 1, x + i, y + x, z, w), [
0,
constant_op.constant(0),
constant_op.constant(1), i,
constant_op.constant(2)
])
self._test_loop_fn(loop_fn, 3)
def test_while_shape_invariants(self):
def loop_fn(i):
return control_flow_ops.while_loop(
lambda j, *_: j < 4,
lambda j, x, y: (j + 1, x + i, y + 1),
[0, constant_op.constant([0, 1]),
constant_op.constant([2, 3])],
shape_invariants=[
None,
tensor_shape.TensorShape([2]),
tensor_shape.TensorShape([2])
])
self._test_loop_fn(loop_fn, 3)
def test_while_jacobian(self):
# Note that we wrap the code below in a tf.function since we don't want the
# while_loop call to be evaluated eagerly using a python loop.
@def_function.function
def _f(x, y, use_pfor):
# out = x @ y @ y @ y @ y, where @ is matmul operator.
_, out = control_flow_ops.while_loop(
lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
[0, x])
def loop_fn(i):
out_i = array_ops.gather(out, i, axis=1)
grad = gradient_ops.gradients(out_i, x)
return array_ops.reshape(grad[0], [-1])
if use_pfor:
return pfor_control_flow_ops.pfor(loop_fn, iters=3)
else:
return pfor_control_flow_ops.for_loop(
loop_fn, iters=3, loop_fn_dtypes=out.dtype)
x = constant_op.constant(np.random.uniform(size=(1, 3)))
y = constant_op.constant(np.random.uniform(size=(3, 3)))
self.assertAllClose(_f(x, y, True), _f(x, y, False))
def test_scan(self):
np.random.seed(seed=42)
data = np.random.randn(3).astype(np.float32)
def log_prob(x):
return math_ops.reduce_sum(functional_ops.scan_v2(
lambda _, yi: (x - yi)**2,
elems=data,
initializer=constant_op.constant(0.)))
x = variables.Variable(array_ops.ones([2]))
self.evaluate(x.initializer)
v_log_prob = lambda x: pfor_control_flow_ops.vectorized_map(log_prob, x)
theoretical, numerical = gradient_checker_v2.compute_gradient(
v_log_prob, (x,), delta=1e-3)
self.assertAllClose(theoretical, numerical, rtol=1e-2)
def test_scan_captured_variable(self):
if not context.executing_eagerly():
self.skipTest("Test only written for 2.x")
v = variables.Variable(math_ops.range(10, dtype=dtypes.float32))
def loop_fn(idx):
del idx
return functional_ops.scan_v2(lambda _, i: array_ops.gather(v, i),
elems=math_ops.range(v.shape[0]),
initializer=0.0)
with backprop.GradientTape() as tape:
result = pfor_control_flow_ops.pfor(loop_fn, 2)
self.assertAllClose([2.] * 10, tape.gradient(result, v))
@test_util.run_all_in_graph_and_eager_modes
class NestedControlFlowTest(PForTestCase):
  """Tests pfor conversion of cond/while nested inside one another."""
  def setUp(self):
    # Remember the prior toggle state so tearDown can restore it.
    self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
    control_flow_v2_toggles.enable_control_flow_v2()
    super(NestedControlFlowTest, self).setUp()
  def tearDown(self):
    if not self._enabled:
      control_flow_v2_toggles.disable_control_flow_v2()
    super(NestedControlFlowTest, self).tearDown()
  def _cond(self, f=None, split=0):
    # Wraps `f` in a cond applied only when y > split.
    if f is None:
      f = lambda x, y: (x, y)
    def _f(x, y):
      return control_flow_ops.cond(y > split, lambda: f(x, y), lambda:
                                   (x + 1., y))
    return _f
  def _while(self, f=None):
    # Wraps `f` in a while loop that accumulates gathered entries of f(x, y)[0].
    if f is None:
      f = lambda x, y: (x, y)
    def _f(x, y):
      return control_flow_ops.while_loop(
          lambda j, _: j < y, lambda j, t:
          (j + 1, t + array_ops.gather(f(x, y)[0], j)), [0, x])[1], y
    return _f
  def _test_helper(self, f):
    x = random_ops.random_uniform([5, 5])
    # Mixed positive/negative counts exercise both cond branches.
    y = constant_op.constant([4, -1, 2, -2, 2])
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      y_i = array_ops.gather(y, i)
      return f(x_i, y_i)
    self._test_loop_fn(loop_fn, 5)
  def test_cond_while(self):
    self._test_helper(self._cond(self._while()))
  def test_while_cond(self):
    self._test_helper(self._while(self._cond()))
  def test_while_while(self):
    self._test_helper(self._while(self._while()))
  def test_cond_cond(self):
    self._test_helper(self._cond(self._cond()))
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class StatelessIfTest(PForTestCase):
  """Tests pfor conversion of stateless `cond_v2`."""
  def test_loop_variant_cond(self):
    x = [1, 2, 3, 4, 5.]
    y = 2.5
    @def_function.function
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      # Note that the output has a combination of then and else branches being
      # loop variant / invariant.
      return cond_v2.cond_v2(x_i < y, lambda: (y - x_i, y, 1., 2.), lambda:
                             (x_i - y, 0., y, 3.))
    self._test_loop_fn(loop_fn, iters=5)
  def test_loop_invariant_cond(self):
    x = [1, 2, 3, 4, 5.]
    y = 0.5
    # Predicate depends on z, which is the same for every pfor iteration.
    z = random_ops.random_uniform([])
    @def_function.function
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      # Note that the output has a combination of then and else branches being
      # loop variant / invariant.
      return cond_v2.cond_v2(z < y, lambda: (y - x_i, y, 1., 2.), lambda:
                             (x_i - y, 0., y, 3.))
    self._test_loop_fn(loop_fn, iters=5)
  def test_empty_branch(self):
    x = [1, 2, 3, 4, 5.]
    y = 6.
    @def_function.function
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return cond_v2.cond_v2(
          x_i < y,  # Note that else branch is empty.
          lambda: (y - x_i, y, 1., 2.),
          lambda: (x_i - y, 0., y, 3.))
    self._test_loop_fn(loop_fn, iters=5)
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class IfTest(PForTestCase):
  """Tests pfor conversion of a cond_v2 that reads a resource variable."""
  def test_read_var(self):
    self.skipTest("b/156438918")  # Flaky
    x = [1, 2, 3, 4, 5.]
    y = 2.5
    z = resource_variable_ops.ResourceVariable(5.)
    @def_function.function
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return cond_v2.cond_v2(x_i < y, lambda: z - x_i, lambda: z + x_i)
    self._test_loop_fn(loop_fn, iters=5)
class RNNTest(PForTestCase):
  """Checks the pfor-vectorized dynamic RNN against `rnn.dynamic_rnn`."""
  @test_util.run_v1_only("b/122612051")
  def test_dynamic_rnn(self):
    vectorized, baseline = create_dynamic_lstm(rnn_cell.BasicRNNCell, 3, 5, 7)
    self.run_and_assert_equal(vectorized, baseline)
  @test_util.run_v1_only("b/122612051")
  def test_dynamic_lstm(self):
    vectorized, baseline = create_dynamic_lstm(rnn_cell.BasicLSTMCell, 3, 5, 7)
    self.run_and_assert_equal(vectorized, baseline)
# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop
# conversion don't look good. Some of it seems like lot of copies between host
# and device. Optimize that.
class Benchmarks(test.Benchmark):
  """Benchmarks comparing pfor against manual and while_loop-based variants."""
  def _run(self, targets, iters, name=None):
    # Runs `targets` `iters` times in a fresh session, reports average wall
    # time in milliseconds and returns it.
    def _done(t):
      # Note that we don't use tf.control_dependencies since that will not make
      # sure that the computation on GPU has actually finished. So we fetch the
      # first element of the output, and assume that this will not be called on
      # empty tensors.
      return array_ops.gather(array_ops.reshape(t, [-1]), 0)
    targets = [_done(x) for x in nest.flatten(targets)]
    sess = session.Session()
    with sess:
      init = variables.global_variables_initializer()
      sess.run(init)
      run_fn = sess.make_callable(targets)
      run_fn()  # Warm up
      begin = time.time()
      for _ in range(iters):
        run_fn()
      end = time.time()
    avg_time_ms = 1000 * (end - begin) / iters
    self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
    return avg_time_ms
  def benchmark_sess_run_overhead(self):
    # Baseline: cost of a session run fetching a single constant.
    with ops.Graph().as_default():
      x = constant_op.constant(1.0)
      self._run(x, 10000, name="session_run_overhead")
  def benchmark_add(self):
    with ops.Graph().as_default():
      n = 256
      params = 1000
      x = random_ops.random_normal([n, params])
      y = random_ops.random_normal([n, params])
      def loop_fn(i):
        x_i = array_ops.gather(x, i)
        y_i = array_ops.gather(y, i)
        return x_i + y_i
      pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
      while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
      manual = x + y
      self._run(manual, 1000, name="manual_add")
      self._run(pfor_outputs, 1000, name="pfor_add")
      self._run(while_outputs, 100, name="while_add")
  def benchmark_matmul(self):
    with ops.Graph().as_default():
      n = 1024
      params = 1000
      x = random_ops.random_normal([n, params])
      y = random_ops.random_normal([params, params])
      def loop_fn(i):
        x_i = array_ops.expand_dims(array_ops.gather(x, i), 0)
        return math_ops.matmul(x_i, y)
      pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
      while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
      manual = math_ops.matmul(x, y)
      self._run(manual, 1000, name="manual_matmul")
      self._run(pfor_outputs, 1000, name="pfor_matmul")
      self._run(while_outputs, 100, name="while_matmul")
  def benchmark_map_fn(self):
    # Compares tf.map_fn against an equivalent pfor-based map.
    with ops.Graph().as_default():
      b = 256
      params = 1000
      inp = random_ops.random_normal((b, params))
      fn = lambda x: x * x
      def pfor_map_fn(f, x):
        return pfor_control_flow_ops.pfor(lambda i: f(array_ops.gather(x, i)),
                                          array_ops.shape(x)[0])
      map_output = map_fn.map_fn(fn, inp)
      pfor_output = pfor_map_fn(fn, inp)
      self._run(map_output, 100, name="tf_map_fn")
      self._run(pfor_output, 100, name="pfor_map_fn")
  def benchmark_basic_while(self):
    with ops.Graph().as_default():
      def loop_fn(i):
        _, s = control_flow_ops.while_loop(lambda t, x: t < i, lambda t, x:
                                           (t + 1, x + i), [0, 0])
        return s
      iters = 50
      pfor_output = pfor_control_flow_ops.pfor(loop_fn, iters)
      for_loop_output = pfor_control_flow_ops.for_loop(loop_fn, dtypes.int32,
                                                       iters)
      self._run(pfor_output, 100, name="pfor_basic")
      self._run(for_loop_output, 100, name="for_loop_basic")
  def benchmark_dynamic_rnn(self):
    with ops.Graph().as_default():
      pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell, 128,
                                                     512, 16)
      self._run(pfor_outputs, 100, name="pfor_rnn")
      self._run(tf_outputs, 100, name="tf_rnn")
  def benchmark_reduction(self):
    n = 1024
    with ops.Graph().as_default():
      x = random_ops.random_uniform([n, n])
      w = random_ops.random_uniform([n, n])
      def loop_fn(i, pfor_config):
        x_i = array_ops.gather(x, i)
        return math_ops.reduce_sum(
            math_ops.matmul(pfor_config.reduce_concat(x_i), w))
      # Note that output_reduction will be tiled, so there may be some minor
      # overheads compared to output_no_reduction.
      output_reduction = pfor_control_flow_ops.pfor(loop_fn, n)
      output_no_reduction = math_ops.reduce_sum(math_ops.matmul(x, w))
      # Benchmark to test that reduction does not add overhead and its output is
      # treated as loop invariant.
      self._run(output_reduction, 30, name="matmul_reduction")
      self._run(output_no_reduction, 30, name="matmul_no_reduction")
class SparseTest(PForTestCase):
  """Tests pfor conversion when loop_fn outputs SparseTensors."""
  @test_util.run_v1_only("b/122612051")
  def test_var_loop_len(self):
    # Loop count comes from a placeholder, so it is unknown at build time.
    num_iters = array_ops.placeholder(dtypes.int32)
    def loop_fn(_):
      return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # [0, 2, 0]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    with self.cached_session() as sess:
      sess.run(pfor, feed_dict={num_iters: 3})
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_none_stacked(self):
    # Output is loop invariant: no component depends on the loop index.
    num_iters = 10
    def loop_fn(_):
      return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # [0, 2, 0]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    indices = [[i, j] for i in range(num_iters) for j in range(3)]
    values = [4, 5, 6] * num_iters
    dense_shapes = [num_iters, 3]
    # Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]
    manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)
    self.run_and_assert_equal(pfor, manual)
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_all_stacked(self):
    # Indices, values, and dense shape all depend on the loop index.
    num_iters = 10
    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      indices = array_ops.expand_dims(i, 0)
      return sparse_tensor.SparseTensor(indices, i, i + 1)  # [0, ..., 0, i]
    # Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
                                        list(range(num_iters)),
                                        (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_indices_stacked(self):
    # Only the indices component depends on the loop index.
    num_iters = 10
    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      indices = array_ops.expand_dims(i, 0)
      return sparse_tensor.SparseTensor(indices, [1], [num_iters])
    # Expected result: identity matrix size num_iters * num_iters
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
                                        [1] * num_iters, (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_values_stacked(self):
    # Only the values component depends on the loop index.
    num_iters = 10
    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      return sparse_tensor.SparseTensor([[0]], i, [num_iters])  # [i, 0, ..., 0]
    # Expected result: [[1, 0, ...], [2, 0, ...], [3, 0, ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
                                        list(range(num_iters)),
                                        (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_shapes_stacked(self):
    # Only the dense shape component depends on the loop index.
    num_iters = 10
    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
      return sparse_tensor.SparseTensor([[0]], [1], i + 1)  # [1, 0, ..., 0]
    # Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
                                        [1] * num_iters, (num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
  @test_util.run_v1_only("b/122612051")
  def test_sparse_result_shapes_stacked_2D(self):
    # Like the above but with a rank-2 per-iteration dense shape.
    num_iters = 10
    def loop_fn(i):
      i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)
      shape = array_ops.concat([i, i], 0)
      return sparse_tensor.SparseTensor([[0, 0]], [1], shape)  # [1, 0, ..., 0]
    # Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]
    pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
    manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],
                                        [1] * num_iters,
                                        (num_iters, num_iters, num_iters))
    self.run_and_assert_equal(pfor, manual)
# Dummy CompositeTensor to test CompositeTensor support.
class Particle(composite_tensor.CompositeTensor):
  """A (batch of) particles each defined by a mass and a scalar velocity."""
  def __init__(self, mass, velocity):
    mass = ops.convert_to_tensor(mass)
    velocity = ops.convert_to_tensor(velocity)
    # Overall shape is the broadcast of the two component shapes.
    self.shape = array_ops.broadcast_static_shape(mass.shape, velocity.shape)
    self.mass = mass
    self.velocity = velocity
  @property
  def _type_spec(self):
    # Build the spec from the current component values.
    return ParticleSpec(
        type_spec.type_spec_from_value(self.mass),
        type_spec.type_spec_from_value(self.velocity))
class ParticleSpec(type_spec.BatchableTypeSpec):
  """TypeSpec for `Particle`; `mass`/`velocity` hold the component specs."""
  def __init__(self, mass, velocity):
    self.shape = array_ops.broadcast_static_shape(
        mass.shape, velocity.shape)
    self.mass = mass
    self.velocity = velocity
  def _serialize(self):
    return (self.mass, self.velocity)
  @property
  def value_type(self):
    return Particle
  @property
  def _component_specs(self):
    return (self.mass, self.velocity)
  def _to_components(self, value):
    return (value.mass, value.velocity)
  def _from_components(self, components):
    return Particle(*components)
  def _pad_shape_to_full_rank(self, s):
    """Pad component shapes with 1's so all components have the same rank."""
    return tensor_shape.TensorShape(
        [1] * (self.shape.ndims - s.ndims)).concatenate(s)
  def _batch(self, batch_size):
    # Prepend a batch dimension to each rank-padded component shape.
    return ParticleSpec(
        mass=tensor_spec.TensorSpec(
            dtype=self.mass.dtype,
            shape=tensor_shape.TensorShape([batch_size]).concatenate(
                self._pad_shape_to_full_rank(self.mass.shape))),
        velocity=tensor_spec.TensorSpec(
            dtype=self.velocity.dtype,
            shape=tensor_shape.TensorShape([batch_size]).concatenate(
                self._pad_shape_to_full_rank(self.velocity.shape))))
  def _unbatch(self):
    # Drop the leading (batch) dimension from each component spec.
    return ParticleSpec(
        tensor_spec.TensorSpec(dtype=self.mass.dtype,
                               shape=self.mass.shape[1:]),
        tensor_spec.TensorSpec(dtype=self.velocity.dtype,
                               shape=self.velocity.shape[1:]))
  def _to_tensor_list(self, value):
    # Reshape components to full rank before flattening to a tensor list.
    return [array_ops.reshape(
        value.mass,
        self._pad_shape_to_full_rank(value.mass.shape)),
        array_ops.reshape(
            value.velocity,
            self._pad_shape_to_full_rank(value.velocity.shape))]
class CompositeTensorTest(PForTestCase, parameterized.TestCase):
  """Tests pfor/vectorized_map support for CompositeTensor values."""
  @parameterized.parameters((None,), (3,))
  def test_create_composite_inside_loop(self, parallel_iterations):
    num_particles = 10
    velocities = random_ops.random_uniform([num_particles])
    particles = pfor_control_flow_ops.pfor(
        # Build a batch of particles all with the same mass.
        lambda i: Particle(mass=4., velocity=array_ops.gather(velocities, i)),
        num_particles,
        parallel_iterations=parallel_iterations)
    particles_mass, particles_velocity, velocities = self.evaluate(
        (particles.mass, particles.velocity, velocities))
    self.assertAllEqual(particles_mass, 4. * np.ones([num_particles]))
    self.assertAllEqual(particles_velocity, velocities)
  @parameterized.parameters((None,), (3,))
  def test_composite_is_converted_to_batched_tensor(
      self, parallel_iterations):
    particles = pfor_control_flow_ops.pfor(
        lambda _: Particle(mass=random_ops.random_uniform([3]),  # pylint: disable=g-long-lambda
                           velocity=random_ops.random_uniform([5, 3])),
        4,
        parallel_iterations=parallel_iterations)
    # Naively batching the component shapes would give `[4, 3]` and `[4, 5, 3]`
    # which have no consistent broadcast shape.
    self.assertEqual(particles.mass.shape, [4, 1, 3])
    self.assertAllEqual(particles.velocity.shape, [4, 5, 3])
  def test_vectorized_map_gathers_composite_tensors(self):
    particles = Particle(mass=[1., 2., 3., 4., 5.],
                         velocity=[1., 2., 3., 4., 5.])
    self.assertAllEqual(
        pfor_control_flow_ops.vectorized_map(
            lambda x: x.mass * x.velocity, particles),
        particles.mass * particles.velocity)
  def test_vectorized_map_of_ragged_tensors(self):
    # Vmap should be able to handle ragged Tensors as long as they're not
    # *actually* ragged.
    ragged = ragged_tensor.RaggedTensor.from_uniform_row_length(
        ragged_tensor.RaggedTensor.from_row_lengths(
            values=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
            row_lengths=[3, 3, 3, 3]),
        uniform_row_length=2)  # Overall shape [2, 2, 3].
    self.assertAllEqual(
        pfor_control_flow_ops.vectorized_map(
            lambda x: x.to_tensor(shape=[2, 3]), ragged),
        ragged.to_tensor(shape=[2, 2, 3]))
class ParsingTest(PForTestCase):
  """Tests pfor conversion of parsing ops (decode_csv, parse_single_example)."""
  def test_decode_csv(self):
    csv_tensor = constant_op.constant([["1:2:3"], ["::"], ["7:8:9"]])
    kwargs = {"record_defaults": [[10], [20], [30]], "field_delim": ":"}
    def loop_fn(i):
      line = array_ops.gather(csv_tensor, i)
      return parsing_ops.decode_csv(line, **kwargs)
    self._test_loop_fn(loop_fn, iters=3)
  @test_util.run_v1_only("b/122612051")
  def test_parse_single_example(self):
    def _int64_feature(*values):
      return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
    def _bytes_feature(*values):
      return feature_pb2.Feature(
          bytes_list=feature_pb2.BytesList(
              value=[v.encode("utf-8") for v in values]))
    examples = constant_op.constant([
        example_pb2.Example(
            features=feature_pb2.Features(
                feature={
                    "dense_int": _int64_feature(i),
                    "dense_str": _bytes_feature(str(i)),
                    "sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
                    "sparse_str": _bytes_feature(*["abc"] * i)
                })).SerializeToString() for i in range(10)
    ])
    features = {
        "dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
        "dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
        "sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
        "sparse_str": parsing_ops.VarLenFeature(dtypes.string),
    }
    def loop_fn(i):
      example_proto = array_ops.gather(examples, i)
      f = parsing_ops.parse_single_example(example_proto, features)
      return f
    # Vectorized parse_single_example must match the batched parse_example.
    pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)
    manual = parsing_ops.parse_example(examples, features)
    self.run_and_assert_equal(pfor, manual)
class PartitionedCallTest(PForTestCase):
  """Tests pfor conversion of calls into `tf.function`s."""
  def test_simple(self):
    @def_function.function
    def f(x):
      return math_ops.square(x) + 1
    z = random_ops.random_uniform([4])
    def loop_fn(i):
      return f(array_ops.gather(z, i))
    self._test_loop_fn(loop_fn, 4)
  def test_nested_calls(self):
    # One tf.function calling another.
    @def_function.function
    def inner(x):
      return math_ops.square(x)
    @def_function.function
    def outer(y):
      return math_ops.reduce_sum(inner(y)) + 2
    z = random_ops.random_uniform([4, 2])
    def loop_fn(i):
      return outer(array_ops.gather(z, i))
    self._test_loop_fn(loop_fn, 4)
  def test_nested_definition(self):
    # A tf.function defined inside another tf.function.
    @def_function.function
    def outer(y):
      @def_function.function
      def inner(x):
        return math_ops.square(x) + 1
      return math_ops.reduce_sum(inner(y)) + 2
    z = random_ops.random_uniform([4, 2])
    def loop_fn(i):
      return outer(array_ops.gather(z, i))
    self._test_loop_fn(loop_fn, 4)
  def test_gradients(self):
    @def_function.function
    def f(x):
      return math_ops.square(x) + 1
    z = random_ops.random_uniform([4, 2])
    def loop_fn(i):
      z_i = array_ops.gather(z, i)
      with backprop.GradientTape() as g:
        g.watch(z_i)
        out = f(z_i)
      return out, g.gradient(out, z_i)
    self._test_loop_fn(loop_fn, 4)
  def test_stateful_with_gradients(self):
    z = random_ops.random_uniform([4, 2])
    # The function captures (reads) a variable, making it stateful.
    v = variables.Variable(z[0])
    @def_function.function
    def f(x):
      return math_ops.square(x) + v + 1
    def loop_fn(i):
      z_i = array_ops.gather(z, i)
      with backprop.GradientTape() as g:
        g.watch(z_i)
        out = f(z_i)
      return out, g.gradient(out, z_i)
    self._test_loop_fn(loop_fn, 4)
class SpectralTest(PForTestCase, parameterized.TestCase):
  """Tests pfor conversion of FFT ops (fft/rfft/irfft and N-D variants)."""
  @parameterized.parameters(
      (fft_ops.fft,),
      (fft_ops.fft2d,),
      (fft_ops.fft3d,),
      (fft_ops.ifft,),
      (fft_ops.ifft2d,),
      (fft_ops.ifft3d,),
  )
  def test_fft(self, op_func):
    shape = [2, 3, 4, 3, 4]
    x = np.random.uniform(size=shape) + 1j * np.random.uniform(size=shape)
    def loop_fn(i):
      x_i = array_ops.gather(x, i)
      return op_func(x_i)
    self._test_loop_fn(loop_fn, 2)
  @parameterized.parameters(
      (fft_ops.rfft,),
      (fft_ops.rfft2d,),
      (fft_ops.rfft3d,),
  )
  @test.disable_with_predicate(
      pred=test.is_built_with_rocm,
      skip_message="Disable subtest on ROCm due to rocfft issues")
  def test_rfft(self, op_func):
    for dtype in (dtypes.float32, dtypes.float64):
      x = random_ops.random_uniform([2, 3, 4, 3, 4], dtype=dtype)
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x_i = array_ops.gather(x, i)
        return op_func(x_i)
      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 2)
  @parameterized.parameters(
      (fft_ops.irfft,),
      (fft_ops.irfft2d,),
      (fft_ops.irfft3d,),
  )
  @test.disable_with_predicate(
      pred=test.is_built_with_rocm,
      skip_message="Disable subtest on ROCm due to rocfft issues")
  def test_irfft(self, op_func):
    if config.list_physical_devices("GPU"):
      # TODO(b/149957923): The test is flaky
      self.skipTest("b/149957923: irfft vectorization flaky")
    for dtype in (dtypes.complex64, dtypes.complex128):
      shape = [2, 3, 4, 3, 4]
      x = np.random.uniform(size=shape) + 1j * np.random.uniform(size=shape)
      x = math_ops.cast(x, dtype=dtype)
      # pylint: disable=cell-var-from-loop
      def loop_fn(i):
        x_i = array_ops.gather(x, i)
        return op_func(x_i)
      # pylint: enable=cell-var-from-loop
      self._test_loop_fn(loop_fn, 2)
class VariableTest(PForTestCase):
  """Tests variable creation/usage semantics under vectorized_map."""
  def test_create_variable_once(self):
    x = array_ops.ones(shape=(3, 2, 2), dtype=dtypes.float32)
    y = array_ops.ones(shape=(2, 3), dtype=dtypes.float32)
    a_var = []
    def f(z):
      # Create the variable lazily, on the first call only.
      if not a_var:
        a_var.append(variables.Variable(lambda: y, name="a"))
      return math_ops.matmul(z, a_var[0] / 16)
    pfor_control_flow_ops.vectorized_map(f, x)
  @test_util.run_v2_only
  def test_create_variable_repeated(self):
    x = array_ops.ones(shape=(3, 2, 2), dtype=dtypes.float32)
    y = array_ops.ones(shape=(2, 3), dtype=dtypes.float32)
    def f(z):
      # Creating a variable on every call should be rejected.
      a_var = variables.Variable(lambda: y, name="a") / 4
      return math_ops.matmul(z, a_var / 16)
    # Note that this error is only raised under v2 behavior.
    with self.assertRaisesRegex(
        ValueError, "singleton tf.Variable.*on the first call"):
      pfor_control_flow_ops.vectorized_map(f, x)
  @test_util.run_all_in_graph_and_eager_modes
  def test_variable_shape(self):
    v = resource_variable_ops.ResourceVariable([1, 2])
    def loop_fn(_):
      return resource_variable_ops.variable_shape(v.handle)
    self._test_loop_fn(loop_fn, 2)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2006, Frank Scholz <coherence@beebits.net>
import string
import socket
import os, sys
import traceback
from zope.interface import implements, Interface
from twisted.python import log, filepath, util
from twisted.python.components import registerAdapter
from twisted.internet import task, address
from twisted.internet import reactor
from nevow import athena, inevow, loaders, tags, static
from twisted.web import resource
import louie
from coherence.upnp.core.ssdp import SSDPServer
from coherence.upnp.core.msearch import MSearch
from coherence.upnp.core.device import Device, RootDevice
from coherence.upnp.core.utils import parse_xml, get_ip_address, get_host_address
from coherence.upnp.devices.control_point import ControlPoint
from coherence.upnp.devices.media_server import MediaServer
from coherence.upnp.devices.media_renderer import MediaRenderer
from coherence.backends.fs_storage import FSStore
from coherence.backends.elisa_storage import ElisaMediaStore
from coherence.backends.flickr_storage import FlickrStore
try:
from coherence.backends.gstreamer_audio_player import Player
except:
pass
from coherence.extern.logger import Logger, LOG_WARNING
log = Logger('Coherence')
class IWeb(Interface):
    """zope Interface used when adapting `Web` objects to a Nevow resource.

    See `WebServer.__init__`, which registers `WebUI` as the adapter factory.
    """
    def goingLive(self):
        pass
class Web(object):
    """Root web object that holds a reference to the Coherence instance."""

    def __init__(self, coherence):
        """Remember the owning Coherence framework instance."""
        super(Web, self).__init__()
        self.coherence = coherence
class MenuFragment(athena.LiveFragment):
    """Athena live fragment rendering the tab menu bar of the web UI."""
    jsClass = u'Coherence.Base'
    docFactory = loaders.stan(
        tags.div(render=tags.directive('liveFragment'))[
            tags.div(id="coherence_menu_box",class_="coherence_menu_box")[""],
        ]
    )
    def __init__(self, page):
        super(MenuFragment, self).__init__()
        self.page = page
        self.coherence = page.coherence
        # List of {u'title': ..., u'active': ...} dicts, one per registered tab.
        self.tabs = []
    def going_live(self):
        # Exposed to the browser; returns the currently registered tabs.
        log.info("add a view to the MenuFragment")
        d = self.page.notifyOnDisconnect()
        d.addCallback( self.remove_me)
        d.addErrback( self.remove_me)
        return self.tabs
    athena.expose(going_live)
    def add_tab(self,title,active):
        # Register a tab (deduplicated by title) and push it to the browser.
        log.info("add tab %s to the MenuFragment" % title)
        new_tab = {u'title':unicode(title),
                   u'active':unicode(active)}
        for t in self.tabs:
            if t['title'] == new_tab['title']:
                return
        self.tabs.append(new_tab)
        self.callRemote('addTab', new_tab)
    def remove_me(self, result):
        # Fired when the live page disconnects; nothing to clean up yet.
        log.info("remove view from MenuFragment")
class DevicesFragment(athena.LiveFragment):
    """Athena live fragment listing the UPnP devices known to Coherence.

    Pushes addDevice/removeDevice calls to the browser when the
    corresponding louie signals fire.
    """
    jsClass = u'Coherence.Devices'
    docFactory = loaders.stan(
        tags.div(render=tags.directive('liveFragment'))[
            tags.div(id="Devices-container",class_="coherence_container")[""],
        ]
    )

    def __init__(self, page, active):
        super(DevicesFragment, self).__init__()
        self.page = page
        self.coherence = page.coherence
        self.page.menu.add_tab('Devices',active)

    def _device_entry(self, device):
        # Build the {name, usn} dict sent to the browser for one device.
        # Device type strings look like urn:schemas-upnp-org:device:<type>:<ver>.
        _,_,_,device_type,version = device.get_device_type().split(':')
        name = unicode("%s:%s %s" % (device_type,version,device.get_friendly_name()))
        usn = unicode(device.get_usn())
        return {u'name':name,u'usn':usn}

    def going_live(self):
        # Exposed to the browser; returns the current device list and hooks
        # up signal handlers for future detections/removals.
        log.info("add a view to the DevicesFragment")
        d = self.page.notifyOnDisconnect()
        d.addCallback( self.remove_me)
        d.addErrback( self.remove_me)
        devices = [self._device_entry(device)
                   for device in self.coherence.get_devices()
                   if device is not None]
        louie.connect( self.add_device, 'Coherence.UPnP.Device.detection_completed', louie.Any)
        louie.connect( self.remove_device, 'Coherence.UPnP.Device.removed', louie.Any)
        return devices
    athena.expose(going_live)

    def remove_me(self, result):
        log.info("remove view from the DevicesFragment")

    def add_device(self, device):
        log.info("DevicesFragment found device %s %s of type %s" %(
            device.get_usn(),
            device.get_friendly_name(),
            device.get_device_type()))
        self.callRemote('addDevice', self._device_entry(device))

    def remove_device(self, usn):
        # %-format the usn into the message, consistent with the other log
        # calls in this file (the original passed it as a stray second arg).
        log.info("DevicesFragment remove device %s" % usn)
        self.callRemote('removeDevice', unicode(usn))

    def render_devices(self, ctx, data):
        """Server-side render of the device list as a <ul> of links."""
        cl = []
        log.info('children: %s' % self.coherence.children)
        for c in self.coherence.children:
            device = self.coherence.get_device_with_id(c)
            if device is not None:
                _,_,_,device_type,version = device.get_device_type().split(':')
                cl.append( tags.li[tags.a(href='/'+c)[device_type,
                                                      ':',
                                                      version,
                                                      ' ',
                                                      device.get_friendly_name()]])
            else:
                cl.append( tags.li[c])
        return ctx.tag[tags.ul[cl]]
class LoggingFragment(athena.LiveFragment):
    """Athena live fragment for the (currently empty) logging tab."""
    jsClass = u'Coherence.Logging'
    docFactory = loaders.stan(
        tags.div(render=tags.directive('liveFragment'))[
            tags.div(id="Logging-container",class_="coherence_container")[""],
        ]
    )

    def __init__(self, page, active):
        super(LoggingFragment, self).__init__()
        self.page = page
        self.coherence = page.coherence
        # Honor the `active` argument instead of hard-coding 'no'. The only
        # caller (WebUI.render_logging) passes 'no', so behavior is unchanged.
        self.page.menu.add_tab('Logging',active)

    def going_live(self):
        # Exposed to the browser; no server-side state to hand over yet.
        log.info("add a view to the LoggingFragment")
        d = self.page.notifyOnDisconnect()
        d.addCallback( self.remove_me)
        d.addErrback( self.remove_me)
        return {}
    athena.expose(going_live)

    def remove_me(self, result):
        log.info("remove view from the LoggingFragment")
class WebUI(athena.LivePage):
    """Athena LivePage serving the Coherence web UI (menu, devices, logging).
    """
    jsClass = u'Coherence'
    addSlash = True
    docFactory = loaders.xmlstr("""\
<html xmlns:nevow="http://nevow.com/ns/nevow/0.1">
<head>
<nevow:invisible nevow:render="liveglue" />
<link rel="stylesheet" type="text/css" href="web/main.css" />
</head>
<body>
<div id="coherence_header"><div class="coherence_title">Coherence</div><div nevow:render="menu"></div></div>
<div id="coherence_body">
<span nevow:render="devices" />
<span nevow:render="logging" />
</div>
</body>
</html>
""")
    def __init__(self, *a, **kw):
        super(WebUI, self).__init__( *a, **kw)
        self.coherence = self.rootObject.coherence
        # Map the JS module names used by the fragments to files shipped in
        # this package's web/ directory.
        self.jsModules.mapping.update({
            'MochiKit': filepath.FilePath(__file__).parent().child('web').child('MochiKit.js').path})
        self.jsModules.mapping.update({
            'Coherence': filepath.FilePath(__file__).parent().child('web').child('Coherence.js').path})
        self.jsModules.mapping.update({
            'Coherence.Base': filepath.FilePath(__file__).parent().child('web').child('Coherence.Base.js').path})
        self.jsModules.mapping.update({
            'Coherence.Devices': filepath.FilePath(__file__).parent().child('web').child('Coherence.Devices.js').path})
        self.jsModules.mapping.update({
            'Coherence.Logging': filepath.FilePath(__file__).parent().child('web').child('Coherence.Logging.js').path})
        self.menu = MenuFragment(self)
    def childFactory(self, ctx, name):
        # Resolve URL children: first a Coherence child, then Athena's own
        # lookup, finally a static file next to this module.
        log.info('WebUI childFactory: %s' % name)
        try:
            return self.rootObject.coherence.children[name]
        except:
            # NOTE(review): bare except also hides unexpected errors; a
            # KeyError-only handler would be safer.
            ch = super(WebUI, self).childFactory(ctx, name)
            if ch is None:
                p = util.sibpath(__file__, name)
                log.info('looking for file',p)
                if os.path.exists(p):
                    ch = static.File(p)
            return ch
    def render_listmenu(self, ctx, data):
        l = []
        l.append(tags.div(id="t",class_="coherence_menu_item")[tags.a(href='/'+'devices',class_="coherence_menu_link")['Devices']])
        l.append(tags.div(id="t",class_="coherence_menu_item")[tags.a(href='/'+'logging',class_="coherence_menu_link")['Logging']])
        return ctx.tag[l]
    def render_listchilds(self, ctx, data):
        # Render the Coherence children as a <ul> of device links.
        cl = []
        log.info('children: %s' % self.coherence.children)
        for c in self.coherence.children:
            device = self.coherence.get_device_with_id(c)
            if device != None:
                _,_,_,device_type,version = device.get_device_type().split(':')
                cl.append( tags.li[tags.a(href='/'+c)[device_type,
                                                      ':',
                                                      version,
                                                      ' ',
                                                      device.get_friendly_name()]])
            else:
                cl.append( tags.li[c])
        return ctx.tag[tags.ul[cl]]
    def render_menu(self, ctx, data):
        log.info('render_menu')
        return self.menu
    def render_devices(self, ctx, data):
        log.info('render_devices')
        return DevicesFragment(self,'yes')
    def render_logging(self, ctx, data):
        log.info('render_logging')
        return LoggingFragment(self,'no')
class WebServer:
    """Publishes the Coherence web UI over HTTP through a Nevow site."""

    def __init__(self, port, coherence):
        from nevow import appserver

        def ResourceFactory( original):
            # Adapt a Web object to its LivePage UI.
            return WebUI( IWeb, original)

        # Whenever a Web instance is asked for as IResource, build a WebUI.
        registerAdapter(ResourceFactory, Web, inevow.IResource)
        root = Web(coherence)
        self.web_root_resource = root
        self.site = appserver.NevowSite( root)
        reactor.listenTCP( port, self.site)
        log.warning( "WebServer on port %d ready" % port)
class Coherence:
def __init__(self, config):
self.devices = []
self.children = {}
self._callbacks = {}
try:
logmode = config['logmode']
except:
logmode = 'info'
try:
network_if = config['interface']
except:
network_if = None
try:
self.web_server_port = config['serverport']
except:
self.web_server_port = 30020
log.set_master_level(logmode)
try:
subsystem_log = config['subsystem_log']
except:
subsystem_log = {}
for subsystem,level in subsystem_log.items():
log.warning( "setting log-level for subsystem %s to %s" % (subsystem,level))
log.set_level(name=subsystem,level=level)
#log.disable(name='Variable')
#log.enable(name='Variable')
#log.set_level(name='Variable')
plugin = louie.TwistedDispatchPlugin()
louie.install_plugin(plugin)
log.warning("Coherence UPnP framework starting...")
self.ssdp_server = SSDPServer()
louie.connect( self.add_device, 'Coherence.UPnP.SSDP.new_device', louie.Any)
louie.connect( self.remove_device, 'Coherence.UPnP.SSDP.removed_device', louie.Any)
louie.connect( self.receiver, 'Coherence.UPnP.Device.detection_completed', louie.Any)
#louie.connect( self.receiver, 'Coherence.UPnP.Service.detection_completed', louie.Any)
self.ssdp_server.subscribe("new_device", self.add_device)
self.ssdp_server.subscribe("removed_device", self.remove_device)
self.msearch = MSearch(self.ssdp_server)
reactor.addSystemEventTrigger( 'before', 'shutdown', self.shutdown)
if network_if:
self.hostname = get_ip_address(network_if)
else:
self.hostname = socket.gethostbyname(socket.gethostname())
if self.hostname == '127.0.0.1':
""" use interface detection via routing table as last resort """
self.hostname = get_host_address()
log.warning('running on host: %s' % self.hostname)
if self.hostname == '127.0.0.1':
log.error('detection of own ip failed, using 127.0.0.1 as own address, functionality will be limited')
self.urlbase = 'http://%s:%d/' % (self.hostname, self.web_server_port)
self.web_server = WebServer( self.web_server_port, self)
self.renew_service_subscription_loop = task.LoopingCall(self.check_devices)
self.renew_service_subscription_loop.start(20.0, now=False)
try:
plugins = config['plugins']
for p,a in plugins.items():
plugin = p
arguments = a
if not isinstance(arguments, dict):
arguments = {}
self.add_plugin(plugin, **arguments)
except KeyError:
log.warning("No plugin defined!")
except Exception, msg:
log.critical("Can't enable plugins, %s: %s!" % (plugin, msg))
def add_plugin(self, plugin, **kwargs):
log.info("adding plugin", plugin)
try:
plugin_class=globals().get(plugin)
for device in plugin_class.implements:
try:
device_class=globals().get(device)
log.critical("Activating %s plugin as %s..." % (plugin, device))
device_class(self, plugin_class, **kwargs)
except KeyError:
log.critical("Can't enable %s plugin, sub-system %s not found!" % (plugin, device))
except Exception, msg:
log.critical(traceback.print_exc())
log.critical("Can't enable %s plugin for sub-system %s, %s!" % (plugin, device, msg))
except KeyError:
log.critical("Can't enable %s plugin, not found!" % plugin)
except Exception, msg:
log.critical(traceback.print_exc())
log.critical("Can't enable %s plugin, %s!" % (plugin, msg))
def receiver( self, signal, *args, **kwargs):
#print "Coherence receiver called with", signal
#print kwargs
pass
def shutdown( self):
""" send service unsubscribe messages """
try:
self.renew_service_subscription_loop.stop()
except:
pass
for root_device in self.get_devices():
root_device.unsubscribe_service_subscriptions()
for device in root_device.get_devices():
device.unsubscribe_service_subscriptions()
self.ssdp_server.shutdown()
log.warning('Coherence UPnP framework shutdown')
def check_devices(self):
""" iterate over devices and their embedded ones and renew subscriptions """
for root_device in self.get_devices():
root_device.renew_service_subscriptions()
for device in root_device.get_devices():
device.renew_service_subscriptions()
def subscribe(self, name, callback):
self._callbacks.setdefault(name,[]).append(callback)
def unsubscribe(self, name, callback):
callbacks = self._callbacks.get(name,[])
if callback in callbacks:
callbacks.remove(callback)
self._callbacks[name] = callbacks
def callback(self, name, *args):
for callback in self._callbacks.get(name,[]):
callback(*args)
def get_device_with_usn(self, usn):
found = None
for device in self.devices:
if device.get_usn() == usn:
found = device
break
return found
def get_device_with_id(self, device_id):
found = None
for device in self.devices:
id = device.get_id()
if device_id[:5] != 'uuid:':
id = id[5:]
if id == device_id:
found = device
break
return found
def get_devices(self):
return self.devices
def add_device(self, device_type, infos):
log.info("adding",infos['ST'],infos['USN'])
if infos['ST'] == 'upnp:rootdevice':
log.info("adding upnp:rootdevice",infos['USN'])
root = RootDevice(infos)
self.devices.append(root)
else:
root_id = infos['USN'][:-len(infos['ST'])-2]
root = self.get_device_with_id(root_id)
device = Device(infos, root)
# fire this only after the device detection is fully completed
# and we are on the device level already, so we can work with them instead with the SSDP announce
#if infos['ST'] == 'upnp:rootdevice':
# self.callback("new_device", infos['ST'], infos)
def remove_device(self, device_type, infos):
log.info("removed device",infos['ST'],infos['USN'])
device = self.get_device_with_usn(infos['USN'])
if device:
self.devices.remove(device)
del device
if infos['ST'] == 'upnp:rootdevice':
louie.send('Coherence.UPnP.Device.removed', None, usn=infos['USN'])
self.callback("removed_device", infos['ST'], infos['USN'])
def add_web_resource(self, name, sub):
#self.web_server.web_root_resource.putChild(name, sub)
self.children[name] = sub
#print self.web_server.web_root_resource.children
def remove_web_resource(self, name):
# XXX implement me
pass
The Coherence class now acts as a singleton.
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2006, Frank Scholz <coherence@beebits.net>
import string
import socket
import os, sys
import traceback
from zope.interface import implements, Interface
from twisted.python import log, filepath, util
from twisted.python.components import registerAdapter
from twisted.internet import task, address
from twisted.internet import reactor
from nevow import athena, inevow, loaders, tags, static
from twisted.web import resource
import louie
from coherence.upnp.core.ssdp import SSDPServer
from coherence.upnp.core.msearch import MSearch
from coherence.upnp.core.device import Device, RootDevice
from coherence.upnp.core.utils import parse_xml, get_ip_address, get_host_address
from coherence.upnp.devices.control_point import ControlPoint
from coherence.upnp.devices.media_server import MediaServer
from coherence.upnp.devices.media_renderer import MediaRenderer
from coherence.backends.fs_storage import FSStore
from coherence.backends.elisa_storage import ElisaMediaStore
from coherence.backends.flickr_storage import FlickrStore
try:
from coherence.backends.gstreamer_audio_player import Player
except:
pass
from coherence.extern.logger import Logger, LOG_WARNING
log = Logger('Coherence')
class IWeb(Interface):
    """Marker interface: objects adaptable to the Athena web UI page."""
    def goingLive(self):
        pass
class Web(object):
    """Thin root object for the web site; it only carries the Coherence
    instance so the adapter registered in WebServer can build the UI."""
    def __init__(self, coherence):
        super(Web, self).__init__()
        self.coherence = coherence
class MenuFragment(athena.LiveFragment):
    """Live fragment rendering the tab menu; new tabs are pushed to the
    browser through the 'Coherence.Base' JavaScript class."""
    jsClass = u'Coherence.Base'
    docFactory = loaders.stan(
        tags.div(render=tags.directive('liveFragment'))[
            tags.div(id="coherence_menu_box", class_="coherence_menu_box")[""],
        ]
    )

    def __init__(self, page):
        super(MenuFragment, self).__init__()
        self.page = page                 # the owning WebUI LivePage
        self.coherence = page.coherence
        self.tabs = []                   # tabs already announced to the client

    def going_live(self):
        """Client-side hook: arrange cleanup on disconnect and return the
        tabs known so far so the browser can render them."""
        log.info("add a view to the MenuFragment")
        d = self.page.notifyOnDisconnect()
        d.addCallback(self.remove_me)
        d.addErrback(self.remove_me)
        return self.tabs
    athena.expose(going_live)

    def add_tab(self, title, active):
        """Announce a new tab unless one with the same title exists."""
        log.info("add tab %s to the MenuFragment" % title)
        new_tab = {u'title': unicode(title),
                   u'active': unicode(active)}
        for t in self.tabs:
            if t['title'] == new_tab['title']:
                return
        self.tabs.append(new_tab)
        self.callRemote('addTab', new_tab)

    def remove_me(self, result):
        # disconnect callback; nothing to tear down besides logging
        log.info("remove view from MenuFragment")
class DevicesFragment(athena.LiveFragment):
    """Live fragment showing the list of discovered UPnP devices and
    pushing detection/removal updates to the browser."""
    jsClass = u'Coherence.Devices'
    docFactory = loaders.stan(
        tags.div(render=tags.directive('liveFragment'))[
            tags.div(id="Devices-container", class_="coherence_container")[""],
        ]
    )

    def __init__(self, page, active):
        super(DevicesFragment, self).__init__()
        self.page = page
        self.coherence = page.coherence
        self.page.menu.add_tab('Devices', active)

    def going_live(self):
        """Client-side hook: return the devices known right now and connect
        to the louie detection/removal signals for live updates."""
        log.info("add a view to the DevicesFragment")
        d = self.page.notifyOnDisconnect()
        d.addCallback(self.remove_me)
        d.addErrback(self.remove_me)
        devices = []
        for device in self.coherence.get_devices():
            if device != None:
                # device type URN: urn:schemas-upnp-org:device:<type>:<version>
                _, _, _, device_type, version = device.get_device_type().split(':')
                name = unicode("%s:%s %s" % (device_type, version, device.get_friendly_name()))
                usn = unicode(device.get_usn())
                devices.append({u'name': name, u'usn': usn})
        louie.connect(self.add_device, 'Coherence.UPnP.Device.detection_completed', louie.Any)
        louie.connect(self.remove_device, 'Coherence.UPnP.Device.removed', louie.Any)
        return devices
    athena.expose(going_live)

    def remove_me(self, result):
        log.info("remove view from the DevicesFragment")

    def add_device(self, device):
        """louie handler: push a newly detected device to the browser."""
        log.info("DevicesFragment found device %s %s of type %s" % (
            device.get_usn(),
            device.get_friendly_name(),
            device.get_device_type()))
        _, _, _, device_type, version = device.get_device_type().split(':')
        name = unicode("%s:%s %s" % (device_type, version, device.get_friendly_name()))
        usn = unicode(device.get_usn())
        self.callRemote('addDevice', {u'name': name, u'usn': usn})

    def remove_device(self, usn):
        """louie handler: remove the device with *usn* from the view."""
        log.info("DevicesFragment remove device", usn)
        self.callRemote('removeDevice', unicode(usn))

    def render_devices(self, ctx, data):
        # NOTE(review): looks copy-pasted from WebUI.render_listchilds --
        # it lists web children rather than devices; confirm it is used.
        cl = []
        log.info('children: %s' % self.coherence.children)
        for c in self.coherence.children:
            device = self.coherence.get_device_with_id(c)
            if device != None:
                _, _, _, device_type, version = device.get_device_type().split(':')
                cl.append(tags.li[tags.a(href='/' + c)[device_type,
                                                       ':',
                                                       version,
                                                       ' ',
                                                       device.get_friendly_name()]])
            else:
                cl.append(tags.li[c])
        return ctx.tag[tags.ul[cl]]
class LoggingFragment(athena.LiveFragment):
    """Live fragment for the (still empty) logging tab."""
    jsClass = u'Coherence.Logging'
    docFactory = loaders.stan(
        tags.div(render=tags.directive('liveFragment'))[
            tags.div(id="Logging-container", class_="coherence_container")[""],
        ]
    )

    def __init__(self, page, active):
        super(LoggingFragment, self).__init__()
        self.page = page
        self.coherence = page.coherence
        # honour the caller-supplied activation state instead of a
        # hard-coded 'no'; DevicesFragment already forwards its parameter
        self.page.menu.add_tab('Logging', active)

    def going_live(self):
        """Client-side hook: register disconnect cleanup; no data yet."""
        log.info("add a view to the LoggingFragment")
        d = self.page.notifyOnDisconnect()
        d.addCallback(self.remove_me)
        d.addErrback(self.remove_me)
        return {}
    athena.expose(going_live)

    def remove_me(self, result):
        log.info("remove view from the LoggingFragment")
class WebUI(athena.LivePage):
    """Athena LivePage gluing the menu, devices and logging fragments
    together into the Coherence web UI."""
    jsClass = u'Coherence'
    addSlash = True
    docFactory = loaders.xmlstr("""\
<html xmlns:nevow="http://nevow.com/ns/nevow/0.1">
<head>
<nevow:invisible nevow:render="liveglue" />
<link rel="stylesheet" type="text/css" href="web/main.css" />
</head>
<body>
<div id="coherence_header"><div class="coherence_title">Coherence</div><div nevow:render="menu"></div></div>
<div id="coherence_body">
<span nevow:render="devices" />
<span nevow:render="logging" />
</div>
</body>
</html>
""")

    def __init__(self, *a, **kw):
        super(WebUI, self).__init__(*a, **kw)
        self.coherence = self.rootObject.coherence
        # register all client-side modules shipped in the 'web' directory;
        # replaces five copy-pasted mapping.update calls
        web_dir = filepath.FilePath(__file__).parent().child('web')
        for js_module in ('MochiKit', 'Coherence', 'Coherence.Base',
                          'Coherence.Devices', 'Coherence.Logging'):
            self.jsModules.mapping.update(
                {js_module: web_dir.child('%s.js' % js_module).path})
        self.menu = MenuFragment(self)

    def childFactory(self, ctx, name):
        """Resolve /<name>: Coherence web children first, then LivePage
        children, finally plain files next to this module."""
        log.info('WebUI childFactory: %s' % name)
        try:
            return self.rootObject.coherence.children[name]
        except KeyError:
            # narrowed from a bare "except:" that swallowed real errors
            ch = super(WebUI, self).childFactory(ctx, name)
            if ch is None:
                p = util.sibpath(__file__, name)
                log.info('looking for file', p)
                if os.path.exists(p):
                    ch = static.File(p)
            return ch

    def render_listmenu(self, ctx, data):
        """Static fallback rendering of the menu entries."""
        l = []
        l.append(tags.div(id="t", class_="coherence_menu_item")[tags.a(href='/' + 'devices', class_="coherence_menu_link")['Devices']])
        l.append(tags.div(id="t", class_="coherence_menu_item")[tags.a(href='/' + 'logging', class_="coherence_menu_link")['Logging']])
        return ctx.tag[l]

    def render_listchilds(self, ctx, data):
        """List registered web children, linking those that are devices."""
        cl = []
        log.info('children: %s' % self.coherence.children)
        for c in self.coherence.children:
            device = self.coherence.get_device_with_id(c)
            if device != None:
                _, _, _, device_type, version = device.get_device_type().split(':')
                cl.append(tags.li[tags.a(href='/' + c)[device_type,
                                                       ':',
                                                       version,
                                                       ' ',
                                                       device.get_friendly_name()]])
            else:
                cl.append(tags.li[c])
        return ctx.tag[tags.ul[cl]]

    def render_menu(self, ctx, data):
        """Return the page-wide MenuFragment."""
        log.info('render_menu')
        return self.menu

    def render_devices(self, ctx, data):
        """Build the Devices tab fragment (active)."""
        log.info('render_devices')
        return DevicesFragment(self, 'yes')

    def render_logging(self, ctx, data):
        """Build the Logging tab fragment (inactive)."""
        log.info('render_logging')
        return LoggingFragment(self, 'no')
class WebServer:
    """Create the Nevow site serving the Coherence UI and start listening
    on the given TCP port."""

    def __init__(self, port, coherence):
        from nevow import appserver

        # adapt the plain Web root object to an IResource via the WebUI page
        def ResourceFactory(original):
            return WebUI(IWeb, original)

        registerAdapter(ResourceFactory, Web, inevow.IResource)
        root = Web(coherence)
        self.web_root_resource = root
        self.site = appserver.NevowSite(root)
        reactor.listenTCP(port, self.site)
        log.warning("WebServer on port %d ready" % port)
class Coherence(object):
_instance_ = None # Singleton
def __new__(cls, *args, **kwargs):
obj = getattr(cls, '_instance_', None)
if obj is not None:
return obj
else:
obj = super(Coherence, cls).__new__(cls, *args, **kwargs)
cls._instance_ = obj
return obj
def __init__(self, config=None):
if not config:
config = {}
self.devices = []
self.children = {}
self._callbacks = {}
logmode = config.get('logmode', 'info')
network_if = config.get('interface')
self.web_server_port = config.get('serverport', 30020)
log.set_master_level(logmode)
subsystem_log = config.get('subsystem_log',{})
for subsystem,level in subsystem_log.items():
log.warning( "setting log-level for subsystem %s to %s" % (subsystem,level))
log.set_level(name=subsystem,level=level)
#log.disable(name='Variable')
#log.enable(name='Variable')
#log.set_level(name='Variable')
plugin = louie.TwistedDispatchPlugin()
louie.install_plugin(plugin)
log.warning("Coherence UPnP framework starting...")
self.ssdp_server = SSDPServer()
louie.connect( self.add_device, 'Coherence.UPnP.SSDP.new_device', louie.Any)
louie.connect( self.remove_device, 'Coherence.UPnP.SSDP.removed_device', louie.Any)
louie.connect( self.receiver, 'Coherence.UPnP.Device.detection_completed', louie.Any)
#louie.connect( self.receiver, 'Coherence.UPnP.Service.detection_completed', louie.Any)
self.ssdp_server.subscribe("new_device", self.add_device)
self.ssdp_server.subscribe("removed_device", self.remove_device)
self.msearch = MSearch(self.ssdp_server)
reactor.addSystemEventTrigger( 'before', 'shutdown', self.shutdown)
if network_if:
self.hostname = get_ip_address(network_if)
else:
self.hostname = socket.gethostbyname(socket.gethostname())
if self.hostname == '127.0.0.1':
""" use interface detection via routing table as last resort """
self.hostname = get_host_address()
log.warning('running on host: %s' % self.hostname)
if self.hostname == '127.0.0.1':
log.error('detection of own ip failed, using 127.0.0.1 as own address, functionality will be limited')
self.urlbase = 'http://%s:%d/' % (self.hostname, self.web_server_port)
self.web_server = WebServer( self.web_server_port, self)
self.renew_service_subscription_loop = task.LoopingCall(self.check_devices)
self.renew_service_subscription_loop.start(20.0, now=False)
plugins = config.get('plugins',[])
if not plugins:
log.warning("No plugin defined!")
else:
for p,a in plugins.items():
try:
plugin = p
arguments = a
if not isinstance(arguments, dict):
arguments = {}
self.add_plugin(plugin, **arguments)
except Exception, msg:
log.critical("Can't enable plugin, %s: %s!" % (plugin, msg))
continue
def add_plugin(self, plugin, **kwargs):
log.info("adding plugin", plugin)
try:
plugin_class=globals().get(plugin)
for device in plugin_class.implements:
try:
device_class=globals().get(device)
log.critical("Activating %s plugin as %s..." % (plugin, device))
device_class(self, plugin_class, **kwargs)
except KeyError:
log.critical("Can't enable %s plugin, sub-system %s not found!" % (plugin, device))
except Exception, msg:
log.critical(traceback.print_exc())
log.critical("Can't enable %s plugin for sub-system %s, %s!" % (plugin, device, msg))
except KeyError:
log.critical("Can't enable %s plugin, not found!" % plugin)
except Exception, msg:
log.critical(traceback.print_exc())
log.critical("Can't enable %s plugin, %s!" % (plugin, msg))
def receiver( self, signal, *args, **kwargs):
#print "Coherence receiver called with", signal
#print kwargs
pass
def shutdown( self):
""" send service unsubscribe messages """
try:
self.renew_service_subscription_loop.stop()
except:
pass
for root_device in self.get_devices():
root_device.unsubscribe_service_subscriptions()
for device in root_device.get_devices():
device.unsubscribe_service_subscriptions()
self.ssdp_server.shutdown()
log.warning('Coherence UPnP framework shutdown')
def check_devices(self):
""" iterate over devices and their embedded ones and renew subscriptions """
for root_device in self.get_devices():
root_device.renew_service_subscriptions()
for device in root_device.get_devices():
device.renew_service_subscriptions()
def subscribe(self, name, callback):
self._callbacks.setdefault(name,[]).append(callback)
def unsubscribe(self, name, callback):
callbacks = self._callbacks.get(name,[])
if callback in callbacks:
callbacks.remove(callback)
self._callbacks[name] = callbacks
def callback(self, name, *args):
for callback in self._callbacks.get(name,[]):
callback(*args)
def get_device_with_usn(self, usn):
found = None
for device in self.devices:
if device.get_usn() == usn:
found = device
break
return found
def get_device_with_id(self, device_id):
found = None
for device in self.devices:
id = device.get_id()
if device_id[:5] != 'uuid:':
id = id[5:]
if id == device_id:
found = device
break
return found
def get_devices(self):
return self.devices
def add_device(self, device_type, infos):
log.info("adding",infos['ST'],infos['USN'])
if infos['ST'] == 'upnp:rootdevice':
log.info("adding upnp:rootdevice",infos['USN'])
root = RootDevice(infos)
self.devices.append(root)
else:
root_id = infos['USN'][:-len(infos['ST'])-2]
root = self.get_device_with_id(root_id)
device = Device(infos, root)
# fire this only after the device detection is fully completed
# and we are on the device level already, so we can work with them instead with the SSDP announce
#if infos['ST'] == 'upnp:rootdevice':
# self.callback("new_device", infos['ST'], infos)
def remove_device(self, device_type, infos):
log.info("removed device",infos['ST'],infos['USN'])
device = self.get_device_with_usn(infos['USN'])
if device:
self.devices.remove(device)
del device
if infos['ST'] == 'upnp:rootdevice':
louie.send('Coherence.UPnP.Device.removed', None, usn=infos['USN'])
self.callback("removed_device", infos['ST'], infos['USN'])
def add_web_resource(self, name, sub):
#self.web_server.web_root_resource.putChild(name, sub)
self.children[name] = sub
#print self.web_server.web_root_resource.children
def remove_web_resource(self, name):
# XXX implement me
pass
|
# -*- coding: utf-8 -*-
#
# ZS documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 24 18:21:57 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# (dropped the temporary os.system() probes of the python3 installation --
# debugging leftovers that shelled out on every documentation build)

# On readthedocs.org, the doc build is run inside a virtualenv, with zs
# installed, but the virtualenv bin/ dir is not on the path, so by default the
# 'zs' command is not available to the programoutput extension. But we want
# it to be. So fix that:
if (hasattr(sys, "real_prefix")
        # the builtin getattr, not sys.getattr -- sys has no such attribute
        # and the original raised AttributeError outside a virtualenv
        or sys.prefix != getattr(sys, "base_prefix", sys.prefix)):
    # we're in a virtualenv and sys.prefix points to the virtualenv
    # directory. See:
    # https://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv
    # (base_prefix is needed to also detect pyvenv environments -- future
    # proofing!)
    os.environ["PATH"] = "%s/bin:%s" % (sys.prefix, os.environ["PATH"])

# And let's also make sure our example file is not present, to avoid
# embarassing failures later
import shutil
if os.path.exists("example/scratch"):
    shutil.rmtree("example/scratch")
os.mkdir("example/scratch")
shutil.copyfile("example/tiny-4grams.txt",
                "example/scratch/tiny-4grams.txt")

# And set the TIME variable to control the output format from 'time' (see
# index.rst)
os.environ["TIME"] = "\nReal time elapsed: %e seconds"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinxcontrib.programoutput',
    'IPython.sphinxext.ipython_console_highlighting',
    'IPython.sphinxext.ipython_directive',
]

# IPython extension: don't bother with matplotlib, it probably isn't installed
# and anyway we don't need it.
ipython_mplbackend = None

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'ZS'
copyright = u'2013-2014, Nathaniel J. Smith'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version is taken from the installed zs package so docs always match the code
import zs
version = zs.__version__
# The full version, including alpha/beta/rc tags.
#release = '0.0.0'
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'ZSdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'ZS.tex', u'ZS Documentation',
     u'Nathaniel J. Smith', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'zs', u'ZS Documentation',
     [u'Nathaniel J. Smith'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'ZS', u'ZS Documentation',
     u'Nathaniel J. Smith', 'ZS', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
# (old-style mapping: base URL -> inventory file; None means the default
# objects.inv at that URL)
intersphinx_mapping = {'http://docs.python.org/': None}
Remove the ReadTheDocs Python-environment probe (os.system debug calls) from conf.py.
# -*- coding: utf-8 -*-
#
# ZS documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 24 18:21:57 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# On readthedocs.org, the doc build is run inside a virtualenv, with zs
# installed, but the virtualenv bin/ dir is not on the path, so by default the
# 'zs' command is not available to the programoutput extension. But we want
# it to be. So fix that:
if (hasattr(sys, "real_prefix")
        # the builtin getattr, not sys.getattr -- sys has no such attribute
        # and the original raised AttributeError outside a virtualenv
        or sys.prefix != getattr(sys, "base_prefix", sys.prefix)):
    # we're in a virtualenv and sys.prefix points to the virtualenv
    # directory. See:
    # https://stackoverflow.com/questions/1871549/python-determine-if-running-inside-virtualenv
    # (base_prefix is needed to also detect pyvenv environments -- future
    # proofing!)
    os.environ["PATH"] = "%s/bin:%s" % (sys.prefix, os.environ["PATH"])

# And let's also make sure our example file is not present, to avoid
# embarassing failures later
import shutil
if os.path.exists("example/scratch"):
    shutil.rmtree("example/scratch")
os.mkdir("example/scratch")
shutil.copyfile("example/tiny-4grams.txt",
                "example/scratch/tiny-4grams.txt")

# And set the TIME variable to control the output format from 'time' (see
# index.rst)
os.environ["TIME"] = "\nReal time elapsed: %e seconds"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinxcontrib.programoutput',
    'IPython.sphinxext.ipython_console_highlighting',
    'IPython.sphinxext.ipython_directive',
]

# IPython extension: don't bother with matplotlib, it probably isn't installed
# and anyway we don't need it.
ipython_mplbackend = None

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'ZS'
copyright = u'2013-2014, Nathaniel J. Smith'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version is taken from the installed zs package so docs always match the code
import zs
version = zs.__version__
# The full version, including alpha/beta/rc tags.
#release = '0.0.0'
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ZSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ZS.tex', u'ZS Documentation',
u'Nathaniel J. Smith', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zs', u'ZS Documentation',
[u'Nathaniel J. Smith'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ZS', u'ZS Documentation',
u'Nathaniel J. Smith', 'ZS', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
# coding: utf-8
#
# Thanks to: https://gist.github.com/JhonatasMartins/7eed599a2f95d005b81f
#
from django.db import models
STATE_CHOICES = (
('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amapá'),
('AM', 'Amazonas'), ('BA', 'Bahia'), ('CE', 'Ceará'),
('DF', 'Distrito Federal'), ('ES', 'Espírito Santo'),
('GO', 'Goiás'), ('MA', 'Maranhão'), ('MT', 'Mato Grosso'),
('MS', 'Mato Grosso do Sul'), ('MG', 'Minas Gerais'),
('PA', 'Pará'), ('PB', 'Paraíba'), ('PR', 'Paraná'),
('PE', 'Pernambuco'), ('PI', 'Piauí'), ('RJ', 'Rio de Janeiro'),
('RN', 'Rio Grande do Norte'), ('RS', 'Rio Grande do Sul'),
('RO', 'Rondônia'), ('RR', 'Roraima'), ('SC', 'Santa Catarina'),
('SP', 'São Paulo'), ('SE', 'Sergipe'), ('TO', 'Tocantins')
)
class Cidade(models.Model):
    """A Brazilian city belonging to a state from STATE_CHOICES."""
    # City name, e.g. "Campinas".
    nome = models.CharField(max_length=60)
    # Two-letter state code; choices come from STATE_CHOICES above.
    estado = models.CharField(max_length=2, choices=STATE_CHOICES)
    def __str__(self):
        # FIX: Python 3 never calls __unicode__, so the model printed as
        # "Cidade object" under py3; __str__ is the portable hook.
        return self.nome
    # Kept so existing Python 2 callers of __unicode__ keep working.
    __unicode__ = __str__
Replace __unicode__ with __str__ for Python 3 compatibility (the new label also includes the state code).
# coding: utf-8
#
# Thanks to: https://gist.github.com/JhonatasMartins/7eed599a2f95d005b81f
#
from django.db import models
STATE_CHOICES = (
('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amapá'),
('AM', 'Amazonas'), ('BA', 'Bahia'), ('CE', 'Ceará'),
('DF', 'Distrito Federal'), ('ES', 'Espírito Santo'),
('GO', 'Goiás'), ('MA', 'Maranhão'), ('MT', 'Mato Grosso'),
('MS', 'Mato Grosso do Sul'), ('MG', 'Minas Gerais'),
('PA', 'Pará'), ('PB', 'Paraíba'), ('PR', 'Paraná'),
('PE', 'Pernambuco'), ('PI', 'Piauí'), ('RJ', 'Rio de Janeiro'),
('RN', 'Rio Grande do Norte'), ('RS', 'Rio Grande do Sul'),
('RO', 'Rondônia'), ('RR', 'Roraima'), ('SC', 'Santa Catarina'),
('SP', 'São Paulo'), ('SE', 'Sergipe'), ('TO', 'Tocantins')
)
class Cidade(models.Model):
    """A Brazilian city belonging to a state from STATE_CHOICES."""
    # City name, e.g. "Campinas".
    nome = models.CharField(max_length=60)
    # Two-letter state code; choices come from STATE_CHOICES above.
    estado = models.CharField(max_length=2, choices=STATE_CHOICES)
    def __str__(self):
        """Return a human-readable label, e.g. "Campinas (SP)"."""
        return f"{self.nome} ({self.estado})"
|
#!/usr/bin/env python
# Run from directory above all the database gbks
import argparse
import os
import re
import csv
import sys
import pandas as pd
import logging
from collections import defaultdict
from dojo.taxonomy import NCBITree
from ninja_utils.parsers import FASTA
# The arg parser
def make_arg_parser():
    """Build the command-line parser for the antiSMASH-DB scrape script.

    Returns the configured argparse.ArgumentParser (not the parsed args),
    so callers decide when to parse.
    """
    ap = argparse.ArgumentParser(description='Description here')
    ap.add_argument('-i', '--input', required=True,
                    help='Directory in which to find all the cluster .gbk files')
    ap.add_argument('-nt_cat', required=True,
                    help='The nucleotide_catalog tsv file matching Genbank accessions to NCBI taxon IDs')
    ap.add_argument('-o', '--output', required=False, default='.',
                    help='Where to save the output files (default = new dir in cwd)')
    ap.add_argument('--no_compile', action='store_true', required=False, default=False,
                    help='Do not compile all the cluster files and DNA files into single .mpfa and .fna files')
    return ap
def parse_aa_seqs(gbk_file, tid_org, gbk_filepath, outpath):
    """Extract CDS protein translations from one antiSMASH cluster .gbk
    file into a per-cluster .mpfa file under
    <outpath>/antismash_db_protein_seqs/.

    gbk_file: bare .gbk file name (used to build headers/output names)
    tid_org: [ncbi_tid, organism] string pair for the FASTA headers
    gbk_filepath: full path to the .gbk file to read
    outpath: root output directory (subdir created on first call)
    """
    ncbi_tid = str(tid_org[0])
    organism = str(tid_org[1])
    header = '>ncbi_tid|%s|genbank|' % ncbi_tid # start each sequence's identifier with the pacman >
    # define the start of the sequence by the CDS line
    title_begin = False
    sequence_begin = False
    mpfa_results = 'antismash_db_protein_seqs'
    if mpfa_results not in os.listdir(outpath):
        os.mkdir(os.path.join(outpath, mpfa_results))
    i = 0  # NOTE(review): incremented but never read -- dead counter
    if gbk_file.endswith('.gbk'):
        if gbk_file.endswith('final.gbk'):
            # Merged '...final.gbk' summaries are skipped on purpose.
            pass
        else:
            # print(infilename)
            i += 1
            # Cluster id for headers: strip '.gbk', turn '.clu' into '_clu'.
            header_c = header + gbk_file.replace('.gbk', '')
            header_c = header_c.replace('.clu', '_clu')
            outfilename = 'aa_' + gbk_file + '.mpfa'
            outfilename = outfilename.replace('.gbk', '').replace('.clu', '_clu')
            outfile = open(os.path.join(outpath, mpfa_results, outfilename), 'w')
            with open(os.path.join(gbk_filepath), 'r') as infile:
                for line in infile:
                    if title_begin: # only do this if 'CDS ' starts the line
                        if line.startswith(" /locus_tag"):
                            p = re.compile(r"^(\s+)(\/locus_tag=)\"(.*)\"")
                            m = p.search(line) # searches using the regex defined above
                            # NOTE(review): if the qualifier is malformed or
                            # wraps to a second line, m is None and .group()
                            # raises AttributeError -- confirm inputs.
                            outfile_m = m.group(3)
                            outfile.write(header_c + '_' + outfile_m) # use the filename to ID the file on the first line
                            outfile.write('|organism|%s\n' % organism)
                        if line.startswith(' /translation'):
                            sequence_begin = True
                        if sequence_begin:
                            if line.startswith(' /translation'):
                                aa_p = re.compile(r"^(\s+)(\/translation=\")([A-Z]+)")
                                aa_m = aa_p.search(line) # searches using the regex defined above
                                out_aa = aa_m.group(3)
                                outfile.write(out_aa)
                            if line.startswith(' '):
                                # Keep only amino-acid letters on continuation
                                # lines.
                                # NOTE(review): the set is built from the
                                # string 'G,A,L,...' so it also contains ','
                                # -- stray commas would pass the filter; and
                                # the /translation line itself appears to
                                # satisfy this branch too, so its residues may
                                # be written twice. Verify against real
                                # antiSMASH output before changing.
                                outfile.write(''.join([ch for ch in line if ch in set('G,A,L,M,F,W,K,Q,E,S,P,V,I,C,Y,H,R,N,D,T')]))
                            else:
                                # Non-indented line ends the translation.
                                outfile.write('\n')
                                sequence_begin = False
                                if line.startswith(' CDS '):
                                    title_begin = True
                                else:
                                    title_begin = False
                                    sequence_begin = False
                    elif line.startswith(' CDS '):
                        title_begin = True # identifies the line starting with CDS as cluster module sequence start
            outfile.close()
    return None
def parse_dna_seqs(gbk_file, tid_org, gbk_filepath, outpath):
    """Write the nucleotide sequence of one cluster .gbk as a one-record
    .fna file under <outpath>/antismash_db_dna_seqs/.

    Everything after the 'ORIGIN' line is kept, filtered down to the
    lowercase bases a/t/c/g (digits, spaces and newlines are layout).
    Files not ending in '.gbk', and merged '...final.gbk' summaries,
    are skipped.
    """
    ncbi_tid = str(tid_org[0])
    organism = str(tid_org[1])
    dna_results = 'antismash_db_dna_seqs'
    if dna_results not in os.listdir(outpath):
        os.mkdir(os.path.join(outpath, dna_results))
    # Guard clauses replace the original nested if/else.
    if not gbk_file.endswith('.gbk') or gbk_file.endswith('final.gbk'):
        return None
    # Cluster id used in both the FASTA header and the output file name.
    stem = gbk_file.replace('.gbk', '').replace('.clu', '_clu')
    fasta_header = '>ncbi_tid|%s|genbank|%s' % (ncbi_tid, stem)
    out_name = 'dna_' + stem + '.fna'
    with open(os.path.join(outpath, dna_results, out_name), 'w') as out_file:
        out_file.write(fasta_header + '|organism|%s\n' % organism)
        in_origin = False
        with open(gbk_filepath, 'r') as gbk_in:
            for raw in gbk_in:
                if in_origin:
                    out_file.write(''.join(c for c in raw if c in 'atcg'))
                elif raw.startswith('ORIGIN'):
                    # The ORIGIN record marks the start of the sequence.
                    in_origin = True
    return None
def parse_cluster_types(gbkpath, outpath, gbk_dd):
    """Record each cluster's antiSMASH 'product' type in
    <outpath>/antismash_db_product_types/asdb_product_types.csv.

    gbkpath: directory holding the cluster .gbk files
    gbk_dd: accession -> [ncbi_tid, organism] mapping used for row labels
    """
    filelist = os.listdir(gbkpath)
    cluster_begin = False
    if 'antismash_db_product_types' not in os.listdir(outpath):
        os.mkdir(os.path.join(outpath, 'antismash_db_product_types'))
    i = 0
    type_dd = defaultdict(dict)
    for gbk in filelist:
        if gbk.endswith('.gbk'):
            if gbk.endswith('final.gbk'):
                # NOTE(review): 'pass' does not skip this file -- unlike the
                # sequence parsers, '...final.gbk' summaries fall through and
                # are still processed; confirm this is intended.
                pass
            i += 1
            clusterid = gbk.replace('.gbk', '')
            clusterid = clusterid.replace('.clu', '_clu')
            gbk_id = gbk.split('.cluster')[0]
            tid_org = gbk_dd[gbk_id]
            if not tid_org:
                tid_org = ['na', 'k__None;p__None;c__None;o__None;f__None;g__None;s__None;t__None']
            ncbi_tid = str(tid_org[0])
            organism = str(tid_org[1])
            cluster_label = 'ncbi_tid|%s|genbank|%s|organism|%s' % (ncbi_tid, clusterid, organism)
            with open(os.path.join(gbkpath, gbk), 'r') as in_gbk:
                for line in in_gbk:
                    if cluster_begin:
                        if line.startswith(" /product"):
                            if line.endswith("\"\n"):
                                # Product name fits on this line.
                                p = re.compile(r"^(\s+)\/(product)=\"(.*)\"")
                                m = p.search(line)
                                prod = str(m.group(3))
                            else:
                                # FIX: a product name wrapped onto the next
                                # line has no closing quote here, so the
                                # single-line regex returned None and
                                # .group() raised AttributeError; stitch the
                                # two halves back together instead.
                                p = re.compile(r"^(\s+)\/(product)=\"(.*)$")
                                m = p.search(line)
                                nxline = next(in_gbk)
                                p2 = re.compile(r"( )(.*)\"")
                                m2 = p2.search(nxline)
                                prod = ' '.join([str(m.group(3)), str(m2.group(2))])
                            type_dd[cluster_label] = prod
                        elif line.startswith(" gene"):
                            cluster_begin = False
                    elif line.startswith(" cluster"):
                        cluster_begin = True
    # One row per cluster label, sorted for a stable CSV.
    cdf = pd.DataFrame.from_dict(type_dd, orient='index')
    cdf.columns = ['cluster_type']
    cdf.sort_index(axis=0, inplace=True)
    with open(os.path.join(outpath, 'antismash_db_product_types', 'asdb_product_types.csv'), 'w') as outfile:
        cdf.to_csv(outfile)
    return None
def compile_files(outpath):
    """Concatenate all per-cluster files into two combined outputs:
    asDB_protein_seqs.mpfa and asDB_dna_seqs.fna under outpath.

    Reads every file in antismash_db_protein_seqs/ and
    antismash_db_dna_seqs/ via the FASTA parser and re-emits
    header/sequence pairs one per line.
    """
    protein_seqs = os.path.join(outpath, 'asDB_protein_seqs.mpfa')
    dna_seqs = os.path.join(outpath, 'asDB_dna_seqs.fna')
    with open(protein_seqs, 'w') as aa_outfile:
        for aafile in os.listdir(os.path.join(outpath, 'antismash_db_protein_seqs')):
            aafile = os.path.join(outpath, 'antismash_db_protein_seqs', aafile)
            with open(aafile, 'r') as aa_in:
                fasta_gen = FASTA(aa_in)
                for header, sequence in fasta_gen.read():
                    aa_outfile.write(header + '\n')
                    aa_outfile.write(sequence + '\n')
        # FIX: dropped the redundant aa_outfile.close(); the with-block
        # already closes the file on exit.
    with open(dna_seqs, 'w') as dna_outfile:
        for dnafile in os.listdir(os.path.join(outpath, 'antismash_db_dna_seqs')):
            dnafile = os.path.join(outpath, 'antismash_db_dna_seqs', dnafile)
            with open(dnafile, 'r') as dna_in:
                fasta_gen = FASTA(dna_in)
                for header, sequence in fasta_gen.read():
                    dna_outfile.write(header + '\n')
                    dna_outfile.write(sequence + '\n')
        # FIX: same for dna_outfile.close().
    return None
def tid_to_name(tid, nt=None):
    """Map an NCBI taxon id to a greengenes-style lineage string.

    tid: taxon id (int or int-like string)
    nt: an NCBITree to reuse across calls. FIX: the original default was
        nt=NCBITree(), which built the tree once at import time and shared
        it across every call (the mutable-default pitfall); a None sentinel
        now builds one on demand instead.
    """
    if nt is None:
        nt = NCBITree()
    tid = int(tid)
    organism = nt.green_genes_lineage(tid, depth=8, depth_force=True)
    return organism
def main():
    """Drive the scrape: map accessions to taxonomy from the nt catalog,
    then emit per-cluster protein/DNA files, the product-type table, and
    (unless --no_compile) the combined output files."""
    parser = make_arg_parser()
    args = parser.parse_args()
    nt_cat = os.path.join(args.nt_cat)
    gbkpath = os.path.join(args.input)
    outpath = os.path.join(args.output)
    if not os.path.isdir(outpath):
        os.mkdir(os.path.join(outpath))
    if not os.path.isdir(outpath):
        print('\nError creating output directory; check given path and try again\n')
        sys.exit()
    logfile = os.path.join(outpath, 'scrapelog.log')
    logging.basicConfig(filename=logfile, level=logging.DEBUG, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
    gbks = os.listdir(gbkpath)
    gbks = [f for f in gbks if f.endswith('gbk')]
    with open(nt_cat, 'r') as nt_catalog:
        gbk_dd = defaultdict(list)
        reader = csv.reader(nt_catalog, delimiter='\t')
        next(reader)  # skip the catalog header row
        nt = NCBITree()
        # Accessions we actually have cluster files for.
        gbk_set = set()
        for gbk_file in gbks:
            gbk_id = gbk_file.split('.cluster')[0]
            gbk_set.add(gbk_id)
        for line in reader:
            if line[1] in gbk_set:
                tid = line[2]
                organism = tid_to_name(tid, nt=nt)
                gbk_dd[line[1]] = [tid, organism]
    i = 0  # count of clusters with no taxonomy found
    for gbk_file in gbks:
        gbk_id = gbk_file.split('.cluster')[0]
        tid_org = gbk_dd[gbk_id]
        if not tid_org:
            print('Error getting taxonomy for %s for cluster file %s' % (gbk_id, gbk_file))
            logging.warning('Error getting taxonomy for %s for cluster file %s' % (gbk_id, gbk_file))
            tid_org = ['na', 'k__None;p__None;c__None;o__None;f__None;g__None;s__None;t__None']
            i += 1
        gbk_filepath = os.path.join(gbkpath, gbk_file)
        parse_aa_seqs(gbk_file, tid_org, gbk_filepath, outpath)
        parse_dna_seqs(gbk_file, tid_org, gbk_filepath, outpath)
    # FIX: parse_cluster_types scans *every* .gbk in gbkpath on each call;
    # the original invoked it inside the per-file loop, redoing the full
    # directory scan and rewriting the CSV once per file (O(n^2)). The
    # final CSV is identical when it is called once, after the loop.
    parse_cluster_types(gbkpath, outpath, gbk_dd)
    if not args.no_compile:
        compile_files(outpath)
    logging.warning('DOJO could not acquire NCBI tid information for %s clusters' % i)
Fixed a bug where product names wrapped across two lines would fail the regex match.
#!/usr/bin/env python
# Run from directory above all the database gbks
import argparse
import os
import re
import csv
import sys
import pandas as pd
import logging
from collections import defaultdict
from dojo.taxonomy import NCBITree
from ninja_utils.parsers import FASTA
# The arg parser
def make_arg_parser():
parser = argparse.ArgumentParser(description='Description here')
parser.add_argument('-i', '--input', help='Directory in which to find all the cluster .gbk files', required=True)
parser.add_argument('-nt_cat', help='The nucleotide_catalog tsv file matching Genbank accessions to NCBI taxon IDs', required=True)
parser.add_argument('-o', '--output', help='Where to save the output files (default = new dir in cwd)', required=False, default='.')
parser.add_argument('--no_compile', help='Do not compile all the cluster files and DNA files into single .mpfa and .fna files',
action='store_true', required=False, default=False)
return parser
def parse_aa_seqs(gbk_file, tid_org, gbk_filepath, outpath):
ncbi_tid = str(tid_org[0])
organism = str(tid_org[1])
header = '>ncbi_tid|%s|genbank|' % ncbi_tid # start each sequence's identifier with the pacman >
# define the start of the sequence by the CDS line
title_begin = False
sequence_begin = False
mpfa_results = 'antismash_db_protein_seqs'
if mpfa_results not in os.listdir(outpath):
os.mkdir(os.path.join(outpath, mpfa_results))
i = 0
if gbk_file.endswith('.gbk'):
if gbk_file.endswith('final.gbk'):
pass
else:
# print(infilename)
i += 1
header_c = header + gbk_file.replace('.gbk', '')
header_c = header_c.replace('.clu', '_clu')
outfilename = 'aa_' + gbk_file + '.mpfa'
outfilename = outfilename.replace('.gbk', '').replace('.clu', '_clu')
outfile = open(os.path.join(outpath, mpfa_results, outfilename), 'w')
with open(os.path.join(gbk_filepath), 'r') as infile:
for line in infile:
if title_begin: # only do this if 'CDS ' starts the line
if line.startswith(" /locus_tag"):
p = re.compile(r"^(\s+)(\/locus_tag=)\"(.*)\"")
m = p.search(line) # searches using the regex defined above
outfile_m = m.group(3)
outfile.write(header_c + '_' + outfile_m) # use the filename to ID the file on the first line
outfile.write('|organism|%s\n' % organism)
if line.startswith(' /translation'):
sequence_begin = True
if sequence_begin:
if line.startswith(' /translation'):
aa_p = re.compile(r"^(\s+)(\/translation=\")([A-Z]+)")
aa_m = aa_p.search(line) # searches using the regex defined above
out_aa = aa_m.group(3)
outfile.write(out_aa)
if line.startswith(' '):
outfile.write(''.join([ch for ch in line if ch in set('G,A,L,M,F,W,K,Q,E,S,P,V,I,C,Y,H,R,N,D,T')]))
else:
outfile.write('\n')
sequence_begin = False
if line.startswith(' CDS '):
title_begin = True
else:
title_begin = False
sequence_begin = False
elif line.startswith(' CDS '):
title_begin = True # identifies the line starting with CDS as cluster module sequence start
outfile.close()
return None
def parse_dna_seqs(gbk_file, tid_org, gbk_filepath, outpath):
ncbi_tid = str(tid_org[0])
organism = str(tid_org[1])
header = '>ncbi_tid|%s|genbank|' % ncbi_tid # start each sequence's identifier with the pacman >
# define the start of the sequence by the ORIGIN line
sequence_begin = False
dna_results = 'antismash_db_dna_seqs'
if dna_results not in os.listdir(outpath):
os.mkdir(os.path.join(outpath, dna_results))
i = 0
if gbk_file.endswith('.gbk'):
if gbk_file.endswith('final.gbk'):
pass
else:
# print(infilename)
i += 1
header_c = header + gbk_file.replace('.gbk', '')
header_c = header_c.replace('.clu', '_clu')
outfilename = 'dna_' + gbk_file + '.fna'
outfilename = outfilename.replace('.gbk', '').replace('.clu', '_clu')
outfile = open(os.path.join(outpath, dna_results, outfilename), 'w')
outfile.write(header_c + '|organism|%s\n' % organism)
with open(os.path.join(gbk_filepath), 'r') as infile:
for line in infile:
if sequence_begin: # only do this if ORIGIN starts the line
# joins together only the characters on the line in the set atcg
outfile.write(''.join([ch for ch in line if ch in set(('a', 't', 'c', 'g'))]))
elif line.startswith('ORIGIN'):
sequence_begin = True # identifies the line starting with ORIGIN as sequence start
outfile.close()
return None
def parse_cluster_types(gbkpath, outpath, gbk_dd):
filelist = os.listdir(gbkpath)
# debug
# print(filelist)
# define the start of the sequence by the CDS line
cluster_begin = False
if 'antismash_db_product_types' not in os.listdir(outpath):
os.mkdir(os.path.join(outpath, 'antismash_db_product_types'))
i = 0
type_dd = defaultdict(dict)
for gbk in filelist:
if gbk.endswith('.gbk'):
if gbk.endswith('final.gbk'):
pass
# print(infilename)
i += 1
clusterid = gbk.replace('.gbk', '')
clusterid = clusterid.replace('.clu', '_clu')
gbk_id = gbk.split('.cluster')[0]
# tid_org = []
tid_org = gbk_dd[gbk_id]
if not tid_org:
tid_org = ['na', 'k__None;p__None;c__None;o__None;f__None;g__None;s__None;t__None']
ncbi_tid = str(tid_org[0])
organism = str(tid_org[1])
cluster_label = 'ncbi_tid|%s|genbank|%s|organism|%s' % (ncbi_tid, clusterid, organism)
with open(os.path.join(gbkpath, gbk), 'r') as in_gbk:
for line in in_gbk:
if cluster_begin:
if line.startswith(" /product"):
if line.endswith("\"\n"):
p = re.compile(r"^(\s+)\/(product)=\"(.*)\"")
m = p.search(line)
prod = str(m.group(3))
else:
p = re.compile(r"^(\s+)\/(product)=\"(.*)$")
m = p.search(line)
nxline = next(in_gbk)
p2 = re.compile(r"( )(.*)\"")
m2 = p2.search(nxline)
prod = ' '.join([str(m.group(3)), str(m2.group(2))])
type_dd[cluster_label] = prod
elif line.startswith(" gene"):
cluster_begin = False
elif line.startswith(" cluster"):
cluster_begin = True
cdf = pd.DataFrame.from_dict(type_dd, orient='index')
cdf.columns = ['cluster_type']
cdf.sort_index(axis=0, inplace=True)
with open(os.path.join(outpath, 'antismash_db_product_types', 'asdb_product_types.csv'), 'w') as outfile:
cdf.to_csv(outfile)
return None
def compile_files(outpath):
protein_seqs = os.path.join(outpath, 'asDB_protein_seqs.mpfa')
dna_seqs = os.path.join(outpath, 'asDB_dna_seqs.fna')
with open(protein_seqs, 'w') as aa_outfile:
for aafile in os.listdir(os.path.join(outpath, 'antismash_db_protein_seqs')):
aafile = os.path.join(outpath, 'antismash_db_protein_seqs', aafile)
with open(aafile, 'r') as aa_in:
fasta_gen = FASTA(aa_in)
for header, sequence in fasta_gen.read():
aa_outfile.write(header + '\n')
aa_outfile.write(sequence + '\n')
aa_outfile.close()
with open(dna_seqs, 'w') as dna_outfile:
for dnafile in os.listdir(os.path.join(outpath, 'antismash_db_dna_seqs')):
dnafile = os.path.join(outpath, 'antismash_db_dna_seqs', dnafile)
with open(dnafile, 'r') as dna_in:
fasta_gen = FASTA(dna_in)
for header, sequence in fasta_gen.read():
dna_outfile.write(header + '\n')
dna_outfile.write(sequence + '\n')
dna_outfile.close()
return None
def tid_to_name(tid, nt=None):
    """Map an NCBI taxon id to a greengenes-style lineage string.

    tid: taxon id (int or int-like string)
    nt: an NCBITree to reuse across calls. FIX: the original default was
        nt=NCBITree(), which built the tree once at import time and shared
        it across every call (the mutable-default pitfall); a None sentinel
        now builds one on demand instead.
    """
    if nt is None:
        nt = NCBITree()
    tid = int(tid)
    organism = nt.green_genes_lineage(tid, depth=8, depth_force=True)
    return organism
def main():
    """Drive the scrape: map accessions to taxonomy from the nt catalog,
    then emit per-cluster protein/DNA files, the product-type table, and
    (unless --no_compile) the combined output files."""
    parser = make_arg_parser()
    args = parser.parse_args()
    nt_cat = os.path.join(args.nt_cat)
    gbkpath = os.path.join(args.input)
    outpath = os.path.join(args.output)
    if not os.path.isdir(outpath):
        os.mkdir(os.path.join(outpath))
    if not os.path.isdir(outpath):
        print('\nError creating output directory; check given path and try again\n')
        sys.exit()
    logfile = os.path.join(outpath, 'scrapelog.log')
    logging.basicConfig(filename=logfile, level=logging.DEBUG, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
    gbks = os.listdir(gbkpath)
    gbks = [f for f in gbks if f.endswith('gbk')]
    with open(nt_cat, 'r') as nt_catalog:
        gbk_dd = defaultdict(list)
        reader = csv.reader(nt_catalog, delimiter='\t')
        next(reader)  # skip the catalog header row
        nt = NCBITree()
        # Accessions we actually have cluster files for.
        gbk_set = set()
        for gbk_file in gbks:
            gbk_id = gbk_file.split('.cluster')[0]
            gbk_set.add(gbk_id)
        for line in reader:
            if line[1] in gbk_set:
                tid = line[2]
                organism = tid_to_name(tid, nt=nt)
                gbk_dd[line[1]] = [tid, organism]
    i = 0  # count of clusters with no taxonomy found
    for gbk_file in gbks:
        gbk_id = gbk_file.split('.cluster')[0]
        tid_org = gbk_dd[gbk_id]
        if not tid_org:
            print('Error getting taxonomy for %s for cluster file %s' % (gbk_id, gbk_file))
            logging.warning('Error getting taxonomy for %s for cluster file %s' % (gbk_id, gbk_file))
            tid_org = ['na', 'k__None;p__None;c__None;o__None;f__None;g__None;s__None;t__None']
            i += 1
        gbk_filepath = os.path.join(gbkpath, gbk_file)
        parse_aa_seqs(gbk_file, tid_org, gbk_filepath, outpath)
        parse_dna_seqs(gbk_file, tid_org, gbk_filepath, outpath)
    # FIX: parse_cluster_types scans *every* .gbk in gbkpath on each call;
    # the original invoked it inside the per-file loop, redoing the full
    # directory scan and rewriting the CSV once per file (O(n^2)). The
    # final CSV is identical when it is called once, after the loop.
    parse_cluster_types(gbkpath, outpath, gbk_dd)
    if not args.no_compile:
        compile_files(outpath)
    logging.warning('DOJO could not acquire NCBI tid information for %s clusters' % i)
|
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import testscenarios
from solum.api.controllers.v1.datamodel import operation as operationmodel
from solum.api.controllers.v1 import operation
from solum.common import exception
from solum import objects
from solum.tests import base
from solum.tests import fakes
load_tests = testscenarios.load_tests_apply_scenarios
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.handlers.operation_handler.OperationHandler')
class TestOperationController(base.BaseTestCase):
    """REST-level tests for OperationController (GET/PUT/DELETE).

    The mock.patch decorators above the class inject, after self:
    OperationHandler (the handler class mock), resp_mock (fake pecan
    response), and request_mock (fake pecan request), in that order.
    """
    def setUp(self):
        super(TestOperationController, self).setUp()
        objects.load()
    def test_operation_get(self, OperationHandler, resp_mock, request_mock):
        # Handler.get returns an operation -> controller answers 200.
        hand_get = OperationHandler.return_value.get
        hand_get.return_value = fakes.FakeOperation()
        cont = operation.OperationController('test_id')
        cont.get()
        hand_get.assert_called_with('test_id')
        self.assertEqual(200, resp_mock.status)
    def test_operation_get_not_found(self, OperationHandler, resp_mock,
                                     request_mock):
        # Handler.get raises ResourceNotFound -> 404.
        hand_get = OperationHandler.return_value.get
        hand_get.side_effect = exception.ResourceNotFound(
            id='test_id', name='operation')
        cont = operation.OperationController('test_id')
        cont.get()
        hand_get.assert_called_with('test_id')
        self.assertEqual(404, resp_mock.status)
    def test_operation_put_none(self, OperationHandler, resp_mock,
                                request_mock):
        # PUT with an empty body -> 400, handler never consulted.
        request_mock.body = None
        request_mock.content_type = 'application/json'
        hand_put = OperationHandler.return_value.put
        hand_put.return_value = fakes.FakeOperation()
        operation.OperationController('test_id').put()
        self.assertEqual(400, resp_mock.status)
    def test_operation_put_not_found(self, OperationHandler, resp_mock,
                                     request_mock):
        # PUT on a missing resource -> 404 from ResourceNotFound.
        json_update = {'name': 'foo'}
        request_mock.body = json.dumps(json_update)
        request_mock.content_type = 'application/json'
        hand_update = OperationHandler.return_value.update
        hand_update.side_effect = exception.ResourceNotFound(
            id='test_id', name='operation')
        operation.OperationController('test_id').put()
        hand_update.assert_called_with('test_id', json_update)
        self.assertEqual(404, resp_mock.status)
    def test_operation_put_ok(self, OperationHandler, resp_mock, request_mock):
        # Valid PUT -> handler.update called with the parsed body, 200.
        json_update = {'name': 'foo'}
        request_mock.body = json.dumps(json_update)
        request_mock.content_type = 'application/json'
        hand_update = OperationHandler.return_value.update
        hand_update.return_value = fakes.FakeOperation()
        operation.OperationController('test_id').put()
        hand_update.assert_called_with('test_id', json_update)
        self.assertEqual(200, resp_mock.status)
    def test_operation_delete_not_found(self, OperationHandler,
                                        resp_mock, request_mock):
        # DELETE on a missing resource -> 404.
        hand_delete = OperationHandler.return_value.delete
        hand_delete.side_effect = exception.ResourceNotFound(
            id='test_id', name='operation')
        obj = operation.OperationController('test_id')
        obj.delete()
        hand_delete.assert_called_with('test_id')
        self.assertEqual(404, resp_mock.status)
    def test_operation_delete_ok(self, OperationHandler, resp_mock,
                                 request_mock):
        # Successful DELETE -> 204 No Content.
        hand_delete = OperationHandler.return_value.delete
        hand_delete.return_value = None
        obj = operation.OperationController('test_id')
        obj.delete()
        hand_delete.assert_called_with('test_id')
        self.assertEqual(204, resp_mock.status)
class TestOperationAsDict(base.BaseTestCase):
    """Scenario-driven test of Operation.as_dict.

    testscenarios runs test_as_dict once per (name, dict) entry in
    `scenarios`; each scenario's `data` holds the kwargs used to build
    the Operation under test.
    """
    scenarios = [
        ('none', dict(data=None)),
        ('one', dict(data={'name': 'foo'})),
        ('full', dict(data={'uri': 'http://example.com/v1/operations/x1',
                            'name': 'Example operation',
                            'type': 'operation',
                            'project_id': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
                            'user_id': '55f41cf46df74320b9486a35f5d28a11',
                            'documentation':
                            'http://example.com/docs/resume_op',
                            'target_resource':
                            'http://example.com/instances/uuid'}))
    ]
    def test_as_dict(self):
        objects.load()
        if self.data is None:
            s = operationmodel.Operation()
            self.data = {}
        else:
            s = operationmodel.Operation(**self.data)
        # 'uri' and 'type' are not persisted fields, so they are stripped
        # before comparing against the as_dict() output.
        if 'uri' in self.data:
            del self.data['uri']
        if 'type' in self.data:
            del self.data['type']
        self.assertEqual(self.data, s.as_dict(objects.registry.Operation))
Add missing tests for Operations Controller
post, post_nodata, get_all tests were missing for Operations Controller.
Change-Id: I39f92d7a12df5d51c1cb79561eaf4628360ca037
Closes-Bug: #1313721
# Copyright 2013 - Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
import testscenarios
from solum.api.controllers.v1.datamodel import operation as operationmodel
from solum.api.controllers.v1 import operation
from solum.common import exception
from solum import objects
from solum.tests import base
from solum.tests import fakes
# Let testscenarios expand each class's ``scenarios`` list into
# individual test cases when the test loader collects this module.
load_tests = testscenarios.load_tests_apply_scenarios
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.handlers.operation_handler.OperationHandler')
class TestOperationController(base.BaseTestCase):
    """Tests for the single-operation REST controller.

    The mock.patch decorators inject (in reverse order) the patched
    handler class, the fake pecan response and the fake pecan request.
    """

    def setUp(self):
        super(TestOperationController, self).setUp()
        objects.load()

    def test_operation_get(self, OperationHandler, resp_mock, request_mock):
        """GET of an existing operation returns HTTP 200."""
        mocked_get = OperationHandler.return_value.get
        mocked_get.return_value = fakes.FakeOperation()
        controller = operation.OperationController('test_id')
        controller.get()
        mocked_get.assert_called_with('test_id')
        self.assertEqual(200, resp_mock.status)

    def test_operation_get_not_found(self, OperationHandler, resp_mock,
                                     request_mock):
        """GET of a missing operation returns HTTP 404."""
        mocked_get = OperationHandler.return_value.get
        mocked_get.side_effect = exception.ResourceNotFound(
            id='test_id', name='operation')
        controller = operation.OperationController('test_id')
        controller.get()
        mocked_get.assert_called_with('test_id')
        self.assertEqual(404, resp_mock.status)

    def test_operation_put_none(self, OperationHandler, resp_mock,
                                request_mock):
        """PUT without a request body returns HTTP 400."""
        request_mock.body = None
        request_mock.content_type = 'application/json'
        mocked_put = OperationHandler.return_value.put
        mocked_put.return_value = fakes.FakeOperation()
        operation.OperationController('test_id').put()
        self.assertEqual(400, resp_mock.status)

    def test_operation_put_not_found(self, OperationHandler, resp_mock,
                                     request_mock):
        """PUT to a missing operation returns HTTP 404."""
        update_body = {'name': 'foo'}
        request_mock.body = json.dumps(update_body)
        request_mock.content_type = 'application/json'
        mocked_update = OperationHandler.return_value.update
        mocked_update.side_effect = exception.ResourceNotFound(
            id='test_id', name='operation')
        operation.OperationController('test_id').put()
        mocked_update.assert_called_with('test_id', update_body)
        self.assertEqual(404, resp_mock.status)

    def test_operation_put_ok(self, OperationHandler, resp_mock, request_mock):
        """PUT with a valid body updates the operation and returns 200."""
        update_body = {'name': 'foo'}
        request_mock.body = json.dumps(update_body)
        request_mock.content_type = 'application/json'
        mocked_update = OperationHandler.return_value.update
        mocked_update.return_value = fakes.FakeOperation()
        operation.OperationController('test_id').put()
        mocked_update.assert_called_with('test_id', update_body)
        self.assertEqual(200, resp_mock.status)

    def test_operation_delete_not_found(self, OperationHandler,
                                        resp_mock, request_mock):
        """DELETE of a missing operation returns HTTP 404."""
        mocked_delete = OperationHandler.return_value.delete
        mocked_delete.side_effect = exception.ResourceNotFound(
            id='test_id', name='operation')
        controller = operation.OperationController('test_id')
        controller.delete()
        mocked_delete.assert_called_with('test_id')
        self.assertEqual(404, resp_mock.status)

    def test_operation_delete_ok(self, OperationHandler, resp_mock,
                                 request_mock):
        """DELETE of an existing operation returns HTTP 204."""
        mocked_delete = OperationHandler.return_value.delete
        mocked_delete.return_value = None
        controller = operation.OperationController('test_id')
        controller.delete()
        mocked_delete.assert_called_with('test_id')
        self.assertEqual(204, resp_mock.status)
@mock.patch('pecan.request', new_callable=fakes.FakePecanRequest)
@mock.patch('pecan.response', new_callable=fakes.FakePecanResponse)
@mock.patch('solum.api.handlers.operation_handler.OperationHandler')
class TestOperationsController(base.BaseTestCase):
    """Tests for the operations collection REST controller."""

    def setUp(self):
        super(TestOperationsController, self).setUp()
        objects.load()

    def test_operations_get_all(self, handler_mock, resp_mock, request_mock):
        """GET on the collection returns 200 and a non-None result."""
        hand_get_all = handler_mock.return_value.get_all
        hand_get_all.return_value = [fakes.FakeOperation()]
        obj = operation.OperationsController()
        resp = obj.get_all()
        self.assertIsNotNone(resp)
        self.assertEqual(200, resp_mock.status)

    def test_operations_post(self, handler_mock, resp_mock, request_mock):
        """POST with a valid body creates the operation and returns 201."""
        json_create = {'name': 'foo',
                       'description': 'test_desc_operation',
                       'user_id': 'user_id_test',
                       'project_id': 'project_id_test'}
        request_mock.body = json.dumps(json_create)
        request_mock.content_type = 'application/json'
        handler_create = handler_mock.return_value.create
        handler_create.return_value = fakes.FakeOperation()
        operation.OperationsController().post()
        # assert_called_once_with subsumes the weaker assert_called_with
        # that previously duplicated this check.
        handler_create.assert_called_once_with(json_create)
        self.assertEqual(201, resp_mock.status)

    def test_operations_post_nodata(self, handler_mock,
                                    resp_mock, request_mock):
        """POST with an empty body returns 400 and a fault string."""
        request_mock.body = ''
        request_mock.content_type = 'application/json'
        handler_create = handler_mock.return_value.create
        # Consistency fix: this suite tests operations, so the stubbed
        # handler returns a FakeOperation (was FakeComponent, a copy-paste
        # slip; the value is never reached because the request fails).
        handler_create.return_value = fakes.FakeOperation()
        ret_val = operation.OperationsController().post()
        self.assertEqual("Missing argument: \"data\"",
                         str(ret_val['faultstring']))
        self.assertEqual(400, resp_mock.status)
class TestOperationAsDict(base.BaseTestCase):
    """Scenario tests for Operation.as_dict round-tripping."""

    scenarios = [
        ('none', dict(data=None)),
        ('one', dict(data={'name': 'foo'})),
        ('full', dict(data={'uri': 'http://example.com/v1/operations/x1',
                            'name': 'Example operation',
                            'type': 'operation',
                            'project_id': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
                            'user_id': '55f41cf46df74320b9486a35f5d28a11',
                            'documentation':
                                'http://example.com/docs/resume_op',
                            'target_resource':
                                'http://example.com/instances/uuid'}))
    ]

    def test_as_dict(self):
        """as_dict omits the read-only 'uri' and 'type' attributes."""
        objects.load()
        if self.data is None:
            model = operationmodel.Operation()
            self.data = {}
        else:
            model = operationmodel.Operation(**self.data)
        for readonly_attr in ('uri', 'type'):
            if readonly_attr in self.data:
                del self.data[readonly_attr]
        self.assertEqual(self.data, model.as_dict(objects.registry.Operation))
|
# vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2008 Huang Peng <shawn.p.huang@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
__all__ = (
"KeyboardShortcutSelection",
"KeyboardShortcutSelectionDialog",
);
import gobject
import gtk
from gtk import gdk
from gettext import dgettext
def _(message):
    """Translate *message* using the "ibus" gettext domain (PEP 8: use
    def instead of assigning a lambda)."""
    return dgettext("ibus", message)

def N_(message):
    """Mark *message* for extraction without translating at runtime."""
    return message
class KeyboardShortcutSelection(gtk.VBox):
    """Editor widget for a list of keyboard shortcut strings.

    A shortcut is encoded as "+"-joined modifier names followed by a gdk
    key name, e.g. "Control+Shift+space".  The widget shows the current
    shortcuts in a tree view and lets the user add, modify or delete
    entries via a key-code entry, modifier check buttons and
    Add/Apply/Delete buttons.
    """
    def __init__(self, shortcuts = None):
        super(KeyboardShortcutSelection, self).__init__()
        self.__init_ui()
        self.set_shortcuts(shortcuts)

    def __init_ui(self):
        """Create all child widgets and hook up their signal handlers."""
        # label = gtk.Label(_("Keyboard shortcuts:"))
        # label.set_justify(gtk.JUSTIFY_LEFT)
        # label.set_alignment(0.0, 0.5)
        # self.pack_start(label, False, True, 4)

        # shortcuts view
        viewport = gtk.Viewport()
        viewport.set_shadow_type(gtk.SHADOW_IN)
        self.__shortcut_view = gtk.TreeView(gtk.ListStore(gobject.TYPE_STRING))
        self.__shortcut_view.set_size_request(-1, 100)
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn(_("Keyboard shortcuts"), renderer, text = 0)
        self.__shortcut_view.append_column(column)
        self.__shortcut_view.connect("cursor-changed", self.__shortcut_view_cursor_changed_cb)
        viewport.add(self.__shortcut_view)
        self.pack_start(viewport, True, True, 4)

        # key code
        hbox = gtk.HBox()
        label = gtk.Label(_("Key code:"))
        label.set_justify(gtk.JUSTIFY_LEFT)
        label.set_alignment(0.0, 0.5)
        hbox.pack_start(label, False, True, 4)
        self.__keycode_entry = gtk.Entry()
        self.__keycode_entry.connect("notify::text", self.__keycode_entry_notify_cb)
        hbox.pack_start(self.__keycode_entry, True, True, 4)
        self.__keycode_button = gtk.Button("...")
        self.__keycode_button.connect("clicked", self.__keycode_button_clicked_cb)
        hbox.pack_start(self.__keycode_button, False, True, 4)
        self.pack_start(hbox, False, True, 4)

        # modifiers
        hbox = gtk.HBox()
        label = gtk.Label(_("Modifiers:"))
        label.set_justify(gtk.JUSTIFY_LEFT)
        label.set_alignment(0.0, 0.5)
        hbox.pack_start(label, False, True, 4)
        table = gtk.Table(4, 2)
        self.__modifier_buttons = []
        self.__modifier_buttons.append(("Control", gtk.CheckButton("_Control"), gdk.CONTROL_MASK))
        self.__modifier_buttons.append(("Alt", gtk.CheckButton("A_lt"), gdk.MOD1_MASK))
        self.__modifier_buttons.append(("Shift", gtk.CheckButton("_Shift"), gdk.SHIFT_MASK))
        self.__modifier_buttons.append(("Meta", gtk.CheckButton("_Meta"), gdk.META_MASK))
        self.__modifier_buttons.append(("Super", gtk.CheckButton("S_uper"), gdk.SUPER_MASK))
        self.__modifier_buttons.append(("Hyper", gtk.CheckButton("_Hyper"), gdk.HYPER_MASK))
        self.__modifier_buttons.append(("Capslock", gtk.CheckButton("Capsloc_k"), gdk.LOCK_MASK))
        self.__modifier_buttons.append(("Release", gtk.CheckButton("_Release"), gdk.RELEASE_MASK))
        for name, button, mask in self.__modifier_buttons:
            button.connect("toggled", self.__modifier_button_toggled_cb, name)
        table.attach(self.__modifier_buttons[0][1], 0, 1, 0, 1)
        table.attach(self.__modifier_buttons[1][1], 1, 2, 0, 1)
        table.attach(self.__modifier_buttons[2][1], 2, 3, 0, 1)
        table.attach(self.__modifier_buttons[3][1], 3, 4, 0, 1)
        table.attach(self.__modifier_buttons[4][1], 0, 1, 1, 2)
        table.attach(self.__modifier_buttons[5][1], 1, 2, 1, 2)
        table.attach(self.__modifier_buttons[6][1], 2, 3, 1, 2)
        table.attach(self.__modifier_buttons[7][1], 3, 4, 1, 2)
        hbox.pack_start(table, True, True, 4)
        self.pack_start(hbox, False, True, 4)

        # buttons
        hbox = gtk.HBox()
        # add button
        self.__add_button = gtk.Button(stock = gtk.STOCK_ADD)
        self.__add_button.connect("clicked", self.__add_button_clicked_cb)
        hbox.pack_start(self.__add_button)
        # apply button
        self.__apply_button = gtk.Button(stock = gtk.STOCK_APPLY)
        self.__apply_button.connect("clicked", self.__apply_button_clicked_cb)
        hbox.pack_start(self.__apply_button)
        # delete button
        self.__delete_button = gtk.Button(stock = gtk.STOCK_DELETE)
        self.__delete_button.connect("clicked", self.__delete_button_clicked_cb)
        hbox.pack_start(self.__delete_button)
        self.pack_start(hbox, False, True, 4)

    def set_shortcuts(self, shortcuts = None):
        """Replace the listed shortcuts; None clears the list.

        NOTE(review): entries are inserted at position 0, so they appear
        in reverse of the given order -- confirm this is intended.
        """
        if shortcuts is None:
            shortcuts = []
        model = self.__shortcut_view.get_model()
        model.clear()
        for shortcut in shortcuts:
            model.insert(0, (shortcut,))

    def get_shortcuts(self):
        """Return the currently listed shortcut strings as a list."""
        model = self.__shortcut_view.get_model()
        return [row[0] for row in model]

    def add_shortcut(self, shortcut):
        """Append one shortcut; silently ignored once six entries exist."""
        model = self.__shortcut_view.get_model()
        if len(model) >= 6:
            return
        model.insert(-1, (shortcut,))

    def __get_shortcut_from_buttons(self):
        """Build a shortcut string from the entry and check buttons.

        Returns None when the key-code entry does not name a valid key.
        """
        modifiers = []
        keycode = self.__keycode_entry.get_text()
        if gdk.keyval_from_name(keycode) == 0:
            return None
        for name, button, mask in self.__modifier_buttons:
            if button.get_active():
                modifiers.append(name)
        if keycode.startswith("_"):
            keycode = keycode[1:]
        keys = modifiers + [keycode]
        shortcut = "+".join(keys)
        return shortcut

    def __set_shortcut_to_buttons(self, shortcut):
        """Reflect *shortcut* into the modifier buttons and key entry."""
        keys = shortcut.split("+")
        mods = keys[:-1]
        for name, button, mask in self.__modifier_buttons:
            if name in mods:
                button.set_active(True)
            else:
                button.set_active(False)
        self.__keycode_entry.set_text(keys[-1])

    def __get_selected_shortcut(self):
        """Return the shortcut under the tree-view cursor, or None."""
        model = self.__shortcut_view.get_model()
        path, column = self.__shortcut_view.get_cursor()
        if path is None:
            return None
        return model[path[0]][0]

    def __set_selected_shortcut(self, shortcut):
        """Overwrite the shortcut under the cursor with *shortcut*."""
        model = self.__shortcut_view.get_model()
        path, column = self.__shortcut_view.get_cursor()
        model[path[0]][0] = shortcut

    def __del_selected_shortcut(self):
        """Remove the shortcut row under the cursor."""
        model = self.__shortcut_view.get_model()
        path, column = self.__shortcut_view.get_cursor()
        del model[path[0]]

    def __shortcut_view_cursor_changed_cb(self, treeview):
        shortcut = self.__get_selected_shortcut()
        # Bug fix: only mirror the selection into the buttons when a row
        # is actually selected; the old code called
        # __set_shortcut_to_buttons(None) first, which raised
        # AttributeError on None.split("+").
        if shortcut is not None:
            self.__set_shortcut_to_buttons(shortcut)
            self.__delete_button.set_sensitive(True)
        else:
            self.__delete_button.set_sensitive(False)

    def __modifier_button_toggled_cb(self, button, name):
        """Recompute Add/Apply sensitivity after a modifier change."""
        shortcut = self.__get_shortcut_from_buttons()
        selected_shortcut = self.__get_selected_shortcut()
        self.__add_button.set_sensitive(shortcut != None)
        can_apply = shortcut != selected_shortcut and shortcut != None and selected_shortcut != None
        self.__apply_button.set_sensitive(can_apply)

    def __keycode_entry_notify_cb(self, entry, arg):
        """Recompute Add/Apply sensitivity after the key code changed."""
        shortcut = self.__get_shortcut_from_buttons()
        selected_shortcut = self.__get_selected_shortcut()
        self.__add_button.set_sensitive(shortcut != None)
        can_apply = shortcut != selected_shortcut and shortcut != None and selected_shortcut != None
        self.__apply_button.set_sensitive(can_apply)

    def __keycode_button_clicked_cb(self, button):
        """Grab a key combination from the user via a modal dialog and
        copy it into the modifier buttons and key-code entry."""
        out = []
        dlg = gtk.MessageDialog(parent = self.get_toplevel(), buttons = gtk.BUTTONS_CLOSE)
        message = _("Please press a key (or a key combination).\nThe dialog will be closed when the key is released.")
        dlg.set_markup(message)
        dlg.set_title(_("Please press a key (or a key combination)"))
        def __key_release_event(d, k, out):
            out.append(k.copy())
            d.response(gtk.RESPONSE_OK)
        dlg.connect("key-release-event", __key_release_event, out)
        # 'response_id' instead of 'id', which shadows the builtin.
        response_id = dlg.run()
        dlg.destroy()
        if response_id != gtk.RESPONSE_OK or not out:
            return
        keyevent = out[0]
        for name, button, mask in self.__modifier_buttons:
            if keyevent.state & mask:
                button.set_active(True)
            else:
                button.set_active(False)
        self.__keycode_entry.set_text(gdk.keyval_name(keyevent.keyval))

    def __add_button_clicked_cb(self, button):
        """Append the shortcut currently described by the buttons."""
        shortcut = self.__get_shortcut_from_buttons()
        self.add_shortcut(shortcut)

    def __apply_button_clicked_cb(self, button):
        """Overwrite the selected shortcut with the buttons' state."""
        shortcut = self.__get_shortcut_from_buttons()
        self.__set_selected_shortcut(shortcut)

    def __delete_button_clicked_cb(self, button):
        """Delete the selected shortcut and disable Delete/Apply."""
        self.__del_selected_shortcut()
        self.__delete_button.set_sensitive(False)
        self.__apply_button.set_sensitive(False)
class KeyboardShortcutSelectionDialog(gtk.Dialog):
    """Dialog wrapping a KeyboardShortcutSelection widget."""
    def __init__(self, title = None, parent = None, flags = 0, buttons = None):
        super(KeyboardShortcutSelectionDialog, self).__init__(title, parent, flags, buttons)
        self.__selection_view = KeyboardShortcutSelection()
        self.vbox.pack_start(self.__selection_view)
        self.vbox.show_all()
    # NOTE(review): "shotrcuts"/"shotrcut" are typos for "shortcuts"/
    # "shortcut"; kept unchanged because callers may pass them as
    # keyword arguments.
    def set_shortcuts(self, shotrcuts = None):
        """Replace the displayed shortcut list; None clears it."""
        self.__selection_view.set_shortcuts(shotrcuts)
    def add_shortcut(self, shotrcut):
        """Append one shortcut string to the list."""
        self.__selection_view.add_shortcut(shotrcut)
    def get_shortcuts(self):
        """Return the currently listed shortcut strings."""
        return self.__selection_view.get_shortcuts()
if __name__ == "__main__":
    # Manual smoke test: open the dialog, then print the dialog response
    # and the resulting shortcut list (Python 2 print statements).
    dlg = KeyboardShortcutSelectionDialog(
        title = "Select test",
        buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK))
    # NOTE(review): set_shortcuts(None) clears the list right after the
    # add_shortcut call above -- presumably intentional for testing both
    # paths; confirm.
    dlg.add_shortcut("Control+Shift+space")
    dlg.set_shortcuts(None)
    print dlg.run()
    print dlg.get_shortcuts()
Add release mask if the hotkey is a single key.
# vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2008 Huang Peng <shawn.p.huang@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
__all__ = (
"KeyboardShortcutSelection",
"KeyboardShortcutSelectionDialog",
);
import gobject
import gtk
from gtk import gdk
from gtk import keysyms
from gettext import dgettext
def _(message):
    """Translate *message* using the "ibus" gettext domain (PEP 8: use
    def instead of assigning a lambda)."""
    return dgettext("ibus", message)

def N_(message):
    """Mark *message* for extraction without translating at runtime."""
    return message
class KeyboardShortcutSelection(gtk.VBox):
    """Editor widget for a list of keyboard shortcut strings.

    A shortcut is encoded as "+"-joined modifier names followed by a gdk
    key name, e.g. "Control+Shift+space".  The widget shows the current
    shortcuts in a tree view and lets the user add, modify or delete
    entries via a key-code entry, modifier check buttons and
    Add/Apply/Delete buttons.
    """
    def __init__(self, shortcuts = None):
        super(KeyboardShortcutSelection, self).__init__()
        self.__init_ui()
        self.set_shortcuts(shortcuts)

    def __init_ui(self):
        """Create all child widgets and hook up their signal handlers."""
        # label = gtk.Label(_("Keyboard shortcuts:"))
        # label.set_justify(gtk.JUSTIFY_LEFT)
        # label.set_alignment(0.0, 0.5)
        # self.pack_start(label, False, True, 4)

        # shortcuts view
        viewport = gtk.Viewport()
        viewport.set_shadow_type(gtk.SHADOW_IN)
        self.__shortcut_view = gtk.TreeView(gtk.ListStore(gobject.TYPE_STRING))
        self.__shortcut_view.set_size_request(-1, 100)
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn(_("Keyboard shortcuts"), renderer, text = 0)
        self.__shortcut_view.append_column(column)
        self.__shortcut_view.connect("cursor-changed", self.__shortcut_view_cursor_changed_cb)
        viewport.add(self.__shortcut_view)
        self.pack_start(viewport, True, True, 4)

        # key code
        hbox = gtk.HBox()
        label = gtk.Label(_("Key code:"))
        label.set_justify(gtk.JUSTIFY_LEFT)
        label.set_alignment(0.0, 0.5)
        hbox.pack_start(label, False, True, 4)
        self.__keycode_entry = gtk.Entry()
        self.__keycode_entry.connect("notify::text", self.__keycode_entry_notify_cb)
        hbox.pack_start(self.__keycode_entry, True, True, 4)
        self.__keycode_button = gtk.Button("...")
        self.__keycode_button.connect("clicked", self.__keycode_button_clicked_cb)
        hbox.pack_start(self.__keycode_button, False, True, 4)
        self.pack_start(hbox, False, True, 4)

        # modifiers
        hbox = gtk.HBox()
        label = gtk.Label(_("Modifiers:"))
        label.set_justify(gtk.JUSTIFY_LEFT)
        label.set_alignment(0.0, 0.5)
        hbox.pack_start(label, False, True, 4)
        table = gtk.Table(4, 2)
        self.__modifier_buttons = []
        self.__modifier_buttons.append(("Control", gtk.CheckButton("_Control"), gdk.CONTROL_MASK))
        self.__modifier_buttons.append(("Alt", gtk.CheckButton("A_lt"), gdk.MOD1_MASK))
        self.__modifier_buttons.append(("Shift", gtk.CheckButton("_Shift"), gdk.SHIFT_MASK))
        self.__modifier_buttons.append(("Meta", gtk.CheckButton("_Meta"), gdk.META_MASK))
        self.__modifier_buttons.append(("Super", gtk.CheckButton("S_uper"), gdk.SUPER_MASK))
        self.__modifier_buttons.append(("Hyper", gtk.CheckButton("_Hyper"), gdk.HYPER_MASK))
        self.__modifier_buttons.append(("Capslock", gtk.CheckButton("Capsloc_k"), gdk.LOCK_MASK))
        self.__modifier_buttons.append(("Release", gtk.CheckButton("_Release"), gdk.RELEASE_MASK))
        for name, button, mask in self.__modifier_buttons:
            button.connect("toggled", self.__modifier_button_toggled_cb, name)
        table.attach(self.__modifier_buttons[0][1], 0, 1, 0, 1)
        table.attach(self.__modifier_buttons[1][1], 1, 2, 0, 1)
        table.attach(self.__modifier_buttons[2][1], 2, 3, 0, 1)
        table.attach(self.__modifier_buttons[3][1], 3, 4, 0, 1)
        table.attach(self.__modifier_buttons[4][1], 0, 1, 1, 2)
        table.attach(self.__modifier_buttons[5][1], 1, 2, 1, 2)
        table.attach(self.__modifier_buttons[6][1], 2, 3, 1, 2)
        table.attach(self.__modifier_buttons[7][1], 3, 4, 1, 2)
        hbox.pack_start(table, True, True, 4)
        self.pack_start(hbox, False, True, 4)

        # buttons
        hbox = gtk.HBox()
        # add button
        self.__add_button = gtk.Button(stock = gtk.STOCK_ADD)
        self.__add_button.connect("clicked", self.__add_button_clicked_cb)
        hbox.pack_start(self.__add_button)
        # apply button
        self.__apply_button = gtk.Button(stock = gtk.STOCK_APPLY)
        self.__apply_button.connect("clicked", self.__apply_button_clicked_cb)
        hbox.pack_start(self.__apply_button)
        # delete button
        self.__delete_button = gtk.Button(stock = gtk.STOCK_DELETE)
        self.__delete_button.connect("clicked", self.__delete_button_clicked_cb)
        hbox.pack_start(self.__delete_button)
        self.pack_start(hbox, False, True, 4)

    def set_shortcuts(self, shortcuts = None):
        """Replace the listed shortcuts; None clears the list.

        NOTE(review): entries are inserted at position 0, so they appear
        in reverse of the given order -- confirm this is intended.
        """
        if shortcuts is None:
            shortcuts = []
        model = self.__shortcut_view.get_model()
        model.clear()
        for shortcut in shortcuts:
            model.insert(0, (shortcut,))

    def get_shortcuts(self):
        """Return the currently listed shortcut strings as a list."""
        model = self.__shortcut_view.get_model()
        return [row[0] for row in model]

    def add_shortcut(self, shortcut):
        """Append one shortcut; silently ignored once six entries exist."""
        model = self.__shortcut_view.get_model()
        if len(model) >= 6:
            return
        model.insert(-1, (shortcut,))

    def __get_shortcut_from_buttons(self):
        """Build a shortcut string from the entry and check buttons.

        Returns None when the key-code entry does not name a valid key.
        """
        modifiers = []
        keycode = self.__keycode_entry.get_text()
        if gdk.keyval_from_name(keycode) == 0:
            return None
        for name, button, mask in self.__modifier_buttons:
            if button.get_active():
                modifiers.append(name)
        if keycode.startswith("_"):
            keycode = keycode[1:]
        keys = modifiers + [keycode]
        shortcut = "+".join(keys)
        return shortcut

    def __set_shortcut_to_buttons(self, shortcut):
        """Reflect *shortcut* into the modifier buttons and key entry."""
        keys = shortcut.split("+")
        mods = keys[:-1]
        for name, button, mask in self.__modifier_buttons:
            if name in mods:
                button.set_active(True)
            else:
                button.set_active(False)
        self.__keycode_entry.set_text(keys[-1])

    def __get_selected_shortcut(self):
        """Return the shortcut under the tree-view cursor, or None."""
        model = self.__shortcut_view.get_model()
        path, column = self.__shortcut_view.get_cursor()
        if path is None:
            return None
        return model[path[0]][0]

    def __set_selected_shortcut(self, shortcut):
        """Overwrite the shortcut under the cursor with *shortcut*."""
        model = self.__shortcut_view.get_model()
        path, column = self.__shortcut_view.get_cursor()
        model[path[0]][0] = shortcut

    def __del_selected_shortcut(self):
        """Remove the shortcut row under the cursor."""
        model = self.__shortcut_view.get_model()
        path, column = self.__shortcut_view.get_cursor()
        del model[path[0]]

    def __shortcut_view_cursor_changed_cb(self, treeview):
        shortcut = self.__get_selected_shortcut()
        # Bug fix: only mirror the selection into the buttons when a row
        # is actually selected; the old code called
        # __set_shortcut_to_buttons(None) first, which raised
        # AttributeError on None.split("+").
        if shortcut is not None:
            self.__set_shortcut_to_buttons(shortcut)
            self.__delete_button.set_sensitive(True)
        else:
            self.__delete_button.set_sensitive(False)

    def __modifier_button_toggled_cb(self, button, name):
        """Recompute Add/Apply sensitivity after a modifier change."""
        shortcut = self.__get_shortcut_from_buttons()
        selected_shortcut = self.__get_selected_shortcut()
        self.__add_button.set_sensitive(shortcut != None)
        can_apply = shortcut != selected_shortcut and shortcut != None and selected_shortcut != None
        self.__apply_button.set_sensitive(can_apply)

    def __keycode_entry_notify_cb(self, entry, arg):
        """Recompute Add/Apply sensitivity after the key code changed."""
        shortcut = self.__get_shortcut_from_buttons()
        selected_shortcut = self.__get_selected_shortcut()
        self.__add_button.set_sensitive(shortcut != None)
        can_apply = shortcut != selected_shortcut and shortcut != None and selected_shortcut != None
        self.__apply_button.set_sensitive(can_apply)

    def __keycode_button_clicked_cb(self, button):
        """Grab a key combination from the user via a modal dialog and
        copy it into the modifier buttons and key-code entry.

        When the grabbed hotkey is a bare key or a lone modifier key,
        the Release modifier is set so the shortcut triggers on key
        release.
        """
        out = []
        dlg = gtk.MessageDialog(parent = self.get_toplevel(), buttons = gtk.BUTTONS_CLOSE)
        message = _("Please press a key (or a key combination).\nThe dialog will be closed when the key is released.")
        dlg.set_markup(message)
        dlg.set_title(_("Please press a key (or a key combination)"))
        def __key_release_event(d, k, out):
            out.append(k.copy())
            d.response(gtk.RESPONSE_OK)
        dlg.connect("key-release-event", __key_release_event, out)
        # 'response_id' instead of 'id', which shadows the builtin.
        response_id = dlg.run()
        dlg.destroy()
        if response_id != gtk.RESPONSE_OK or not out:
            return
        keyevent = out[0]
        # Keep only the modifier bits we expose as check buttons.
        state = keyevent.state & (gdk.CONTROL_MASK | \
                gdk.SHIFT_MASK | \
                gdk.MOD1_MASK | \
                gdk.META_MASK | \
                gdk.SUPER_MASK | \
                gdk.HYPER_MASK)
        # A bare key, or a modifier key pressed alone (its own mask is
        # the only one set), only makes sense as a release-triggered
        # shortcut.
        if state == 0:
            state = state | gdk.RELEASE_MASK
        elif keyevent.keyval in (keysyms.Control_L, keysyms.Control_R) and state == gdk.CONTROL_MASK:
            state = state | gdk.RELEASE_MASK
        elif keyevent.keyval in (keysyms.Shift_L, keysyms.Shift_R) and state == gdk.SHIFT_MASK:
            state = state | gdk.RELEASE_MASK
        elif keyevent.keyval in (keysyms.Alt_L, keysyms.Alt_R) and state == gdk.MOD1_MASK:
            state = state | gdk.RELEASE_MASK
        elif keyevent.keyval in (keysyms.Meta_L, keysyms.Meta_R) and state == gdk.META_MASK:
            state = state | gdk.RELEASE_MASK
        elif keyevent.keyval in (keysyms.Super_L, keysyms.Super_R) and state == gdk.SUPER_MASK:
            state = state | gdk.RELEASE_MASK
        elif keyevent.keyval in (keysyms.Hyper_L, keysyms.Hyper_R) and state == gdk.HYPER_MASK:
            state = state | gdk.RELEASE_MASK
        for name, button, mask in self.__modifier_buttons:
            if state & mask:
                button.set_active(True)
            else:
                button.set_active(False)
        self.__keycode_entry.set_text(gdk.keyval_name(keyevent.keyval))

    def __add_button_clicked_cb(self, button):
        """Append the shortcut currently described by the buttons."""
        shortcut = self.__get_shortcut_from_buttons()
        self.add_shortcut(shortcut)

    def __apply_button_clicked_cb(self, button):
        """Overwrite the selected shortcut with the buttons' state."""
        shortcut = self.__get_shortcut_from_buttons()
        self.__set_selected_shortcut(shortcut)

    def __delete_button_clicked_cb(self, button):
        """Delete the selected shortcut and disable Delete/Apply."""
        self.__del_selected_shortcut()
        self.__delete_button.set_sensitive(False)
        self.__apply_button.set_sensitive(False)
class KeyboardShortcutSelectionDialog(gtk.Dialog):
    """Dialog wrapping a KeyboardShortcutSelection widget."""
    def __init__(self, title = None, parent = None, flags = 0, buttons = None):
        super(KeyboardShortcutSelectionDialog, self).__init__(title, parent, flags, buttons)
        self.__selection_view = KeyboardShortcutSelection()
        self.vbox.pack_start(self.__selection_view)
        self.vbox.show_all()
    # NOTE(review): "shotrcuts"/"shotrcut" are typos for "shortcuts"/
    # "shortcut"; kept unchanged because callers may pass them as
    # keyword arguments.
    def set_shortcuts(self, shotrcuts = None):
        """Replace the displayed shortcut list; None clears it."""
        self.__selection_view.set_shortcuts(shotrcuts)
    def add_shortcut(self, shotrcut):
        """Append one shortcut string to the list."""
        self.__selection_view.add_shortcut(shotrcut)
    def get_shortcuts(self):
        """Return the currently listed shortcut strings."""
        return self.__selection_view.get_shortcuts()
if __name__ == "__main__":
    # Manual smoke test: open the dialog, then print the dialog response
    # and the resulting shortcut list (Python 2 print statements).
    dlg = KeyboardShortcutSelectionDialog(
        title = "Select test",
        buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK))
    # NOTE(review): set_shortcuts(None) clears the list right after the
    # add_shortcut call above -- presumably intentional for testing both
    # paths; confirm.
    dlg.add_shortcut("Control+Shift+space")
    dlg.set_shortcuts(None)
    print dlg.run()
    print dlg.get_shortcuts()
|
# encoding: utf-8
"""
Copyright (c) 2012 - 2015, Ernesto Ruge
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import json
import util
import db
import datetime
import time
import sys
from flask import Flask
from flask import abort
from flask import render_template
from flask import make_response
from flask import request
from flask import session
from flask import redirect
from flask import Response
from collections import OrderedDict
from bson import ObjectId, DBRef
import werkzeug
from webapp import app
####################################################
# system
####################################################
@app.route('/oparl')
def oparl_general():
    """Serve the static OParl System object for this endpoint."""
    def build_system(params):
        postfix = generate_postfix(params)
        return {
            "id": "https://1-0.oparl.politik-bei-uns.de/oparl",
            "type": "http://oparl.org/schema/1.0/System",
            "oparlVersion": "http://oparl.org/specs/1.0/",
            "otherOparlVersions": [],
            "name": "OKF-DE OParl Service",
            "body": "%s/oparl/body%s" % (app.config['api_url'], postfix),
            "contactEmail": "kontakt@politik-bei-uns.de",
            "contactName": "Ernesto Ruge, Open Knowledge Foundation Deutschland e.V.",
            "website": "http://politik-bei-uns.de/",
            "vendor": "http://politik-bei-uns.de/",
            "product": "http://politik-bei-uns.de/",
            "created": "2015-01-01T00:00:00+01:00",
            "modified": "2015-01-01T00:00:00+01:00",
            "web": "https://politik-bei-uns.de/"
        }
    return oparl_basic(build_system)
####################################################
# body
####################################################
# body list
@app.route('/oparl/body')
def oparl_bodies():
    """Return the paginated OParl body list (built by oparl_bodies_data)."""
    return oparl_basic(oparl_bodies_data)
def oparl_bodies_data(params):
    """Assemble the paginated OParl body list.

    Pagination uses a "modified before" cursor: when 'q' is present it is
    expected to look like "modified:<ISO-TIMESTAMP" and restricts the page
    to items modified before that timestamp.  TODO confirm the query
    grammar against the callers.

    NOTE(review): a second definition of this function shadowed an
    earlier one (which returned db.get_body() with URL prefix/postfix
    directly); the dead first definition has been removed.
    """
    search_params = {}
    if 'q' in params:
        search_params['modified'] = { '$lt': datetime.datetime.strptime(params['q'].split(':<')[1], "%Y-%m-%dT%H:%M:%S.%f") }
    data = db.get_body(search_params = search_params, limit=app.config['oparl_items_per_page'])
    result_count = db.get_body_count(search_params=search_params)
    data = {
        'data': data,
        'pagination': {
            'elementsPerPage': app.config['oparl_items_per_page']
        },
        'links': {
        }
    }
    if result_count > app.config['oparl_items_per_page']:
        # Use the last item of the current page as the cursor for the
        # next page (was a hard-coded index [9], which only worked when
        # oparl_items_per_page == 10).
        data['links']['next'] = '%s/oparl/body%s' % (app.config['api_url'], generate_postfix(params, ['q=modified:<%s' % datetime.datetime.strptime(data['data'][-1]['modified'], "%Y-%m-%dT%H:%M:%S.%f+00:00").strftime("%Y-%m-%dT%H:%M:%S.%f")]))
    if 'modified' in search_params:
        data['links']['first'] = '%s/oparl/body%s' % (app.config['api_url'], generate_postfix(params))
    for key, single in enumerate(data['data']):
        data['data'][key] = oparl_body_layout(data=single, params=params)
    return data
# single body
@app.route('/oparl/body/<string:body_id>')
def oparl_body(body_id):
    """Return a single OParl body; body_id is a MongoDB ObjectId string."""
    return oparl_basic(oparl_body_data, params={'_id': body_id})
def oparl_body_data(params):
    """Fetch one body by _id and render it via oparl_body_layout.

    Aborts with 404 when the lookup does not yield exactly one document.
    Previously a multi-match result fell through the if/elif and
    returned None, which Flask cannot serve; the else branch now matches
    oparl_organization_data's handling.
    """
    data = db.get_body(search_params={'_id': ObjectId(params['_id'])})
    if len(data) == 1:
        return oparl_body_layout(data=data[0], params=params)
    else:
        abort(404)
def oparl_body_layout(data, params):
    """Convert a raw body document from the database into its OParl form.

    Mutates *data* in place and returns it: sets id/type, rewrites the
    timestamps, adds links to the related object lists and strips
    internal fields.
    """
    # default values
    data['id'] = "%s/oparl/body/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'https://oparl.org/schema/1.0/Body'
    # NOTE(review): stored "+00:00" timestamps are re-labelled "+01:00"
    # without shifting the clock value -- confirm this timezone handling
    # is intentional.
    data['created'] = datetime.datetime.strptime(data['created'], "%Y-%m-%dT%H:%M:%S.%f+00:00").strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = datetime.datetime.strptime(data['modified'], "%Y-%m-%dT%H:%M:%S.%f+00:00").strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations
    data['system'] = "%s/oparl%s" % (app.config['api_url'], generate_postfix(params))
    data['organization'] = "%s/oparl/body/%s/organization%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['person'] = "%s/oparl/body/%s/person%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['meeting'] = "%s/oparl/body/%s/meeting%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['paper'] = "%s/oparl/body/%s/paper%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['legislativeTerm'] = []
    # delete stuff
    del data['config']
    del data['_id']
    return data
# body organization list
@app.route('/oparl/body/<string:body_id>/organization')
def oparl_body_organization(body_id):
    """Return the organization list of one body."""
    return oparl_basic(oparl_body_organization_data,
                       params={'body_id':body_id})
def oparl_body_organization_data(params):
    """Build the paginated organization list for one body."""
    search_params = oparl_generate_list_search_params(params)
    organizations = db.get_organization(search_params=search_params,
                                        limit=app.config['oparl_items_per_page'])
    total = db.get_organization_count(search_params=search_params)
    result = oparl_generate_list_items(params=params,
                                       search_params=search_params,
                                       result_count=total,
                                       data=organizations,
                                       type='organization')
    result['data'] = [oparl_organization_layout(data=item, params=params)
                      for item in result['data']]
    return result
# body person list
@app.route('/oparl/body/<string:body_id>/person')
def oparl_body_person(body_id):
    """Return the person list of one body."""
    return oparl_basic(oparl_body_person_data,
                       params={'body_id': body_id})
def oparl_body_person_data(params):
    """Build the paginated person list for one body (memberships
    dereferenced)."""
    search_params = oparl_generate_list_search_params(params)
    people = db.get_person(search_params=search_params,
                           limit=app.config['oparl_items_per_page'],
                           deref={'values': ['membership']})
    total = db.get_person_count(search_params=search_params)
    result = oparl_generate_list_items(params=params,
                                       search_params=search_params,
                                       result_count=total,
                                       data=people,
                                       type='person')
    result['data'] = [oparl_person_layout(data=person, params=params)
                      for person in result['data']]
    return result
# body meeting list
@app.route('/oparl/body/<string:body_id>/meeting')
def oparl_body_meeting(body_id):
    """Return the meeting list of one body."""
    return oparl_basic(oparl_body_meeting_data,
                       params = {'body_id': body_id})
def oparl_body_meeting_data(params):
    """Build the paginated meeting list (files and agenda items dereferenced) for one body."""
    query = oparl_generate_list_search_params(params)
    meetings = db.get_meeting(search_params=query,
                              limit=app.config['oparl_items_per_page'],
                              deref={'values': ['invitation', 'resultsProtocol', 'agendaItem', 'auxiliaryFile']})
    total = db.get_meeting_count(search_params=query)
    result = oparl_generate_list_items(params=params,
                                       search_params=query,
                                       result_count=total,
                                       data=meetings,
                                       type='meeting')
    # Render every raw document into its OParl representation.
    result['data'] = [oparl_meeting_layout(data=entry, params=params)
                      for entry in result['data']]
    return result
# body paper list
@app.route('/oparl/body/<string:body_id>/paper')
def oparl_body_paper(body_id):
    """Route: paginated paper list of one body."""
    route_params = {'body_id': body_id}
    return oparl_basic(oparl_body_paper_data, params=route_params)
def oparl_body_paper_data(params):
    """Build the paginated paper list (files, consultations etc. dereferenced) for one body."""
    query = oparl_generate_list_search_params(params)
    papers = db.get_paper(search_params=query,
                          limit=app.config['oparl_items_per_page'],
                          deref={'values': ['mainFile', 'auxiliaryFile', 'consultation', 'location', 'originatorPerson', 'originatorOrganization']})
    total = db.get_paper_count(search_params=query)
    result = oparl_generate_list_items(params=params,
                                       search_params=query,
                                       result_count=total,
                                       data=papers,
                                       type='paper')
    # Render every raw document into its OParl representation.
    result['data'] = [oparl_paper_layout(data=entry, params=params)
                      for entry in result['data']]
    return result
####################################################
# organization
####################################################
# single organization
@app.route('/oparl/organization/<string:organization_id>')
def oparl_organization(organization_id):
    """Route: one organization by its database id."""
    route_params = {'_id': organization_id}
    return oparl_basic(oparl_organization_data, params=route_params)
def oparl_organization_data(params):
    """Fetch one organization by id; 404 unless exactly one document matches."""
    found = db.get_organization(search_params={'_id': ObjectId(params['_id'])})
    if len(found) != 1:
        abort(404)
    return oparl_organization_layout(found[0], params)
def oparl_organization_layout(data, params):
    """Transform a raw organization document (in place) into its OParl form.

    Uses ``data['_id']`` and the body DBRef to build URLs, resolves the
    membership back-references, moves scraper-internal fields into the
    ``PolitikBeiUns:`` namespace and strips private keys. Returns the
    mutated dict.
    """
    # default values
    data['id'] = "%s/oparl/organization/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    # NOTE(review): https:// scheme here, while e.g. the Membership layout
    # uses http:// for its schema URL — confirm which one is intended.
    data['type'] = 'https://oparl.org/schema/1.0/Organization'
    data['body'] = "%s/oparl/body/%s%s" % (app.config['api_url'], data['body'].id, generate_postfix(params))
    # Timestamps are datetime objects here, rendered with a fixed +01:00 offset.
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations
    if 'startDate' in data:
        if isinstance(data['startDate'], datetime.datetime):
            data['startDate'] = data['startDate'].strftime("%Y-%m-%d")
    if 'endDate' in data:
        if isinstance(data['endDate'], datetime.datetime):
            data['endDate'] = data['endDate'].strftime("%Y-%m-%d")
    # Back-references: every membership pointing at this organization.
    data['membership'] = generate_backref_list(db.get_membership(search_params={'organization': DBRef('organization', ObjectId(data['_id']))}), params)
    data['meeting'] = "%s/oparl/organization/%s/meeting%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal keys that were renamed above or must stay private)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
@app.route('/oparl/organization/<string:organization_id>/meeting')
def oparl_organization_meeting(organization_id):
    """Route: all meetings held by one organization."""
    route_params = {'_id': organization_id}
    return oparl_basic(oparl_organization_meeting_data, params=route_params)
def oparl_organization_meeting_data(params):
    """Return every meeting of the given organization in OParl form."""
    found = db.get_meeting(search_params={
        'organization': DBRef('organization', ObjectId(params['_id']))})
    return [oparl_meeting_layout(entry, params) for entry in found]
####################################################
# membership
####################################################
# single membership
@app.route('/oparl/membership/<string:membership_id>')
def oparl_membership(membership_id):
    """Route: one membership by its database id."""
    route_params = {'_id': membership_id}
    return oparl_basic(oparl_membership_data, params=route_params)
def oparl_membership_data(params):
    """Fetch one membership by id and return its OParl representation.

    Aborts with 404 unless exactly one document matches (a unique ``_id``
    lookup can only yield 0 or 1 results).
    """
    data = db.get_membership(search_params={'_id': ObjectId(params['_id'])})
    if len(data) == 1:
        return oparl_membership_layout(data[0], params)
    # Previously only len == 0 aborted; any other count fell through and
    # returned None, which Flask turns into a 500. Fail explicitly instead
    # (same pattern as oparl_organization_data).
    abort(404)
def oparl_membership_layout(data, params):
    """Transform a raw membership document (in place) into its OParl form.

    Builds the object URLs from ``data['_id']`` and the body/organization
    DBRefs, resolves the owning person via back-reference, moves
    scraper-internal fields into the ``PolitikBeiUns:`` namespace and strips
    private keys. Returns the mutated dict.
    """
    # default values
    data['id'] = "%s/oparl/membership/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/Membership'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations
    if 'startDate' in data:
        if isinstance(data['startDate'], datetime.datetime):
            data['startDate'] = data['startDate'].strftime("%Y-%m-%d")
    if 'endDate' in data:
        if isinstance(data['endDate'], datetime.datetime):
            data['endDate'] = data['endDate'].strftime("%Y-%m-%d")
    data['organization'] = "%s/oparl/organization/%s%s" % (app.config['api_url'], data['organization'].id, generate_postfix(params))
    # Back-reference: the person that owns this membership.
    # NOTE(review): the [0] raises IndexError when no person references this
    # membership — confirm the data model guarantees at least one.
    data['person'] = "%s/oparl/person/%s%s" % (app.config['api_url'], db.get_person(search_params={'membership': DBRef('membership', ObjectId(data['_id']))})[0]['_id'], generate_postfix(params))
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal keys that were renamed above or must stay private)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
####################################################
# person
####################################################
# single person
@app.route('/oparl/person/<string:person_id>')
def oparl_person(person_id):
    """Route: one person by its database id."""
    route_params = {'_id': person_id}
    return oparl_basic(oparl_person_data, params=route_params)
def oparl_person_data(params):
    """Fetch one person (memberships dereferenced) and return its OParl form.

    Aborts with 404 unless exactly one document matches.
    """
    data = db.get_person(search_params={'_id': ObjectId(params['_id'])},
                         deref={'values': ['membership']})
    if len(data) == 1:
        return oparl_person_layout(data[0], params)
    # Previously only len == 0 aborted; any other count fell through and
    # returned None, which Flask turns into a 500. Fail explicitly instead.
    abort(404)
def oparl_person_layout(data, params):
    """Transform a raw person document (in place) into its OParl form.

    Expects ``data['membership']`` to hold dereferenced membership documents
    (see the ``deref`` argument at the call sites); each is rendered inline
    via oparl_membership_layout. Returns the mutated dict.
    """
    # default values
    data['id'] = "%s/oparl/person/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/Person'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # Render the embedded membership documents inline.
    memberships = []
    for single_membership in data['membership']:
        memberships.append(oparl_membership_layout(single_membership, params))
    data['membership'] = memberships
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal keys that were renamed above or must stay private)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
####################################################
# meeting
####################################################
# single meeting
@app.route('/oparl/meeting/<string:meeting_id>')
def oparl_meeting(meeting_id):
    """Route: one meeting by its database id."""
    route_params = {'_id': meeting_id}
    return oparl_basic(oparl_meeting_data, params=route_params)
def oparl_meeting_data(params):
    """Fetch one meeting (files and agenda items dereferenced), OParl form.

    Aborts with 404 unless exactly one document matches.
    """
    data = db.get_meeting(search_params={'_id': ObjectId(params['_id'])},
                          deref={'values': ['invitation', 'resultsProtocol', 'agendaItem', 'auxiliaryFile']})
    if len(data) == 1:
        return oparl_meeting_layout(data[0], params)
    # Previously only len == 0 aborted; any other count fell through and
    # returned None, which Flask turns into a 500. Fail explicitly instead.
    abort(404)
def oparl_meeting_layout(data, params):
    """Transform a raw meeting document (in place) into its OParl form.

    Dereferenced file documents (invitation, protocols, auxiliary files) are
    rendered through oparl_file_layout and agenda items through
    oparl_agendaItem_layout; vendor fields move into the ``PolitikBeiUns:``
    namespace and private keys are stripped. Returns the mutated dict.
    """
    # default values
    data['id'] = "%s/oparl/meeting/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/Meeting'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations
    if 'start' in data:
        if isinstance(data['start'], datetime.datetime):
            data['start'] = data['start'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    if 'end' in data:
        if isinstance(data['end'], datetime.datetime):
            data['end'] = data['end'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # Vendor-specific fields move into the PolitikBeiUns: namespace.
    if 'address' in data:
        data['PolitikBeiUns:address'] = data['address']
        del data['address']
    if 'room' in data:
        data['PolitikBeiUns:room'] = data['room']
        del data['room']
    # if invitation is list -> Bug
    if 'invitation' in data:
        if isinstance(data['invitation'], list):
            # invitation is list -> Bug in the source data; work around it by
            # keeping only the first entry (or dropping the empty list).
            if len(data['invitation']):
                data['invitation'] = data['invitation'][0]
            else:
                del data['invitation']
    if 'invitation' in data:
        if data['invitation']:
            data['invitation'] = oparl_file_layout(data['invitation'], params)
        else:
            del data['invitation']
    if 'resultsProtocol' in data:
        if data['resultsProtocol']:
            data['resultsProtocol'] = oparl_file_layout(data['resultsProtocol'], params)
        else:
            del data['resultsProtocol']
    if 'verbatimProtocol' in data:
        if data['verbatimProtocol']:
            data['verbatimProtocol'] = oparl_file_layout(data['verbatimProtocol'], params)
        else:
            del data['verbatimProtocol']
    if 'participant' in data:
        # Participants become a list of membership URLs.
        # NOTE(review): the original 'participant' key is left in the output —
        # confirm whether it should be deleted after the rename.
        data['membership'] = generate_backref_list(data['participant'], params)
    if 'auxiliaryFile' in data:
        # Render non-empty auxiliary files; drop the key if none survive.
        auxiliaryFiles = []
        for single_auxiliaryFile in data['auxiliaryFile']:
            if single_auxiliaryFile:
                auxiliaryFiles.append(oparl_file_layout(single_auxiliaryFile, params))
        if len(auxiliaryFiles):
            data['auxiliaryFile'] = auxiliaryFiles
        else:
            del data['auxiliaryFile']
    if 'agendaItem' in data:
        # Render non-empty agenda items; drop the key if none survive.
        agendaItems = []
        for single_agendaItem in data['agendaItem']:
            if single_agendaItem:
                agendaItems.append(oparl_agendaItem_layout(single_agendaItem, params))
        if len(agendaItems):
            data['agendaItem'] = agendaItems
        else:
            del data['agendaItem']
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal keys that were renamed above or must stay private)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
####################################################
# agendaItem
####################################################
# single agendaItem
@app.route('/oparl/agendaItem/<string:agendaItem_id>')
def oparl_agendaItem(agendaItem_id):
    """Route: one agenda item by its database id."""
    route_params = {'_id': agendaItem_id}
    return oparl_basic(oparl_agendaItem_data, params=route_params)
def oparl_agendaItem_data(params):
    """Fetch one agenda item, add a back-reference to its meeting, 404 otherwise.

    Aborts with 404 unless exactly one document matches.
    """
    data = db.get_agendaItem(search_params={'_id': ObjectId(params['_id'])})
    if len(data) == 1:
        data = oparl_agendaItem_layout(data[0], params)
        # Back-reference: the meeting whose agenda contains this item.
        meeting = db.get_meeting(search_params={'agendaItem': DBRef('agendaItem', ObjectId(params['_id']))})
        if len(meeting):
            data['meeting'] = "%s/oparl/meeting/%s%s" % (app.config['api_url'], meeting[0]['_id'], generate_postfix(params))
        return data
    # Previously only len == 0 aborted; any other count fell through and
    # returned None, which Flask turns into a 500. Fail explicitly instead.
    abort(404)
def oparl_agendaItem_layout(data, params):
    """Transform a raw agenda item document (in place) into its OParl form."""
    # default values
    data['id'] = "%s/oparl/agendaItem/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/AgendaItem'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations
    # NOTE(review): start/end are rendered WITHOUT the +01:00 offset used by
    # oparl_meeting_layout — confirm whether that difference is intentional.
    if 'start' in data:
        if isinstance(data['start'], datetime.datetime):
            data['start'] = data['start'].strftime("%Y-%m-%dT%H:%M:%S")
    if 'end' in data:
        if isinstance(data['end'], datetime.datetime):
            data['end'] = data['end'].strftime("%Y-%m-%dT%H:%M:%S")
    if 'consultation' in data:
        # Consultation is stored as a DBRef; render its URL.
        data['consultation'] = "%s/oparl/consultation/%s%s" % (app.config['api_url'], data['consultation'].id, generate_postfix(params))
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal keys that were renamed above or must stay private)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
####################################################
# consultation
####################################################
# single consultation
@app.route('/oparl/consultation/<string:consultation_id>')
def oparl_consultation(consultation_id):
    """Route: one consultation by its database id."""
    route_params = {'_id': consultation_id}
    return oparl_basic(oparl_consultation_data, params=route_params)
def oparl_consultation_data(params):
    """Fetch one consultation, add a back-reference to its agenda item.

    Aborts with 404 unless exactly one document matches.
    """
    data = db.get_consultation(search_params={'_id': ObjectId(params['_id'])})
    if len(data) == 1:
        data = oparl_consultation_layout(data[0], params)
        # Back-reference: the agenda item that holds this consultation.
        agendaItem = db.get_agendaItem(search_params={'consultation': DBRef('consultation', ObjectId(params['_id']))})
        if len(agendaItem):
            data['agendaItem'] = "%s/oparl/agendaItem/%s%s" % (app.config['api_url'], agendaItem[0]['_id'], generate_postfix(params))
        return data
    # Previously only len == 0 aborted; any other count fell through and
    # returned None, which Flask turns into a 500. Fail explicitly instead.
    abort(404)
def oparl_consultation_layout(data, params):
    """Transform a raw consultation document (in place) into its OParl form."""
    # default values
    data['id'] = "%s/oparl/consultation/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/Consultation'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations
    if 'publishedDate' in data:
        if isinstance(data['publishedDate'], datetime.datetime):
            data['publishedDate'] = data['publishedDate'].strftime("%Y-%m-%d")
    if 'paper' in data:
        # Paper is stored as a DBRef; render its URL.
        data['paper'] = "%s/oparl/paper/%s%s" % (app.config['api_url'], data['paper'].id, generate_postfix(params))
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal keys that were renamed above or must stay private)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
####################################################
# paper
####################################################
# single paper
@app.route('/oparl/paper/<string:paper_id>')
def oparl_paper(paper_id):
    """Route: one paper by its database id."""
    route_params = {'_id': paper_id}
    return oparl_basic(oparl_paper_data, params=route_params)
def oparl_paper_data(params):
    """Fetch one paper (referenced documents dereferenced), OParl form.

    Aborts with 404 unless exactly one document matches.
    """
    data = db.get_paper(search_params={'_id': ObjectId(params['_id'])},
                        deref={'values': ['mainFile', 'auxiliaryFile', 'consultation', 'location', 'originatorPerson', 'originatorOrganization']})
    if len(data) == 1:
        return oparl_paper_layout(data[0], params)
    # Previously only len == 0 aborted; any other count fell through and
    # returned None, which Flask turns into a 500. Fail explicitly instead.
    abort(404)
def oparl_paper_layout(data, params):
    """Transform a raw paper document (in place) into its OParl form.

    Dereferenced files are rendered through oparl_file_layout, consultations
    are resolved via back-reference, scraper-internal fields move into the
    ``PolitikBeiUns:`` namespace and private keys are stripped. Returns the
    mutated dict.
    """
    # default values
    data['id'] = "%s/oparl/paper/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/Paper'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # mainFile
    if 'mainFile' in data:
        if data['mainFile']:
            data['mainFile'] = oparl_file_layout(data['mainFile'], params)
        else:
            del data['mainFile']
    # auxiliaryFiles: render non-empty entries; drop the key if none survive.
    if 'auxiliaryFile' in data:
        auxiliaryFiles = []
        for single_auxiliaryFile in data['auxiliaryFile']:
            if single_auxiliaryFile:
                auxiliaryFiles.append(oparl_file_layout(single_auxiliaryFile, params))
        if len(auxiliaryFiles):
            data['auxiliaryFile'] = auxiliaryFiles
        else:
            # BUG FIX: this branch used to `del data['data']`, which raised a
            # KeyError whenever all auxiliary files were empty — delete the
            # now-empty 'auxiliaryFile' key instead (same pattern as
            # oparl_meeting_layout).
            del data['auxiliaryFile']
    # consultations referencing this paper (back-reference query)
    data['consultation'] = []
    consultations = db.get_consultation(search_params={'paper': DBRef('paper', ObjectId(data['_id']))})
    for consultation in consultations:
        data['consultation'].append(oparl_consultation_layout(consultation, params))
    if len(data['consultation']) == 0:
        del data['consultation']
    # additional transformations
    if 'publishedDate' in data:
        if isinstance(data['publishedDate'], datetime.datetime):
            data['PolitikBeiUns:publishedDate'] = data['publishedDate'].strftime("%Y-%m-%d")
        del data['publishedDate']
    # TODO for data model
    if 'georeferences' in data:
        del data['georeferences']
    # BUG FIX: the old code re-tested 'georeferences' (always False after the
    # deletion above) before deleting 'georeferencesGenerated' — dead code;
    # 'georeferencesGenerated' is removed unconditionally further below.
    if 'title' in data:
        del data['title']
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    if 'nameShort' in data:
        data['reference'] = data['nameShort']
    # delete stuff (internal keys that were renamed above or must stay private)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    if 'nameShort' in data:
        del data['nameShort']
    if 'georeferencesGenerated' in data:
        del data['georeferencesGenerated']
    return data
####################################################
# file
####################################################
# single file
@app.route('/oparl/file/<string:file_id>')
def oparl_document(file_id):
    """Route: one file (document) by its database id."""
    route_params = {'_id': file_id}
    return oparl_basic(oparl_file_data, params=route_params)
def oparl_file_data(params):
    """Fetch one file by id and attach back-references to the meetings,
    agenda items and papers that use it.

    Aborts with 404 unless exactly one document matches.
    """
    data = db.get_file(search_params={'_id': ObjectId(params['_id'])})
    if len(data) != 1:
        # Previously only len == 0 aborted; any other count fell through and
        # returned None (a Flask 500). Fail explicitly instead.
        abort(404)
    data = oparl_file_layout(data[0], params)
    # Get Backrefs for Meeting
    data['meeting'] = []
    meeting = db.get_meeting(search_params={'invitation': DBRef('file', ObjectId(params['_id']))})
    if len(meeting):
        data['meeting'].append("%s/oparl/meeting/%s%s" % (app.config['api_url'], meeting[0]['_id'], generate_postfix(params)))
    meeting = db.get_meeting(search_params={'resultsProtocol': DBRef('file', ObjectId(params['_id']))})
    if len(meeting):
        data['meeting'].append("%s/oparl/meeting/%s%s" % (app.config['api_url'], meeting[0]['_id'], generate_postfix(params)))
    meeting = db.get_meeting(search_params={'verbatimProtocol': DBRef('file', ObjectId(params['_id']))})
    if len(meeting):
        data['meeting'].append("%s/oparl/meeting/%s%s" % (app.config['api_url'], meeting[0]['_id'], generate_postfix(params)))
    # BUG FIX: this query used {'verbatimProtocol': DBRef('auxiliaryFile', ...)}
    # — wrong field AND wrong referenced collection. Meetings reference files
    # through their 'auxiliaryFile' list with DBRef('file', ...), as the
    # matching agendaItem query below demonstrates.
    meeting = db.get_meeting(search_params={'auxiliaryFile': DBRef('file', ObjectId(params['_id']))})
    for single_meeting in meeting:
        data['meeting'].append("%s/oparl/meeting/%s%s" % (app.config['api_url'], single_meeting['_id'], generate_postfix(params)))
    if len(data['meeting']) == 0:
        del data['meeting']
    # Get Backrefs for AgendaItem
    data['agendaItem'] = []
    agendaItem = db.get_agendaItem(search_params={'resolutionFile': DBRef('file', ObjectId(params['_id']))})
    if len(agendaItem):
        data['agendaItem'].append("%s/oparl/agendaItem/%s%s" % (app.config['api_url'], agendaItem[0]['_id'], generate_postfix(params)))
    agendaItem = db.get_agendaItem(search_params={'auxiliaryFile': DBRef('file', ObjectId(params['_id']))})
    for single_agendaItem in agendaItem:
        data['agendaItem'].append("%s/oparl/agendaItem/%s%s" % (app.config['api_url'], single_agendaItem['_id'], generate_postfix(params)))
    if len(data['agendaItem']) == 0:
        del data['agendaItem']
    # Get Backrefs for Paper
    data['paper'] = []
    # BUG FIX: paper back-references must query the paper collection — the old
    # code called db.get_agendaItem here (copy/paste from the block above), so
    # the 'paper' list could never contain papers.
    paper = db.get_paper(search_params={'mainFile': DBRef('file', ObjectId(params['_id']))})
    if len(paper):
        data['paper'].append("%s/oparl/paper/%s%s" % (app.config['api_url'], paper[0]['_id'], generate_postfix(params)))
    paper = db.get_paper(search_params={'auxiliaryFile': DBRef('file', ObjectId(params['_id']))})
    for single_paper in paper:
        data['paper'].append("%s/oparl/paper/%s%s" % (app.config['api_url'], single_paper['_id'], generate_postfix(params)))
    if len(data['paper']) == 0:
        del data['paper']
    return data
def oparl_file_layout(data, params):
    """Transform a raw file document (in place) into its OParl form.

    Adds access/download URLs pointing at the streaming endpoints below,
    renames scraper-internal fields (fulltext/mimetype/filename) to their
    OParl counterparts and strips private keys. Returns the mutated dict.
    """
    # default values
    data['id'] = "%s/oparl/file/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    # NOTE(review): https:// scheme here, while most other layouts in this
    # file use http:// — confirm which one is intended.
    data['type'] = 'https://oparl.org/schema/1.0/File'
    data['body'] = "%s/oparl/body/%s%s" % (app.config['api_url'], data['body'].id, generate_postfix(params))
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations
    # URLs of the inline-view and attachment-download endpoints.
    data['accessUrl'] = "%s/oparl/file/%s/access%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['downloadUrl'] = "%s/oparl/file/%s/download%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    if 'date' in data:
        if isinstance(data['date'], datetime.datetime):
            data['date'] = data['date'].strftime("%Y-%m-%d")
    # TODO: rename stuff
    if 'fulltext' in data:
        data['text'] = data['fulltext']
        del data['fulltext']
    if 'mimetype' in data:
        data['mimeType'] = data['mimetype']
        del data['mimetype']
    if 'filename' in data:
        data['fileName'] = data['filename']
        del data['filename']
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    if 'originalDownloadPossible' in data:
        data['PolitikBeiUns:originalDownloadPossible'] = data['originalDownloadPossible']
    # delete stuff (internal keys that were renamed above or must stay private)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'originalDownloadPossible' in data:
        del data['originalDownloadPossible']
    if 'file' in data:
        del data['file']
    if 'thumbnails' in data:
        del data['thumbnails']
    if 'fulltextGenerated' in data:
        del data['fulltextGenerated']
    if 'thumbnailsGenerated' in data:
        del data['thumbnailsGenerated']
    return data
# file accessUrl
@app.route('/oparl/file/<string:file_id>/access')
def oparl_file_accessUrl(file_id):
    """Route: stream the raw file content for inline viewing."""
    route_params = {'file_id': file_id}
    return oparl_basic(oparl_file_accessUrl_data, params=route_params, direct_output=True)
def oparl_file_accessUrl_data(params):
    """Stream the binary content of a file for inline viewing.

    Returns a Flask response with the stored mimetype, an ETag from the
    stored checksum and long-lived (30 day) caching headers. Aborts with
    404 (unknown id), 410 (depublished) or 500 (file data missing).
    """
    file_data = db.get_file(deref={'values': ['file']},
        search_params={'_id': ObjectId(params['file_id'])})
    if len(file_data) == 0:
        # TODO: Render a more informative 404 page
        abort(404)
    file_data = file_data[0]
    # extension doesn't match file extension (avoiding arbitrary URLs)
    #proper_extension = attachment_info['filename'].split('.')[-1]
    #if proper_extension != extension:
    # abort(404)
    # 'file' property is not set (e.g. due to depublication)
    if 'file' not in file_data:
        if 'depublication' in file_data:
            abort(410) # Gone
        else:
            # TODO: log this as unexplicable...
            abort(500)
    # handle conditional GET
    #if 'If-Modified-Since' in request.headers:
    # file_date = attachment_info['file']['uploadDate'].replace(tzinfo=None)
    # request_date = util.parse_rfc1123date(request.headers['If-Modified-Since'])
    # difference = file_date - request_date
    # if difference < datetime.timedelta(0, 1): # 1 second
    # return Response(status=304)
    #if 'if-none-match' in request.headers:
    # print "Conditional GET: If-None-Match"
    # TODO: handle ETag in request
    # Stream the stored blob out of GridFS-style storage.
    handler = db.get_file_data(file_data['file']['_id'])
    response = make_response(handler.read(), 200)
    response.mimetype = file_data['mimetype']
    response.headers['X-Robots-Tag'] = 'noarchive'
    response.headers['ETag'] = file_data['sha1Checksum']
    response.headers['Last-modified'] = util.rfc1123date(file_data['file']['uploadDate'])
    response.headers['Expires'] = util.expires_date(hours=(24 * 30))
    response.headers['Cache-Control'] = util.cache_max_age(hours=(24 * 30))
    return response
# file downloadUrl
@app.route('/oparl/file/<string:file_id>/download')
def oparl_file_downloadUrl(file_id):
    """Route: stream the raw file content as an attachment download."""
    route_params = {'file_id': file_id}
    return oparl_basic(oparl_file_downloadUrl_data, params=route_params, direct_output=True)
def oparl_file_downloadUrl_data(params):
    """Stream the binary content of a file as an attachment download.

    Like oparl_file_accessUrl_data, but additionally sets a
    Content-Disposition header so browsers save the file. Aborts with 404
    (unknown id), 410 (depublished) or 500 (file data missing).
    """
    file_data = db.get_file(deref={'values': ['file']},
        search_params={'_id': ObjectId(params['file_id'])})
    if len(file_data) == 0:
        # TODO: Render a more informative 404 page
        abort(404)
    file_data = file_data[0]
    # 'file' property is not set (e.g. due to depublication)
    if 'file' not in file_data:
        if 'depublication' in file_data:
            abort(410) # Gone
        else:
            # TODO: log this as unexplicable...
            abort(500)
    handler = db.get_file_data(file_data['file']['_id'])
    response = make_response(handler.read(), 200)
    response.mimetype = file_data['mimetype']
    response.headers['X-Robots-Tag'] = 'noarchive'
    response.headers['ETag'] = file_data['sha1Checksum']
    response.headers['Last-modified'] = util.rfc1123date(file_data['file']['uploadDate'])
    response.headers['Expires'] = util.expires_date(hours=(24 * 30))
    response.headers['Cache-Control'] = util.cache_max_age(hours=(24 * 30))
    # NOTE(review): filename is inserted unquoted/unsanitized into the header;
    # a filename containing '"' or ';' could corrupt the header — consider
    # escaping or werkzeug's helpers.
    response.headers['Content-Disposition'] = 'attachment; filename=' + file_data['filename']
    return response
####################################################
# misc
####################################################
def oparl_basic(content_fuction, params=None, direct_output=False):
    """Shared request wrapper for every OParl endpoint.

    Parses the common query arguments (``callback``, ``html``, ``i``, ``q``,
    ``page``) into *params*, calls ``content_fuction(params)`` and returns
    the result as JSON, JSONP or an HTML rendering.

    Fix of persistent html=1: the previous signature used the mutable
    default ``params={}``; that single dict was shared across all requests
    and mutated below via ``params.update(...)``, so flags such as
    ``html=1`` leaked into every subsequent request. A fresh per-call dict
    (a copy of the caller's dict) is used instead.

    (The misspelled parameter name ``content_fuction`` is kept so keyword
    callers stay compatible.)
    """
    params = {} if params is None else dict(params)
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    request_info = {}
    html = request.args.get('html', False)
    if html:
        request_info['html'] = 1
    extended_info = request.args.get('i')
    extended_info = extended_info == '1'
    if extended_info:
        request_info['i'] = 1
    search_query = request.args.get('q', "")
    if search_query:
        request_info['q'] = search_query
    page = request.args.get('page')
    try:
        page = int(page)
    except (ValueError, TypeError):
        page = 1
    request_info['page'] = page
    params.update(request_info)
    response = content_fuction(params)
    if direct_output:
        # Endpoint produced a complete Flask response (file streaming etc.).
        return response
    if extended_info:
        ret = {
            'status': 0,
            'duration': int((time.time() - start_time) * 1000),
            'request': request_info,
            'response': response
        }
    else:
        ret = response
    json_output = json.dumps(ret, cls=util.MyEncoder)#, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    if html:
        # Round-trip through the JSON string to get an OrderedDict for display.
        return render_template('oparl.html', data=json.JSONDecoder(object_pairs_hook=OrderedDict).decode(json_output))
    else:
        response = make_response(json_output, 200)
        response.mimetype = 'application/json'
        response.headers['Expires'] = util.expires_date(hours=24)
        response.headers['Cache-Control'] = util.cache_max_age(hours=24)
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response
def generate_postfix(params, additional_params=None):
    """Serialize the pass-through request flags into a URL query string.

    Emits ``html``, ``p`` (only when > 1) and ``i`` from *params*, followed
    by any *additional_params* (pre-formatted ``key=value`` strings), and
    returns either ``'?a=b&c=d'`` or ``''`` when nothing applies.

    Fixed the mutable default argument (``additional_params=[]``) — a shared
    list default is a classic Python pitfall even when, as here, it is not
    mutated.

    NOTE(review): oparl_basic stores the current page under the key 'page',
    but this function checks 'p', so pagination is never carried into
    generated links — confirm which key is intended.
    """
    if additional_params is None:
        additional_params = []
    postfix = []
    if 'html' in params:
        postfix.append('html=' + str(params['html']))
    if 'p' in params:
        if params['p'] > 1:
            postfix.append('p=' + str(params['p']))
    if 'i' in params:
        postfix.append('i=' + str(params['i']))
    postfix = postfix + additional_params
    if len(postfix):
        return '?' + '&'.join(postfix)
    return ''
def generate_single_url(params={}, type='', id=''):
    """Compose the canonical URL of a single OParl object of *type*."""
    postfix = generate_postfix(params)
    return "%s/oparl/%s/%s%s" % (app.config['api_url'], type, id, postfix)
def generate_single_backref_url(params={}, get='', type='', reverse_type='', id=''):
    """Resolve the single object that back-references *id* and build its URL.

    *get* names a ``db.get_*`` accessor; *reverse_type* is the field (and
    referenced collection) queried via DBRef.
    """
    lookup = getattr(db, get)  # resolve accessor without shadowing the argument
    matches = lookup(search_params={reverse_type: DBRef(reverse_type, ObjectId(id))},
                     values={'_id': 1})
    uid = str(matches[0]['_id'])
    return "%s/oparl/%s/%s%s" % (app.config['api_url'], type, uid, generate_postfix(params))
def generate_backref_list(data, params):
    """Map a list of membership documents to their OParl membership URLs."""
    postfix = generate_postfix(params)
    return ["%s/oparl/membership/%s%s" % (app.config['api_url'], item['_id'], postfix)
            for item in data]
def oparl_generate_list_search_params(params):
    """Build the database filter for a body-scoped list endpoint.

    The optional 'q' parameter has the form 'modified:<ISO-timestamp' and
    implements cursor-style pagination by upper-bounding 'modified'.
    """
    search_params = {'body': DBRef('body', ObjectId(params['body_id']))}
    if 'q' in params:
        cutoff = datetime.datetime.strptime(params['q'].split(':<')[1],
                                            "%Y-%m-%dT%H:%M:%S.%f")
        search_params['modified'] = {'$lt': cutoff}
    return search_params
def oparl_generate_list_items(params, search_params, result_count, data, type):
    """Wrap a page of list results in the OParl pagination envelope.

    Adds a 'next' cursor link when more results exist than fit on one page,
    and a 'first' link when the current request is already cursor-filtered.
    """
    result = {
        'data': data,
        'pagination': {
            'elementsPerPage': app.config['oparl_items_per_page']
        },
        'links': {
        }
    }
    if result_count > app.config['oparl_items_per_page']:
        # Cursor for the next page: everything modified before the LAST item
        # of the current page. BUG FIX: the old code hard-coded index 9,
        # which raises IndexError (or picks the wrong element) whenever
        # oparl_items_per_page != 10.
        cursor = result['data'][-1]['modified'].strftime("%Y-%m-%dT%H:%M:%S.%f")
        result['links']['next'] = '%s/oparl/body/%s/%s%s' % (app.config['api_url'], params['body_id'], type, generate_postfix(params, ['q=modified:<%s' % cursor]))
    if 'modified' in search_params:
        result['links']['first'] = '%s/oparl/body/%s/%s%s' % (app.config['api_url'], params['body_id'], type, generate_postfix(params))
    return result
# Fix of persistent html=1 (stray changelog line from a merge — kept as a comment so the module parses)
# encoding: utf-8
"""
Copyright (c) 2012 - 2015, Ernesto Ruge
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import json
import util
import db
import datetime
import time
import sys
from flask import Flask
from flask import abort
from flask import render_template
from flask import make_response
from flask import request
from flask import session
from flask import redirect
from flask import Response
from collections import OrderedDict
from bson import ObjectId, DBRef
import werkzeug
from webapp import app
####################################################
# system
####################################################
@app.route('/oparl')
def oparl_general():
    # System endpoint: static self-description of this OParl service.
    # Everything is fixed except 'body', which links to the body list and
    # carries the pass-through query parameters via generate_postfix.
    return oparl_basic(lambda params: {
        "id": "https://1-0.oparl.politik-bei-uns.de/oparl",
        "type": "http://oparl.org/schema/1.0/System",
        "oparlVersion": "http://oparl.org/specs/1.0/",
        "otherOparlVersions": [],
        "name": "OKF-DE OParl Service",
        "body": "%s/oparl/body%s" % (app.config['api_url'], generate_postfix(params)),
        "contactEmail": "kontakt@politik-bei-uns.de",
        "contactName": "Ernesto Ruge, Open Knowledge Foundation Deutschland e.V.",
        "website": "http://politik-bei-uns.de/",
        "vendor": "http://politik-bei-uns.de/",
        "product": "http://politik-bei-uns.de/",
        "created": "2015-01-01T00:00:00+01:00",
        "modified": "2015-01-01T00:00:00+01:00",
        "web": "https://politik-bei-uns.de/"
    })
####################################################
# body
####################################################
# body list
@app.route('/oparl/body')
def oparl_bodies():
    """Route: paginated list of all bodies known to this system."""
    return oparl_basic(oparl_bodies_data)
def oparl_bodies_data(params):
    # NOTE(review): dead code — this definition is immediately shadowed by the
    # second `def oparl_bodies_data` right below it, so this version is never
    # called. Remove one of the two definitions.
    return db.get_body(add_prefix = "%s/oparl/body/" % app.config['api_url'],
        add_postfix=generate_postfix(params))
def oparl_bodies_data(params):
    """Paginated list of all bodies (this definition shadows the one above).

    Pagination is cursor-based: the 'q' parameter carries
    'modified:<ISO-timestamp' and filters the next page.
    """
    search_params = {}
    if 'q' in params:
        search_params['modified'] = { '$lt': datetime.datetime.strptime(params['q'].split(':<')[1], "%Y-%m-%dT%H:%M:%S.%f") }
    data = db.get_body(search_params = search_params, limit=app.config['oparl_items_per_page'])
    result_count = db.get_body_count(search_params=search_params)
    data = {
        'data': data,
        'pagination': {
            'elementsPerPage': app.config['oparl_items_per_page']
        },
        'links': {
        }
    }
    if result_count > app.config['oparl_items_per_page']:
        # NOTE(review): hard-codes index 9 (assumes exactly 10 items per page)
        # and assumes 'modified' is an ISO string with a +00:00 suffix here —
        # confirm both against db.get_body and oparl_items_per_page.
        data['links']['next'] = '%s/oparl/body%s' % (app.config['api_url'], generate_postfix(params, ['q=modified:<%s' % datetime.datetime.strptime(data['data'][9]['modified'], "%Y-%m-%dT%H:%M:%S.%f+00:00").strftime("%Y-%m-%dT%H:%M:%S.%f")]))
    if 'modified' in search_params:
        data['links']['first'] = '%s/oparl/body%s' % (app.config['api_url'], generate_postfix(params))
    # Render every raw body document into its OParl representation.
    for key, single in enumerate(data['data']):
        data['data'][key] = oparl_body_layout(data=single, params=params)
    return data
# single body
@app.route('/oparl/body/<string:body_id>')
def oparl_body(body_id):
    """Route: one body by its database id."""
    route_params = {'_id': body_id}
    return oparl_basic(oparl_body_data, params=route_params)
def oparl_body_data(params):
    """Fetch one body by its ObjectId and render the OParl layout.

    Aborts with 404 when the id matches no document.  (The original fell
    through and returned None - serialised as JSON null - if the lookup
    ever yielded more than one hit; an _id lookup should return 0 or 1.)
    """
    data = db.get_body(search_params={'_id': ObjectId(params['_id'])})
    if len(data) != 1:
        abort(404)
    return oparl_body_layout(data=data[0], params=params)
def oparl_body_layout(data, params):
    """Transform a body document from the DB into its OParl representation.

    Mutates and returns `data`.  Key insertion order determines the JSON
    output order, so the statement order below is significant.
    """
    # default values
    data['id'] = "%s/oparl/body/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'https://oparl.org/schema/1.0/Body'
    # NOTE(review): unlike the other *_layout helpers, created/modified are
    # parsed from ISO strings here, not datetime objects - confirm the body
    # collection really stores them as strings.
    data['created'] = datetime.datetime.strptime(data['created'], "%Y-%m-%dT%H:%M:%S.%f+00:00").strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = datetime.datetime.strptime(data['modified'], "%Y-%m-%dT%H:%M:%S.%f+00:00").strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations: URLs of the body-scoped sub-resources
    data['system'] = "%s/oparl%s" % (app.config['api_url'], generate_postfix(params))
    data['organization'] = "%s/oparl/body/%s/organization%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['person'] = "%s/oparl/body/%s/person%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['meeting'] = "%s/oparl/body/%s/meeting%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['paper'] = "%s/oparl/body/%s/paper%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['legislativeTerm'] = []
    # delete stuff (internal fields; _id may only be removed after the URLs
    # above have been built from it)
    del data['config']
    del data['_id']
    return data
# body organization list
@app.route('/oparl/body/<string:body_id>/organization')
def oparl_body_organization(body_id):
    """HTTP endpoint: organizations belonging to one body."""
    return oparl_basic(oparl_body_organization_data, params={'body_id': body_id})
def oparl_body_organization_data(params):
    """Assemble the paginated organization list for one body."""
    search_params = oparl_generate_list_search_params(params)
    raw = db.get_organization(search_params=search_params, limit=app.config['oparl_items_per_page'])
    listing = oparl_generate_list_items(
        params=params,
        search_params=search_params,
        result_count=db.get_organization_count(search_params=search_params),
        data=raw,
        type='organization')
    # Render each raw document through the organization layout in place.
    listing['data'] = [oparl_organization_layout(data=item, params=params)
                       for item in listing['data']]
    return listing
# body person list
@app.route('/oparl/body/<string:body_id>/person')
def oparl_body_person(body_id):
    """HTTP endpoint: persons belonging to one body."""
    return oparl_basic(oparl_body_person_data, params={'body_id': body_id})
def oparl_body_person_data(params):
    """Assemble the paginated person list for one body (memberships dereferenced)."""
    search_params = oparl_generate_list_search_params(params)
    raw = db.get_person(search_params=search_params,
                        limit=app.config['oparl_items_per_page'],
                        deref={'values': ['membership']})
    listing = oparl_generate_list_items(
        params=params,
        search_params=search_params,
        result_count=db.get_person_count(search_params=search_params),
        data=raw,
        type='person')
    listing['data'] = [oparl_person_layout(data=item, params=params)
                       for item in listing['data']]
    return listing
# body meeting list
@app.route('/oparl/body/<string:body_id>/meeting')
def oparl_body_meeting(body_id):
    """HTTP endpoint: meetings belonging to one body."""
    return oparl_basic(oparl_body_meeting_data, params={'body_id': body_id})
def oparl_body_meeting_data(params):
    """Assemble the paginated meeting list for one body (files/items dereferenced)."""
    search_params = oparl_generate_list_search_params(params)
    raw = db.get_meeting(search_params=search_params,
                         limit=app.config['oparl_items_per_page'],
                         deref={'values': ['invitation', 'resultsProtocol', 'agendaItem', 'auxiliaryFile']})
    listing = oparl_generate_list_items(
        params=params,
        search_params=search_params,
        result_count=db.get_meeting_count(search_params=search_params),
        data=raw,
        type='meeting')
    listing['data'] = [oparl_meeting_layout(data=item, params=params)
                       for item in listing['data']]
    return listing
# body paper list
@app.route('/oparl/body/<string:body_id>/paper')
def oparl_body_paper(body_id):
    """HTTP endpoint: papers belonging to one body."""
    return oparl_basic(oparl_body_paper_data, params={'body_id': body_id})
def oparl_body_paper_data(params):
    """Assemble the paginated paper list for one body (related docs dereferenced)."""
    search_params = oparl_generate_list_search_params(params)
    raw = db.get_paper(search_params=search_params,
                       limit=app.config['oparl_items_per_page'],
                       deref={'values': ['mainFile', 'auxiliaryFile', 'consultation', 'location', 'originatorPerson', 'originatorOrganization']})
    listing = oparl_generate_list_items(
        params=params,
        search_params=search_params,
        result_count=db.get_paper_count(search_params=search_params),
        data=raw,
        type='paper')
    listing['data'] = [oparl_paper_layout(data=item, params=params)
                       for item in listing['data']]
    return listing
####################################################
# organization
####################################################
# single organization
@app.route('/oparl/organization/<string:organization_id>')
def oparl_organization(organization_id):
    """HTTP endpoint: a single organization by ObjectId string."""
    return oparl_basic(oparl_organization_data, params={'_id': organization_id})
def oparl_organization_data(params):
    """Fetch one organization by ObjectId; abort 404 when not found."""
    hits = db.get_organization(search_params={'_id': ObjectId(params['_id'])})
    if len(hits) != 1:
        abort(404)
    return oparl_organization_layout(hits[0], params)
def oparl_organization_layout(data, params):
    """Transform an organization document into its OParl representation.

    Mutates and returns `data`.  Statement order is significant (JSON key
    order follows dict insertion order; `_id` is used before being deleted).
    """
    # default values
    data['id'] = "%s/oparl/organization/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'https://oparl.org/schema/1.0/Organization'
    data['body'] = "%s/oparl/body/%s%s" % (app.config['api_url'], data['body'].id, generate_postfix(params))
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations: dates may be datetimes or already strings
    if 'startDate' in data:
        if isinstance(data['startDate'], datetime.datetime):
            data['startDate'] = data['startDate'].strftime("%Y-%m-%d")
    if 'endDate' in data:
        if isinstance(data['endDate'], datetime.datetime):
            data['endDate'] = data['endDate'].strftime("%Y-%m-%d")
    # memberships are stored as backrefs on the membership documents
    data['membership'] = generate_backref_list(db.get_membership(search_params={'organization': DBRef('organization', ObjectId(data['_id']))}), params)
    data['meeting'] = "%s/oparl/organization/%s/meeting%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    # vendor-prefixed passthrough of scraper-internal fields
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal fields not part of the OParl schema)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
@app.route('/oparl/organization/<string:organization_id>/meeting')
def oparl_organization_meeting(organization_id):
    """HTTP endpoint: all meetings of one organization."""
    return oparl_basic(oparl_organization_meeting_data, params={'_id': organization_id})
def oparl_organization_meeting_data(params):
    """Return all meetings referencing this organization, fully rendered."""
    meetings = db.get_meeting(search_params={'organization': DBRef('organization', ObjectId(params['_id']))})
    return [oparl_meeting_layout(meeting, params) for meeting in meetings]
####################################################
# membership
####################################################
# single membership
@app.route('/oparl/membership/<string:membership_id>')
def oparl_membership(membership_id):
    """HTTP endpoint: a single membership by ObjectId string."""
    return oparl_basic(oparl_membership_data, params={'_id': membership_id})
def oparl_membership_data(params):
    """Fetch one membership by ObjectId; abort 404 when not found.

    The original returned None (JSON null) in the unreachable >1 case;
    now any result count other than exactly one yields a 404.
    """
    data = db.get_membership(search_params={'_id': ObjectId(params['_id'])})
    if len(data) != 1:
        abort(404)
    return oparl_membership_layout(data[0], params)
def oparl_membership_layout(data, params):
    """Transform a membership document into its OParl representation.

    Mutates and returns `data`.  Statement order is significant (JSON key
    order follows dict insertion order; `_id` is used before being deleted).
    """
    # default values
    data['id'] = "%s/oparl/membership/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/Membership'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations: dates may be datetimes or already strings
    if 'startDate' in data:
        if isinstance(data['startDate'], datetime.datetime):
            data['startDate'] = data['startDate'].strftime("%Y-%m-%d")
    if 'endDate' in data:
        if isinstance(data['endDate'], datetime.datetime):
            data['endDate'] = data['endDate'].strftime("%Y-%m-%d")
    data['organization'] = "%s/oparl/organization/%s%s" % (app.config['api_url'], data['organization'].id, generate_postfix(params))
    # NOTE(review): assumes at least one person references this membership;
    # [0] raises IndexError for an orphaned membership - confirm invariant.
    data['person'] = "%s/oparl/person/%s%s" % (app.config['api_url'], db.get_person(search_params={'membership': DBRef('membership', ObjectId(data['_id']))})[0]['_id'], generate_postfix(params))
    # vendor-prefixed passthrough of scraper-internal fields
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal fields not part of the OParl schema)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
####################################################
# person
####################################################
# single person
@app.route('/oparl/person/<string:person_id>')
def oparl_person(person_id):
    """HTTP endpoint: a single person by ObjectId string."""
    return oparl_basic(oparl_person_data, params={'_id': person_id})
def oparl_person_data(params):
    """Fetch one person (memberships dereferenced); abort 404 when not found.

    The original returned None (JSON null) in the unreachable >1 case;
    now any result count other than exactly one yields a 404.
    """
    data = db.get_person(search_params={'_id': ObjectId(params['_id'])},
                         deref={'values': ['membership']})
    if len(data) != 1:
        abort(404)
    return oparl_person_layout(data[0], params)
def oparl_person_layout(data, params):
    """Transform a person document into its OParl representation.

    Expects `data['membership']` to be dereferenced membership documents
    (see the deref in the callers); each is rendered inline.
    Mutates and returns `data`; statement order is significant.
    """
    # default values
    data['id'] = "%s/oparl/person/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/Person'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # render each dereferenced membership inline
    memberships = []
    for single_membership in data['membership']:
        memberships.append(oparl_membership_layout(single_membership, params))
    data['membership'] = memberships
    # vendor-prefixed passthrough of scraper-internal fields
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal fields not part of the OParl schema)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
####################################################
# meeting
####################################################
# single meeting
@app.route('/oparl/meeting/<string:meeting_id>')
def oparl_meeting(meeting_id):
    """HTTP endpoint: a single meeting by ObjectId string."""
    return oparl_basic(oparl_meeting_data, params={'_id': meeting_id})
def oparl_meeting_data(params):
    """Fetch one meeting (files/items dereferenced); abort 404 when not found.

    The original returned None (JSON null) in the unreachable >1 case;
    now any result count other than exactly one yields a 404.
    """
    data = db.get_meeting(search_params={'_id': ObjectId(params['_id'])},
                          deref={'values': ['invitation', 'resultsProtocol', 'agendaItem', 'auxiliaryFile']})
    if len(data) != 1:
        abort(404)
    return oparl_meeting_layout(data[0], params)
def oparl_meeting_layout(data, params):
    """Transform a meeting document into its OParl representation.

    Expects invitation/resultsProtocol/agendaItem/auxiliaryFile to be
    dereferenced documents (see the deref in the callers).  Mutates and
    returns `data`; statement order is significant (JSON key order).
    """
    # default values
    data['id'] = "%s/oparl/meeting/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/Meeting'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations: timestamps may be datetimes or strings
    if 'start' in data:
        if isinstance(data['start'], datetime.datetime):
            data['start'] = data['start'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    if 'end' in data:
        if isinstance(data['end'], datetime.datetime):
            data['end'] = data['end'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # non-standard fields get the vendor prefix
    if 'address' in data:
        data['PolitikBeiUns:address'] = data['address']
        del data['address']
    if 'room' in data:
        data['PolitikBeiUns:room'] = data['room']
        del data['room']
    # if invitation is list -> Bug
    if 'invitation' in data:
        if isinstance(data['invitation'], list):
            # invitation is list -> Bug: unwrap the first element (or drop
            # the key entirely when the list is empty)
            if len(data['invitation']):
                data['invitation'] = data['invitation'][0]
            else:
                del data['invitation']
    # render dereferenced file documents inline; drop falsy (unresolvable) refs
    if 'invitation' in data:
        if data['invitation']:
            data['invitation'] = oparl_file_layout(data['invitation'], params)
        else:
            del data['invitation']
    if 'resultsProtocol' in data:
        if data['resultsProtocol']:
            data['resultsProtocol'] = oparl_file_layout(data['resultsProtocol'], params)
        else:
            del data['resultsProtocol']
    if 'verbatimProtocol' in data:
        if data['verbatimProtocol']:
            data['verbatimProtocol'] = oparl_file_layout(data['verbatimProtocol'], params)
        else:
            del data['verbatimProtocol']
    # participants become membership backref URLs
    if 'participant' in data:
        data['membership'] = generate_backref_list(data['participant'], params)
    if 'auxiliaryFile' in data:
        auxiliaryFiles = []
        for single_auxiliaryFile in data['auxiliaryFile']:
            if single_auxiliaryFile:
                auxiliaryFiles.append(oparl_file_layout(single_auxiliaryFile, params))
        if len(auxiliaryFiles):
            data['auxiliaryFile'] = auxiliaryFiles
        else:
            del data['auxiliaryFile']
    if 'agendaItem' in data:
        agendaItems = []
        for single_agendaItem in data['agendaItem']:
            if single_agendaItem:
                agendaItems.append(oparl_agendaItem_layout(single_agendaItem, params))
        if len(agendaItems):
            data['agendaItem'] = agendaItems
        else:
            del data['agendaItem']
    # vendor-prefixed passthrough of scraper-internal fields
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal fields not part of the OParl schema)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
####################################################
# agendaItem
####################################################
# single agendaItem
@app.route('/oparl/agendaItem/<string:agendaItem_id>')
def oparl_agendaItem(agendaItem_id):
    """HTTP endpoint: a single agendaItem by ObjectId string."""
    return oparl_basic(oparl_agendaItem_data, params={'_id': agendaItem_id})
def oparl_agendaItem_data(params):
    """Fetch one agendaItem and attach its parent-meeting backref URL.

    Aborts with 404 when the id matches no document.  (The original
    returned None - JSON null - in the unreachable >1 case.)
    """
    data = db.get_agendaItem(search_params={'_id': ObjectId(params['_id'])})
    if len(data) != 1:
        abort(404)
    data = oparl_agendaItem_layout(data[0], params)
    # the meeting stores the agendaItem ref, so look it up backwards
    meeting = db.get_meeting(search_params={'agendaItem': DBRef('agendaItem', ObjectId(params['_id']))})
    if len(meeting):
        data['meeting'] = "%s/oparl/meeting/%s%s" % (app.config['api_url'], meeting[0]['_id'], generate_postfix(params))
    return data
def oparl_agendaItem_layout(data, params):
    """Transform an agendaItem document into its OParl representation.

    Mutates and returns `data`; statement order is significant.
    """
    # default values
    data['id'] = "%s/oparl/agendaItem/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/AgendaItem'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations: timestamps may be datetimes or strings.
    # NOTE(review): start/end are emitted without the +01:00 offset other
    # layouts use - confirm whether that is intentional.
    if 'start' in data:
        if isinstance(data['start'], datetime.datetime):
            data['start'] = data['start'].strftime("%Y-%m-%dT%H:%M:%S")
    if 'end' in data:
        if isinstance(data['end'], datetime.datetime):
            data['end'] = data['end'].strftime("%Y-%m-%dT%H:%M:%S")
    # consultation is stored as a DBRef; emit its canonical URL
    if 'consultation' in data:
        data['consultation'] = "%s/oparl/consultation/%s%s" % (app.config['api_url'], data['consultation'].id, generate_postfix(params))
    # vendor-prefixed passthrough of scraper-internal fields
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal fields not part of the OParl schema)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
####################################################
# consultation
####################################################
# single consultation
@app.route('/oparl/consultation/<string:consultation_id>')
def oparl_consultation(consultation_id):
    """HTTP endpoint: a single consultation by ObjectId string."""
    return oparl_basic(oparl_consultation_data, params={'_id': consultation_id})
def oparl_consultation_data(params):
    """Fetch one consultation and attach its agendaItem backref URL.

    Aborts with 404 when the id matches no document.  (The original
    returned None - JSON null - in the unreachable >1 case.)
    """
    data = db.get_consultation(search_params={'_id': ObjectId(params['_id'])})
    if len(data) != 1:
        abort(404)
    data = oparl_consultation_layout(data[0], params)
    # the agendaItem stores the consultation ref, so look it up backwards
    agendaItem = db.get_agendaItem(search_params={'consultation': DBRef('consultation', ObjectId(params['_id']))})
    if len(agendaItem):
        data['agendaItem'] = "%s/oparl/agendaItem/%s%s" % (app.config['api_url'], agendaItem[0]['_id'], generate_postfix(params))
    return data
def oparl_consultation_layout(data, params):
    """Transform a consultation document into its OParl representation.

    Mutates and returns `data`; statement order is significant.
    """
    # default values
    data['id'] = "%s/oparl/consultation/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/Consultation'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations: date may be a datetime or already a string
    if 'publishedDate' in data:
        if isinstance(data['publishedDate'], datetime.datetime):
            data['publishedDate'] = data['publishedDate'].strftime("%Y-%m-%d")
    # paper is stored as a DBRef; emit its canonical URL
    if 'paper' in data:
        data['paper'] = "%s/oparl/paper/%s%s" % (app.config['api_url'], data['paper'].id, generate_postfix(params))
    # vendor-prefixed passthrough of scraper-internal fields
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    # delete stuff (internal fields not part of the OParl schema)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    return data
####################################################
# paper
####################################################
# single paper
@app.route('/oparl/paper/<string:paper_id>')
def oparl_paper(paper_id):
    """HTTP endpoint: a single paper by ObjectId string."""
    return oparl_basic(oparl_paper_data, params={'_id': paper_id})
def oparl_paper_data(params):
    """Fetch one paper (related docs dereferenced); abort 404 when not found.

    The original returned None (JSON null) in the unreachable >1 case;
    now any result count other than exactly one yields a 404.
    """
    data = db.get_paper(search_params={'_id': ObjectId(params['_id'])},
                        deref={'values': ['mainFile', 'auxiliaryFile', 'consultation', 'location', 'originatorPerson', 'originatorOrganization']})
    if len(data) != 1:
        abort(404)
    return oparl_paper_layout(data[0], params)
def oparl_paper_layout(data, params):
    """Transform a paper document into its OParl representation.

    Mutates and returns `data`; statement order is significant (JSON key
    order follows dict insertion order; `_id` is used before being deleted).
    """
    # default values
    data['id'] = "%s/oparl/paper/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'http://oparl.org/schema/1.0/Paper'
    data['body'] = generate_single_url(params=params, type='body', id=data['body'].id)
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # mainFile: render the dereferenced document, drop unresolvable refs
    if 'mainFile' in data:
        if data['mainFile']:
            data['mainFile'] = oparl_file_layout(data['mainFile'], params)
        else:
            del data['mainFile']
    # auxiliaryFiles: keep only resolvable (truthy) entries
    if 'auxiliaryFile' in data:
        auxiliaryFiles = []
        for single_auxiliaryFile in data['auxiliaryFile']:
            if single_auxiliaryFile:
                auxiliaryFiles.append(oparl_file_layout(single_auxiliaryFile, params))
        if len(auxiliaryFiles):
            data['auxiliaryFile'] = auxiliaryFiles
        else:
            # Fixed: the original deleted the non-existent key 'data'
            # (KeyError) instead of the empty 'auxiliaryFile' list.
            del data['auxiliaryFile']
    # consultations are stored as backrefs on the consultation documents
    data['consultation'] = []
    consultations = db.get_consultation(search_params={'paper': DBRef('paper', ObjectId(data['_id']))})
    for consultation in consultations:
        data['consultation'].append(oparl_consultation_layout(consultation, params))
    if len(data['consultation']) == 0:
        del data['consultation']
    # additional transformations
    if 'publishedDate' in data:
        if isinstance(data['publishedDate'], datetime.datetime):
            data['PolitikBeiUns:publishedDate'] = data['publishedDate'].strftime("%Y-%m-%d")
        del data['publishedDate']
    # TODO for data model
    if 'georeferences' in data:
        del data['georeferences']
    # Fixed: the original re-tested 'georeferences' here (always False right
    # after the delete above); the intended key is the generated twin field.
    if 'georeferencesGenerated' in data:
        del data['georeferencesGenerated']
    if 'title' in data:
        del data['title']
    # vendor-prefixed passthrough of scraper-internal fields
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    if 'nameShort' in data:
        data['reference'] = data['nameShort']
    # delete stuff (internal fields not part of the OParl schema)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'slug' in data:
        del data['slug']
    if 'nameShort' in data:
        del data['nameShort']
    if 'georeferencesGenerated' in data:
        del data['georeferencesGenerated']
    return data
####################################################
# file
####################################################
# single file
@app.route('/oparl/file/<string:file_id>')
def oparl_document(file_id):
    """HTTP endpoint: a single file by ObjectId string."""
    return oparl_basic(oparl_file_data, params={'_id': file_id})
def oparl_file_data(params):
    """Fetch one file, render it and attach backref URL lists for the
    meetings, agenda items and papers referencing it; abort 404 if absent.
    """
    data = db.get_file(search_params={'_id': ObjectId(params['_id'])})
    if len(data) != 1:
        abort(404)
    data = oparl_file_layout(data[0], params)
    file_ref = DBRef('file', ObjectId(params['_id']))
    # Get Backrefs for Meeting: invitation/resultsProtocol/verbatimProtocol
    # are single refs on the meeting, auxiliaryFile is a list field.
    data['meeting'] = []
    for ref_field in ('invitation', 'resultsProtocol', 'verbatimProtocol'):
        meeting = db.get_meeting(search_params={ref_field: file_ref})
        if len(meeting):
            data['meeting'].append("%s/oparl/meeting/%s%s" % (app.config['api_url'], meeting[0]['_id'], generate_postfix(params)))
    # Fixed: the original queried {'verbatimProtocol': DBRef('auxiliaryFile', ...)}
    # here (copy-paste); the intent is meetings whose auxiliaryFile list
    # contains this file.
    for single_meeting in db.get_meeting(search_params={'auxiliaryFile': file_ref}):
        data['meeting'].append("%s/oparl/meeting/%s%s" % (app.config['api_url'], single_meeting['_id'], generate_postfix(params)))
    if len(data['meeting']) == 0:
        del data['meeting']
    # Get Backrefs for AgendaItem
    data['agendaItem'] = []
    agendaItem = db.get_agendaItem(search_params={'resolutionFile': file_ref})
    if len(agendaItem):
        data['agendaItem'].append("%s/oparl/agendaItem/%s%s" % (app.config['api_url'], agendaItem[0]['_id'], generate_postfix(params)))
    for single_agendaItem in db.get_agendaItem(search_params={'auxiliaryFile': file_ref}):
        data['agendaItem'].append("%s/oparl/agendaItem/%s%s" % (app.config['api_url'], single_agendaItem['_id'], generate_postfix(params)))
    if len(data['agendaItem']) == 0:
        del data['agendaItem']
    # Get Backrefs for Paper.  Fixed: the original called db.get_agendaItem
    # here (copy-paste), presenting agendaItem ids as paper URLs.
    data['paper'] = []
    paper = db.get_paper(search_params={'mainFile': file_ref})
    if len(paper):
        data['paper'].append("%s/oparl/paper/%s%s" % (app.config['api_url'], paper[0]['_id'], generate_postfix(params)))
    for single_paper in db.get_paper(search_params={'auxiliaryFile': file_ref}):
        data['paper'].append("%s/oparl/paper/%s%s" % (app.config['api_url'], single_paper['_id'], generate_postfix(params)))
    if len(data['paper']) == 0:
        del data['paper']
    return data
def oparl_file_layout(data, params):
    """Transform a file document into its OParl representation.

    Mutates and returns `data`; statement order is significant (JSON key
    order follows dict insertion order; `_id` is used before being deleted).
    """
    # default values
    data['id'] = "%s/oparl/file/%s%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['type'] = 'https://oparl.org/schema/1.0/File'
    data['body'] = "%s/oparl/body/%s%s" % (app.config['api_url'], data['body'].id, generate_postfix(params))
    data['created'] = data['created'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    data['modified'] = data['modified'].strftime("%Y-%m-%dT%H:%M:%S+01:00")
    # additional transformations: URLs served by the access/download routes
    data['accessUrl'] = "%s/oparl/file/%s/access%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    data['downloadUrl'] = "%s/oparl/file/%s/download%s" % (app.config['api_url'], data['_id'], generate_postfix(params))
    if 'date' in data:
        if isinstance(data['date'], datetime.datetime):
            data['date'] = data['date'].strftime("%Y-%m-%d")
    # TODO: rename stuff - map internal field names onto OParl ones
    if 'fulltext' in data:
        data['text'] = data['fulltext']
        del data['fulltext']
    if 'mimetype' in data:
        data['mimeType'] = data['mimetype']
        del data['mimetype']
    if 'filename' in data:
        data['fileName'] = data['filename']
        del data['filename']
    # vendor-prefixed passthrough of scraper-internal fields
    if 'originalId' in data:
        data['PolitikBeiUns:originalId'] = data['originalId']
    if 'originalUrl' in data:
        data['PolitikBeiUns:originalUrl'] = data['originalUrl']
    if 'originalDownloadPossible' in data:
        data['PolitikBeiUns:originalDownloadPossible'] = data['originalDownloadPossible']
    # delete stuff (internal fields not part of the OParl schema)
    del data['_id']
    if 'originalId' in data:
        del data['originalId']
    if 'originalUrl' in data:
        del data['originalUrl']
    if 'originalDownloadPossible' in data:
        del data['originalDownloadPossible']
    if 'file' in data:
        del data['file']
    if 'thumbnails' in data:
        del data['thumbnails']
    if 'fulltextGenerated' in data:
        del data['fulltextGenerated']
    if 'thumbnailsGenerated' in data:
        del data['thumbnailsGenerated']
    return data
# file accessUrl
@app.route('/oparl/file/<string:file_id>/access')
def oparl_file_accessUrl(file_id):
    """HTTP endpoint: stream a file's raw content (OParl accessUrl)."""
    file_params = {'file_id': file_id}
    return oparl_basic(oparl_file_accessUrl_data, params=file_params, direct_output=True)
def oparl_file_accessUrl_data(params):
    """Stream the binary content of a file for inline access.

    Returns a Flask response with caching headers; aborts 404 when the id
    is unknown, 410 when the file was depublished, 500 when the binary is
    missing for no recorded reason.
    """
    file_data = db.get_file(deref={'values': ['file']},
                            search_params={'_id': ObjectId(params['file_id'])})
    if len(file_data) == 0:
        # TODO: Render a more informative 404 page
        abort(404)
    file_data = file_data[0]
    # extension doesn't match file extension (avoiding arbitrary URLs)
    #proper_extension = attachment_info['filename'].split('.')[-1]
    #if proper_extension != extension:
    #    abort(404)
    # 'file' property is not set (e.g. due to depublication)
    if 'file' not in file_data:
        if 'depublication' in file_data:
            abort(410)  # Gone
        else:
            # TODO: log this as unexplicable...
            abort(500)
    # handle conditional GET
    #if 'If-Modified-Since' in request.headers:
    #    file_date = attachment_info['file']['uploadDate'].replace(tzinfo=None)
    #    request_date = util.parse_rfc1123date(request.headers['If-Modified-Since'])
    #    difference = file_date - request_date
    #    if difference < datetime.timedelta(0, 1):  # 1 second
    #        return Response(status=304)
    #if 'if-none-match' in request.headers:
    #    print "Conditional GET: If-None-Match"
    # TODO: handle ETag in request
    # stream the GridFS (or similar) handle into the response body
    handler = db.get_file_data(file_data['file']['_id'])
    response = make_response(handler.read(), 200)
    response.mimetype = file_data['mimetype']
    response.headers['X-Robots-Tag'] = 'noarchive'
    response.headers['ETag'] = file_data['sha1Checksum']
    response.headers['Last-modified'] = util.rfc1123date(file_data['file']['uploadDate'])
    # cache for 30 days
    response.headers['Expires'] = util.expires_date(hours=(24 * 30))
    response.headers['Cache-Control'] = util.cache_max_age(hours=(24 * 30))
    return response
# file downloadUrl
@app.route('/oparl/file/<string:file_id>/download')
def oparl_file_downloadUrl(file_id):
    """HTTP endpoint: download a file as attachment (OParl downloadUrl)."""
    file_params = {'file_id': file_id}
    return oparl_basic(oparl_file_downloadUrl_data, params=file_params, direct_output=True)
def oparl_file_downloadUrl_data(params):
    """Stream the binary content of a file as an attachment download.

    Same as oparl_file_accessUrl_data but adds a Content-Disposition
    header; aborts 404/410/500 analogously.
    """
    file_data = db.get_file(deref={'values': ['file']},
                            search_params={'_id': ObjectId(params['file_id'])})
    if len(file_data) == 0:
        # TODO: Render a more informative 404 page
        abort(404)
    file_data = file_data[0]
    # 'file' property is not set (e.g. due to depublication)
    if 'file' not in file_data:
        if 'depublication' in file_data:
            abort(410)  # Gone
        else:
            # TODO: log this as unexplicable...
            abort(500)
    handler = db.get_file_data(file_data['file']['_id'])
    response = make_response(handler.read(), 200)
    response.mimetype = file_data['mimetype']
    response.headers['X-Robots-Tag'] = 'noarchive'
    response.headers['ETag'] = file_data['sha1Checksum']
    response.headers['Last-modified'] = util.rfc1123date(file_data['file']['uploadDate'])
    # cache for 30 days
    response.headers['Expires'] = util.expires_date(hours=(24 * 30))
    response.headers['Cache-Control'] = util.cache_max_age(hours=(24 * 30))
    # NOTE(review): filename is interpolated unquoted/unescaped into the
    # header - a filename containing ';' or '"' yields a malformed header;
    # consider quoting per RFC 6266.
    response.headers['Content-Disposition'] = 'attachment; filename=' + file_data['filename']
    return response
####################################################
# misc
####################################################
def oparl_basic(content_fuction, params=None, direct_output=False):
    """Common wrapper for all OParl endpoints.

    Parses shared request arguments (html, i, q, page, callback), calls the
    endpoint-specific `content_fuction` with the merged params, and wraps
    the result as JSON / JSONP / HTML with caching and CORS headers.

    NOTE(review): 'content_fuction' is a long-standing typo ('function');
    kept because callers may pass it by keyword.
    """
    start_time = time.time()
    jsonp_callback = request.args.get('callback', None)
    if not params:
        params = {}
    request_info = {}
    # html=1 renders the pretty HTML view instead of raw JSON
    html = request.args.get('html', False)
    if html:
        request_info['html'] = 1
    # i=1 wraps the response in status/duration metadata
    extended_info = request.args.get('i')
    extended_info = extended_info == '1'
    if extended_info:
        request_info['i'] = 1
    # q carries the pagination cursor (e.g. 'modified:<...')
    search_query = request.args.get('q', "")
    if search_query:
        request_info['q'] = search_query
    page = request.args.get('page')
    try:
        page = int(page)
    except (ValueError, TypeError):
        page = 1
    # NOTE(review): stored under 'page', but generate_postfix() reads 'p' -
    # verify which key is intended; as written the page number never
    # propagates into generated links.
    request_info['page'] = page
    params.update(request_info)
    response = content_fuction(params)
    if direct_output:
        # content function already built a full Flask response (file routes)
        return response
    if extended_info:
        ret = {
            'status': 0,
            'duration': int((time.time() - start_time) * 1000),
            'request': request_info,
            'response': response
        }
    else:
        ret = response
    json_output = json.dumps(ret, cls=util.MyEncoder)#, sort_keys=True)
    if jsonp_callback is not None:
        json_output = jsonp_callback + '(' + json_output + ')'
    if html:
        # round-trip through the JSON string to preserve key order in the view
        return render_template('oparl.html', data=json.JSONDecoder(object_pairs_hook=OrderedDict).decode(json_output))
    else:
        response = make_response(json_output, 200)
        response.mimetype = 'application/json'
        response.headers['Expires'] = util.expires_date(hours=24)
        response.headers['Cache-Control'] = util.cache_max_age(hours=24)
        response.headers['Access-Control-Allow-Origin'] = '*'
        return response
def generate_postfix(params, additional_params=None):
    """Build the query-string postfix ('?a=1&b=2' or '') that OParl URLs
    carry between requests.

    Recognised keys in `params`: 'html' (always propagated), 'p' (only for
    pages > 1) and 'i' (extended-info flag).  `additional_params` is an
    optional list of pre-formatted 'key=value' strings appended verbatim.
    """
    # None default instead of the original mutable [] default (shared-state
    # pitfall); behavior for all existing callers is unchanged.
    if additional_params is None:
        additional_params = []
    postfix = []
    if 'html' in params:
        postfix.append('html=' + str(params['html']))
    if 'p' in params:
        # NOTE(review): oparl_basic stores the page number under 'page',
        # not 'p' - verify which key is intended.
        if params['p'] > 1:
            postfix.append('p=' + str(params['p']))
    if 'i' in params:
        postfix.append('i=' + str(params['i']))
    postfix = postfix + additional_params
    if len(postfix):
        return '?' + '&'.join(postfix)
    return ''
def generate_single_url(params=None, type='', id=''):
    """Return the canonical URL of a single OParl object of `type`/`id`.

    `params` defaults to None instead of the original mutable {} default
    (shared-state pitfall); an empty dict is substituted internally.
    """
    if params is None:
        params = {}
    return "%s/oparl/%s/%s%s" % (app.config['api_url'], type, id, generate_postfix(params))
def generate_single_backref_url(params={}, get='', type='', reverse_type='', id=''):
    """Resolve the single object of `type` that back-references `id` via a
    `reverse_type` DBRef and return its canonical URL.

    `get` names the db accessor (e.g. 'get_meeting').
    """
    getter = getattr(db, get)
    hits = getter(search_params={reverse_type: DBRef(reverse_type, ObjectId(id))}, values={'_id': 1})
    uid = str(hits[0]['_id'])
    return "%s/oparl/%s/%s%s" % (app.config['api_url'], type, uid, generate_postfix(params))
def generate_backref_list(data, params):
    """Map membership documents to their canonical membership URLs."""
    prefix = app.config['api_url']
    suffix = generate_postfix(params)
    return ["%s/oparl/membership/%s%s" % (prefix, item['_id'], suffix)
            for item in data]
def oparl_generate_list_search_params(params):
    """Build DB search params for a body-scoped list endpoint, honouring
    the 'q=modified:<...' pagination cursor when present."""
    search_params = {'body': DBRef('body', ObjectId(params['body_id']))}
    if 'q' in params:
        cursor = params['q'].split(':<')[1]
        search_params['modified'] = {
            '$lt': datetime.datetime.strptime(cursor, "%Y-%m-%dT%H:%M:%S.%f")
        }
    return search_params
def oparl_generate_list_items(params, search_params, result_count, data, type):
    """Wrap a list-endpoint result with pagination metadata and cursor links.

    `type` is the OParl sub-resource name ('organization', 'person', ...)
    used to build the body-scoped URLs; `search_params` indicates (via a
    'modified' cursor) whether this is a follow-up page.
    """
    result = {
        'data': data,
        'pagination': {
            'elementsPerPage': app.config['oparl_items_per_page']
        },
        'links': {
        }
    }
    if result_count > app.config['oparl_items_per_page']:
        # Cursor for the next page is the 'modified' of the *last* item on
        # this page.  (The original hard-coded index 9, silently assuming a
        # page size of exactly 10.)
        cursor = result['data'][-1]['modified'].strftime("%Y-%m-%dT%H:%M:%S.%f")
        result['links']['next'] = '%s/oparl/body/%s/%s%s' % (app.config['api_url'], params['body_id'], type, generate_postfix(params, ['q=modified:<%s' % cursor]))
    if 'modified' in search_params:
        result['links']['first'] = '%s/oparl/body/%s/%s%s' % (app.config['api_url'], params['body_id'], type, generate_postfix(params))
    return result
|
# -*- coding: utf-8 -*-
#
# PASTAS documentation build configuration file, created by
# sphinx-quickstart on Wed May 11 12:38:06 2016.
# Repository setup is according to:
# http://gisellezeno.com/tutorials/sphinx-for-python-documentation.html
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import alabaster

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------
extensions = [
    'alabaster',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    # NOTE(review): this extension has been observed to break ReadTheDocs
    # builds — verify it is still required before keeping it.
    'IPython.sphinxext.ipython_console_highlighting',# lowercase didn't work
    'nbsphinx'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PASTAS'
copyright = u'2017, M. Bakker, R.A. Collenteur, R. Calje, F. Schaars'
author = u'M. Bakker, R.A. Collenteur, R. Calje, F. Schaars'
# Make the |project| substitution available in every document.
rst_epilog = '.. |project| replace:: %s' % project

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): presumably mirrors the pastas package version — keep both
# values in sync on release.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_theme_path = [alabaster.get_path()]
html_static_path = ['_static']
html_theme_options = {
    'logo': 'logo.png',
    'travis_button': True,
    'logo_name': False,
    'github_user': 'pastas',
    'github_repo': 'pastas',
    'github_banner': True,
    'github_button': True,
    'github_type': 'watch',
    'github_count': True,
    'description': "Pastas is an open-source framework for the analysis of "
                   "hydrological time series.",
    'codecov_button': True,
}
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
        # located at _templates/
        'sidebar.html',
    ]
}

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "PASTAS"

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True

# Output file base name for HTML help builder.
htmlhelp_basename = 'PASTASdoc'

# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'PASTAS.tex', u'PASTAS Documentation',
     u'M. Bakker, R.A. Collenteur, R. Calje, F. Schaars', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pastas', u'PASTAS Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# TODO(review): replace the placeholder one-line description below.
texinfo_documents = [
    (master_doc, 'PASTA', u'PASTAS Documentation',
     author, 'PASTAS', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/3': None,
                       'http://pandas.pydata.org/pandas-docs/stable/': None,
                       'https://docs.scipy.org/doc/scipy/reference/': None,
                       'https://docs.scipy.org/doc/numpy/': None}
Fix readthedocs build
# -*- coding: utf-8 -*-
#
# PASTAS documentation build configuration file, created by
# sphinx-quickstart on Wed May 11 12:38:06 2016.
# Repository setup is according to:
# http://gisellezeno.com/tutorials/sphinx-for-python-documentation.html
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import alabaster

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------
# NOTE(review): the IPython console-highlighting extension that an earlier
# revision listed here appears to have been dropped to fix the ReadTheDocs
# build — do not re-add it without verifying.
extensions = [
    'alabaster',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'nbsphinx'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PASTAS'
copyright = u'2017, M. Bakker, R.A. Collenteur, R. Calje, F. Schaars'
author = u'M. Bakker, R.A. Collenteur, R. Calje, F. Schaars'
# Make the |project| substitution available in every document.
rst_epilog = '.. |project| replace:: %s' % project

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): presumably mirrors the pastas package version — keep both
# values in sync on release.
version = '0.9'
# The full version, including alpha/beta/rc tags.
release = '0.9'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_theme_path = [alabaster.get_path()]
html_static_path = ['_static']
html_theme_options = {
    'logo': 'logo.png',
    'travis_button': True,
    'logo_name': False,
    'github_user': 'pastas',
    'github_repo': 'pastas',
    'github_banner': True,
    'github_button': True,
    'github_type': 'watch',
    'github_count': True,
    'description': "Pastas is an open-source framework for the analysis of "
                   "hydrological time series.",
    'codecov_button': True,
}
html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
        # located at _templates/
        'sidebar.html',
    ]
}

# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "PASTAS"

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True

# Output file base name for HTML help builder.
htmlhelp_basename = 'PASTASdoc'

# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'PASTAS.tex', u'PASTAS Documentation',
     u'M. Bakker, R.A. Collenteur, R. Calje, F. Schaars', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = 'logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'pastas', u'PASTAS Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# TODO(review): replace the placeholder one-line description below.
texinfo_documents = [
    (master_doc, 'PASTA', u'PASTAS Documentation',
     author, 'PASTAS', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/3': None,
                       'http://pandas.pydata.org/pandas-docs/stable/': None,
                       'https://docs.scipy.org/doc/scipy/reference/': None,
                       'https://docs.scipy.org/doc/numpy/': None}
|
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from pbr import version as pbr_version

# Branding/identity constants used elsewhere in Cinder.
CINDER_VENDOR = "OpenStack Foundation"
CINDER_PRODUCT = "OpenStack Cinder"
CINDER_PACKAGE = None  # OS distro package version suffix
loaded = False
# Version information is delegated to pbr, which derives it from package
# metadata / git.  Note: version_string is the *bound method*, not a string —
# callers invoke version_string() to get the text.
version_info = pbr_version.VersionInfo('cinder')
version_string = version_info.version_string
Remove runtime dep on python-pbr, python-d2to1
Requires RPM spec to fill in REDHATCINDERVERSION.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Branding/identity constants used elsewhere in Cinder.
CINDER_VENDOR = "OpenStack Foundation"
CINDER_PRODUCT = "OpenStack Cinder"
CINDER_PACKAGE = None  # OS distro package version suffix
loaded = False
class VersionInfo:
    """Static stand-in for pbr's VersionInfo, avoiding a runtime pbr dependency.

    The two placeholder strings are substituted with real values by the RPM
    spec at package build time.
    """

    version = "REDHATCINDERVERSION"
    release = "REDHATCINDERRELEASE"

    def release_string(self):
        """Return the full '<version>-<release>' identifier."""
        return '{0}-{1}'.format(self.version, self.release)

    def version_string(self):
        """Return just the version component."""
        return self.version


# Module-level singletons mirroring the pbr-based API: version_string is the
# bound method, so callers invoke version_string() to get the text.
version_info = VersionInfo()
version_string = version_info.version_string
|
#copyright ReportLab Inc. 2000
#see license.txt for license details
#history http://cvs.sourceforge.net/cgi-bin/cvsweb.cgi/reportlab/platypus/paragraph.py?cvsroot=reportlab
#$Header: /tmp/reportlab/reportlab/platypus/paragraph.py,v 1.34 2000/12/01 01:53:50 aaron_watters Exp $
__version__=''' $Id: paragraph.py,v 1.34 2000/12/01 01:53:50 aaron_watters Exp $ '''
import string
from types import StringType, ListType
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.platypus.paraparser import ParaParser
from reportlab.platypus.flowables import Flowable
from reportlab.lib.colors import Color
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.utils import _className
from copy import deepcopy
from reportlab.lib.abag import ABag
class ParaLines(ABag):
    """class ParaLines contains the broken into lines representation of Paragraphs
    """
    # NOTE(review): ABag presumably provides attribute-bag behaviour (instances
    # created from arbitrary keyword attributes) — see reportlab.lib.abag.

#our one and only parser
# XXXXX if the parser has any internal state using only one is probably a BAD idea!
_parser=ParaParser()
def cleanBlockQuotedText(text,joiner=' '):
    """This is an internal utility which takes triple-
    quoted text form within the document and returns
    (hopefully) the paragraph the user intended originally.

    Each source line is stripped of leading whitespace and the lines are
    joined with *joiner* (default: a single space).
    """
    # Use str methods rather than the string-module functions (string.strip,
    # string.split, ...): those were removed in Python 3 and the methods
    # behave identically on Python 2 as well.
    stripped = text.strip()
    trimmed_lines = [line.lstrip() for line in stripped.split('\n')]
    return joiner.join(trimmed_lines)
def _leftDrawParaLine( tx, offset, extraspace, words, last=0):
tx.setXPos(offset)
tx._textOut(string.join(words),1)
def _centerDrawParaLine( tx, offset, extraspace, words, last=0):
m = offset + 0.5 * extraspace
tx.setXPos(m)
tx._textOut(string.join(words),1)
def _rightDrawParaLine( tx, offset, extraspace, words, last=0):
m = offset + extraspace
tx.setXPos(m)
tx._textOut(string.join(words),1)
def _justifyDrawParaLine( tx, offset, extraspace, words, last=0):
tx.setXPos(offset)
text = string.join(words)
if last:
#last one, left align
tx._textOut(text,1)
else:
tx.setWordSpace(extraspace / float(len(words)-1))
tx._textOut(text,1)
tx.setWordSpace(0)
def _putFragLine(tx,words):
    """Emit one line of styled fragments into text object *tx*.

    Each frag either carries a cbDefn (a canvas callback to invoke in-line)
    or is a styled run of text; font, fill colour and rise are switched only
    when they differ from the current state cached on tx / tx.XtraState.
    """
    for f in words:
        if hasattr(f,'cbDefn'):
            # Callback fragment: look the handler up on the canvas by name.
            func = getattr(tx._canvas,f.cbDefn.name,None)
            if not func:
                # Parenthesised raise: the Python-2-only "raise E, msg" form
                # is a syntax error on Python 3; this form works on both.
                raise AttributeError("Missing %s callback attribute '%s'" % (f.cbDefn.kind,f.cbDefn.name))
            func(tx._canvas,f.cbDefn.kind,f.cbDefn.label)
            # Still terminate the line if the callback was the last frag.
            if f is words[-1]: tx._textOut('',1)
        else:
            if (tx._fontname,tx._fontsize)!=(f.fontName,f.fontSize):
                tx._setFont(f.fontName, f.fontSize)
            if tx.XtraState.textColor!=f.textColor:
                tx.XtraState.textColor = f.textColor
                tx.setFillColor(f.textColor)
            if tx.XtraState.rise!=f.rise:
                tx.XtraState.rise=f.rise
                tx.setRise(f.rise)
            tx._textOut(f.text,f is words[-1]) # cheap textOut
def _leftDrawParaLineX( tx, offset, line, last=0):
    # Fragment-aware (kind=1) variant of _leftDrawParaLine: delegates the
    # styled output to _putFragLine.  *last* is unused for left alignment.
    tx.setXPos(offset)
    _putFragLine(tx, line.words)
def _centerDrawParaLineX( tx, offset, line, last=0):
    # Fragment-aware (kind=1) centred line: shift by half the unused width.
    m = offset+0.5*line.extraSpace
    tx.setXPos(m)
    _putFragLine(tx, line.words)
def _rightDrawParaLineX( tx, offset, line, last=0):
    # Fragment-aware (kind=1) right-aligned line: shift by the full unused width.
    m = offset+line.extraSpace
    tx.setXPos(m)
    _putFragLine(tx, line.words)
def _justifyDrawParaLineX( tx, offset, line, last=0):
    # Fragment-aware (kind=1) justified line: spread line.extraSpace across
    # the wordCount-1 gaps; the paragraph's final line (last=1) is drawn
    # left-aligned instead.
    if last:
        #last one, left align
        tx.setXPos(offset)
        _putFragLine(tx, line.words)
    else:
        tx.setXPos(offset)
        tx.setWordSpace(line.extraSpace / float(line.wordCount-1))
        _putFragLine(tx, line.words)
        tx.setWordSpace(0)
def _sameFrag(f,g):
'returns 1 if two ParaFrags map out the same'
if hasattr(f,'cbDefn') or hasattr(g,'cbDefn'): return 0
for a in ('fontName', 'fontSize', 'textColor', 'rise'):
if getattr(f,a)!=getattr(g,a): return 0
return 1
def _getFragWords(frags):
    ''' given a Parafrag list return a list of lists
    [[size, (f00,w00), ..., (f0n,w0n)],....,[size, (fm0,wm0), ..., (f0n,wmn)]]
    each pair f,w represents a style and some string
    each sublist represents a word
    '''
    R = []          # finished fragwords
    W = []          # (frag, text) pairs of the word being accumulated
    n = 0           # accumulated width of W
    for f in frags:
        text = f.text
        #del f.text # we can't do this until we sort out splitting
        # of paragraphs
        if text!='':
            # text.split(' ') replaces string.split(text,' ') — the string
            # module functions were removed in Python 3; behaviour is the
            # same (explicit single-space separator keeps empty fields).
            S = text.split(' ')
            if S[-1]=='': del S[-1]
            # Leading whitespace means the pending word is complete.
            if W!=[] and text[0] in [' ','\t']:
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
            # Every word but the last is complete: flush each immediately.
            for w in S[:-1]:
                W.append((f,w))
                n = n + stringWidth(w, f.fontName, f.fontSize)
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
            # The last piece may continue into the next frag; keep it pending.
            w = S[-1]
            W.append((f,w))
            n = n + stringWidth(w, f.fontName, f.fontSize)
            if text[-1] in [' ','\t']:
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
        elif hasattr(f,'cbDefn'):
            # A callback frag is emitted as its own zero-width fragword.
            if W!=[]:
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
            R.append([0,(f,'')])
    if W!=[]:
        W.insert(0,n)
        R.append(W)
    return R
def _split_blParaSimple(blPara,start,stop):
f = blPara.clone()
for a in ('lines', 'kind', 'text'):
if hasattr(f,a): delattr(f,a)
f.words = []
for l in blPara.lines[start:stop]:
for w in l[1]:
f.words.append(w)
return [f]
def _split_blParaHard(blPara,start,stop):
f = []
lines = blPara.lines[start:stop]
for l in lines:
for w in l.words:
f.append(w)
if l is not lines[-1]:
f[-1].text = f[-1].text+' '
return f
def _drawBullet(canvas, offset, cur_y, bulletText, style):
    '''draw a bullet text could be a simple string or a frag list

    Draws at style.bulletIndent on the current line and returns the (possibly
    enlarged) x-offset at which the paragraph text itself must start, so the
    text never overprints the bullet.
    '''
    tx2 = canvas.beginText(style.bulletIndent, cur_y)
    tx2.setFont(style.bulletFontName, style.bulletFontSize)
    # bulletColor is optional on the style; fall back to the text colour.
    tx2.setFillColor(hasattr(style,'bulletColor') and style.bulletColor or style.textColor)
    if type(bulletText) is StringType:
        tx2.textOut(bulletText)
    else:
        # A list of frags: each carries its own font and colour.
        for f in bulletText:
            tx2.setFont(f.fontName, f.fontSize)
            tx2.setFillColor(f.textColor)
            tx2.textOut(f.text)
    bulletEnd = tx2.getX()
    # Push the text start right if the bullet ran past the left indent.
    offset = max(offset, bulletEnd - style.leftIndent)
    canvas.drawText(tx2)
    return offset
def _handleBulletWidth(bulletText,style,maxWidths):
    '''work out bullet width and adjust maxWidths[0] if necessary
    '''
    # 'is not None' replaces the Python-2-only '<>' operator (a syntax error
    # on Python 3); comparing against None, the meaning is unchanged.
    if bulletText is not None:
        if type(bulletText) is StringType:
            bulletWidth = stringWidth(
                bulletText,
                style.bulletFontName, style.bulletFontSize)
        else:
            #it's a list of fragments
            bulletWidth = 0
            for f in bulletText:
                bulletWidth = bulletWidth + stringWidth(f.text, f.fontName, f.fontSize)
        bulletRight = style.bulletIndent + bulletWidth
        if bulletRight > style.firstLineIndent:
            #..then it overruns, and we have less space available on line 1
            maxWidths[0] = maxWidths[0] - (bulletRight - style.firstLineIndent)
class Paragraph(Flowable):
""" Paragraph(text, style, bulletText=None)
text a string of stuff to go into the paragraph.
style is a style definition as in reportlab.lib.styles.
bulletText is an optional bullet defintion.
This class is a flowable that can format a block of text
into a paragraph with a given style.
The paragraph Text can contain XML-like markup including the tags:
<b> ... </b> - bold
<i> ... </i> - italics
<u> ... </u> - underline
<super> ... </super> - superscript
<sub> ... </sub> - subscript
<font name=fontfamily/fontname color=colorname size=float>
The whole may be surrounded by <para> </para> tags
It will also be able to handle any MathML specified Greek characters.
"""
def __init__(self, text, style, bulletText = None, frags=None):
self._setup(text, style, bulletText, frags, cleanBlockQuotedText)
def _setup(self, text, style, bulletText, frags, cleaner):
if frags is None:
text = cleaner(text)
style, frags, bulletTextFrags = _parser.parse(text,style)
if frags is None:
raise "xml parser error (%s) in paragraph beginning\n'%s'"\
% (_parser.errors[0],text[:min(30,len(text))])
if bulletTextFrags: bulletText = bulletTextFrags
#AR hack
self.text = text
self.frags = frags
self.style = style
self.bulletText = bulletText
self.debug = 0 #turn this on to see a pretty one with all the margins etc.
def wrap(self, availWidth, availHeight):
# work out widths array for breaking
self.width = availWidth
first_line_width = availWidth - self.style.firstLineIndent - self.style.rightIndent
later_widths = availWidth - self.style.leftIndent - self.style.rightIndent
self.blPara = self.breakLines([first_line_width, later_widths])
self.height = len(self.blPara.lines) * self.style.leading
#estimate the size
return (self.width, self.height)
def _get_split_blParaFunc(self):
return self.blPara.kind==0 and _split_blParaSimple or _split_blParaHard
def split(self,availWidth, availHeight):
if len(self.frags)<=0: return []
#the split information is all inside self.blPara
if not hasattr(self,'blPara'):
self.wrap(availWidth,availHeight)
blPara = self.blPara
style = self.style
leading = style.leading
lines = blPara.lines
n = len(lines)
s = int(availHeight/leading)
if s<=1: return []
if n<=s: return [self]
func = self._get_split_blParaFunc()
P1=self.__class__(None,style,bulletText=self.bulletText,frags=func(blPara,0,s))
P1._JustifyLast = 1
if style.firstLineIndent != style.leftIndent:
style = deepcopy(style)
style.firstLineIndent = style.leftIndent
P2=self.__class__(None,style,bulletText=None,frags=func(blPara,s,n))
return [P1,P2]
def draw(self):
#call another method for historical reasons. Besides, I
#suspect I will be playing with alternate drawing routines
#so not doing it here makes it easier to switch.
self.drawPara(self.debug)
def breakLines(self, width):
"""
Returns a broken line structure. There are two cases
A) For the simple case of a single formatting input fragment the output is
A fragment specifier with
kind = 0
fontName, fontSize, leading, textColor
lines= A list of lines
Each line has two items.
1) unused width in points
2) word list
B) When there is more than one input formatting fragment the out put is
A fragment specifier with
kind = 1
lines= A list of fragments each having fields
extraspace (needed for justified)
fontSize
words=word list
each word is itself a fragment with
various settings
This structure can be used to easily draw paragraphs with the various alignments.
You can supply either a single width or a list of widths; the latter will have its
last item repeated until necessary. A 2-element list is useful when there is a
different first line indent; a longer list could be created to facilitate custom wraps
around irregular objects."""
if type(width) <> ListType: maxWidths = [width]
else: maxWidths = width
lines = []
lineno = 0
maxWidth = maxWidths[lineno]
style = self.style
fFontSize = float(style.fontSize)
#for bullets, work out width and ensure we wrap the right amount onto line one
_handleBulletWidth(self.bulletText,style,maxWidths)
self.height = 0
frags = self.frags
nFrags= len(frags)
if nFrags==1:
f = frags[0]
fontSize = f.fontSize
fontName = f.fontName
words = hasattr(f,'text') and string.split(f.text, ' ') or f.words
spaceWidth = stringWidth(' ', fontName, fontSize)
cLine = []
currentWidth = - spaceWidth # hack to get around extra space for word 1
for word in words:
wordWidth = stringWidth(word, fontName, fontSize)
space_available = maxWidth - (currentWidth + spaceWidth + wordWidth)
if space_available > 0 or len(cLine)==0:
# fit one more on this line
cLine.append(word)
currentWidth = currentWidth + spaceWidth + wordWidth
else:
if currentWidth>self.width: self.width = currentWidth
#end of line
lines.append((maxWidth - currentWidth, cLine))
cLine = [word]
currentWidth = wordWidth
lineno = lineno + 1
try:
maxWidth = maxWidths[lineno]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
#deal with any leftovers on the final line
if cLine!=[]:
if currentWidth>self.width: self.width = currentWidth
lines.append((maxWidth - currentWidth, cLine))
return f.clone(kind=0, lines=lines)
elif nFrags<=0:
return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
textColor=style.textColor, lines=[])
else:
n = 0
for w in _getFragWords(frags):
spaceWidth = stringWidth(' ',w[-1][0].fontName, w[-1][0].fontSize)
if n==0:
currentWidth = -spaceWidth # hack to get around extra space for word 1
words = []
maxSize = 0
wordWidth = w[0]
f = w[1][0]
space_available = maxWidth - (currentWidth + spaceWidth + wordWidth)
if space_available > 0 or n==0:
# fit one more on this line
n = n + 1
maxSize = max(maxSize,f.fontSize)
nText = w[1][1]
if words==[]:
words = [f.clone()]
words[-1].text = nText
elif not _sameFrag(words[-1],f):
if nText!='' and nText[0]!=' ':
words[-1].text = words[-1].text + ' '
words.append(f.clone())
words[-1].text = nText
else:
if nText!='' and nText[0]!=' ':
words[-1].text = words[-1].text + ' ' + nText
for i in w[2:]:
f = i[0].clone()
f.text=i[1]
words.append(f)
maxSize = max(maxSize,f.fontSize)
currentWidth = currentWidth + spaceWidth + wordWidth
else:
if currentWidth>self.width: self.width = currentWidth
#end of line
lines.append(ParaLines(extraSpace=(maxWidth - currentWidth),wordCount=n,
words=words, fontSize=maxSize))
#start new line
lineno = lineno + 1
try:
maxWidth = maxWidths[lineno]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
currentWidth = wordWidth
n = 1
maxSize = f.fontSize
words = [f.clone()]
words[-1].text = w[1][1]
for i in w[2:]:
f = i[0].clone()
f.text=i[1]
words.append(f)
maxSize = max(maxSize,f.fontSize)
#deal with any leftovers on the final line
if words<>[]:
if currentWidth>self.width: self.width = currentWidth
lines.append(ParaLines(extraSpace=(maxWidth - currentWidth),wordCount=n,
words=words, fontSize=maxSize))
return ParaLines(kind=1, lines=lines)
return lines
def drawPara(self,debug=0):
"""Draws a paragraph according to the given style.
Returns the final y position at the bottom. Not safe for
paragraphs without spaces e.g. Japanese; wrapping
algorithm will go infinite."""
#stash the key facts locally for speed
canvas = self.canv
style = self.style
blPara = self.blPara
lines = blPara.lines
#work out the origin for line 1
cur_x = style.leftIndent
if debug:
# This boxes and shades stuff to show how the paragraph
# uses its space. Useful for self-documentation so
# the debug code stays!
# box the lot
canvas.rect(0, 0, self.width, self.height)
#left and right margins
canvas.saveState()
canvas.setFillColor(Color(0.9,0.9,0.9))
canvas.rect(0, 0, style.leftIndent, self.height)
canvas.rect(self.width - style.rightIndent, 0, style.rightIndent, self.height)
# shade above and below
canvas.setFillColor(Color(1.0,1.0,0.0))
canvas.restoreState()
#self.drawLine(x + style.leftIndent, y, x + style.leftIndent, cur_y)
nLines = len(lines)
bulletText = self.bulletText
if nLines > 0:
canvas.saveState()
canvas.addLiteral('%% %s.drawPara' % _className(self))
alignment = style.alignment
offset = style.firstLineIndent - style.leftIndent
lim = nLines-1
noJustifyLast = not (hasattr(self,'_JustifyLast') and self._JustifyLast)
if blPara.kind==0:
if alignment == TA_LEFT:
dpl = _leftDrawParaLine
elif alignment == TA_CENTER:
dpl = _centerDrawParaLine
elif self.style.alignment == TA_RIGHT:
dpl = _rightDrawParaLine
elif self.style.alignment == TA_JUSTIFY:
dpl = _justifyDrawParaLine
f = blPara
cur_y = self.height - f.fontSize
if bulletText <> None:
offset = _drawBullet(canvas,offset,cur_y,bulletText,style)
#set up the font etc.
canvas._code.append('%s %s %s rg' % (f.textColor.red, f.textColor.green, f.textColor.blue))
tx = canvas.beginText(cur_x, cur_y)
#now the font for the rest of the paragraph
tx.setFont(f.fontName, f.fontSize, style.leading)
dpl( tx, offset, lines[0][0], lines[0][1], noJustifyLast and nLines==1)
#now the middle of the paragraph, aligned with the left margin which is our origin.
for i in range(1, nLines):
dpl( tx, 0, lines[i][0], lines[i][1], noJustifyLast and i==lim)
else:
f = lines[0]
cur_y = self.height - f.fontSize
# default?
dpl = _leftDrawParaLineX
if bulletText <> None:
offset = _drawBullet(canvas,offset,cur_y,bulletText,style)
if alignment == TA_LEFT:
dpl = _leftDrawParaLineX
elif alignment == TA_CENTER:
dpl = _centerDrawParaLineX
elif self.style.alignment == TA_RIGHT:
dpl = _rightDrawParaLineX
elif self.style.alignment == TA_JUSTIFY:
dpl = _justifyDrawParaLineX
else:
raise ValueError, "bad align %s" % repr(alignment)
#set up the font etc.
tx = canvas.beginText(cur_x, cur_y)
tx.XtraState=ABag()
tx.XtraState.textColor=None
tx.XtraState.rise=0
tx.setLeading(style.leading)
#f = lines[0].words[0]
#tx._setFont(f.fontName, f.fontSize)
tx._fontname,tx._fontsize = None, None
dpl( tx, offset, lines[0], noJustifyLast and nLines==1)
#now the middle of the paragraph, aligned with the left margin which is our origin.
for i in range(1, nLines):
f = lines[i]
dpl( tx, 0, f, noJustifyLast and i==lim)
canvas.drawText(tx)
canvas.restoreState()
def getPlainText(self):
    """Convenience function for templates which want access
    to the raw text, without XML tags.

    Returns the concatenation of every fragment's text with no
    separator between fragments."""
    # ''.join replaces the legacy string.join(plains, '') call; the
    # string-module function form was removed in Python 3 and the
    # behaviour is identical.
    return ''.join([frag.text for frag in self.frags])
def getActualLineWidths0(self):
    """Convenience function; tells you how wide each line
    actually is.  For justified styles, this will be
    the same as the wrap width; for others it might be
    useful for seeing if paragraphs will fit in spaces."""
    assert hasattr(self, 'width'), "Cannot call this method before wrap()"
    # each line records its unused (extra) space; actual width is the rest
    return [self.width - line.extraSpace for line in self.blPara.lines]
if __name__=='__main__':    #NORUNTESTS
    # Ad-hoc visual/debugging exercises for the Paragraph machinery; the
    # NORUNTESTS marker keeps this out of the automated test run.
    def dumpParagraphLines(P):
        # Print each wrapped line of P with its word count and word texts.
        print 'dumpParagraphLines(%s)' % str(P)
        lines = P.blPara.lines
        n =len(lines)
        for l in range(n):
            line = lines[l]
            words = line.words
            nwords = len(words)
            print 'line%d: %d(%d)\n ' % (l,nwords,line.wordCount),
            for w in range(nwords):
                print "%d:'%s'"%(w,words[w].text),
            print
    def dumpParagraphFrags(P):
        # Print each raw fragment, then the fragment-word decomposition.
        print 'dumpParagraphLines(%s)' % str(P)
        frags = P.frags
        n =len(frags)
        for l in range(n):
            print "frag%d: '%s'" % (l, frags[l].text)
        l = 0
        for W in _getFragWords(frags):
            print "fragword%d: size=%d" % (l, W[0]),
            for w in W[1:]:
                print "'%s'" % w[1],
            print
            l = l + 1
    from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
    styleSheet = getSampleStyleSheet()
    B = styleSheet['BodyText']
    style = ParagraphStyle("discussiontext", parent=B)
    style.fontName= 'Helvetica'
    # Multi-style paragraph exercising <font> markup and wrapping/splitting.
    text='''The <font name=courier color=green>CMYK</font> or subtractive method follows the way a printer
mixes three pigments (cyan, magenta, and yellow) to form colors.
Because mixing chemicals is more difficult than combining light there
is a fourth parameter for darkness.  For example a chemical
combination of the <font name=courier color=green>CMY</font> pigments generally never makes a perfect
black -- instead producing a muddy color -- so, to get black printers
don't use the <font name=courier color=green>CMY</font> pigments but use a direct black ink.  Because
<font name=courier color=green>CMYK</font> maps more directly to the way printer hardware works it may
be the case that &| & | colors specified in <font name=courier color=green>CMYK</font> will provide better fidelity
and better control when printed.
'''
    P=Paragraph(text,style)
    dumpParagraphFrags(P)
    aW, aH = 456.0, 42.8
    w,h = P.wrap(aW, aH)
    dumpParagraphLines(P)
    S = P.split(aW,aH)
    for s in S:
        s.wrap(aW,aH)
        dumpParagraphLines(s)
    aH = 500
    # Superscript markup exercise at a very narrow wrap width.
    P=Paragraph("""Price<super><font color="red">*</font></super>""", styleSheet['Normal'])
    dumpParagraphFrags(P)
    w,h = P.wrap(24, 200)
    dumpParagraphLines(P)
Cosmetic changes
#copyright ReportLab Inc. 2000
#see license.txt for license details
#history http://cvs.sourceforge.net/cgi-bin/cvsweb.cgi/reportlab/platypus/paragraph.py?cvsroot=reportlab
#$Header: /tmp/reportlab/reportlab/platypus/paragraph.py,v 1.35 2000/12/04 13:22:34 rgbecker Exp $
__version__=''' $Id: paragraph.py,v 1.35 2000/12/04 13:22:34 rgbecker Exp $ '''
import string
from types import StringType, ListType
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.platypus.paraparser import ParaParser
from reportlab.platypus.flowables import Flowable
from reportlab.lib.colors import Color
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.utils import _className
from copy import deepcopy
from reportlab.lib.abag import ABag
class ParaLines(ABag):
    """
    class ParaLines contains the broken into lines representation of Paragraphs

        kind==0  Simple
            fontName, fontSize, textColor apply to whole Paragraph
            lines   [(extraSpace1,words1),....,(extraspaceN,wordsN)]

        kind==1  Complex
            lines   [FragLine1,...,FragLineN]
    """
class FragLine(ABag):
    """class FragLine contains a styled line (ie a line with more than one style)

    extraSpace  unused space for justification only
    wordCount   1+spaces in line for justification purposes
    words       [ParaFrags] style text lumps to be concatenated together
    fontSize    maximum fontSize seen on the line; not used at present,
                but could be used for line spacing.
    """
#our one and only parser
# XXXXX if the parser has any internal state using only one is probably a BAD idea!
# Shared module-level ParaParser instance used by Paragraph._setup.
_parser=ParaParser()
def cleanBlockQuotedText(text, joiner=' '):
    """This is an internal utility which takes triple-
    quoted text form within the document and returns
    (hopefully) the paragraph the user intended originally.

    Leading/trailing whitespace is stripped from the whole text, each
    line is left-stripped, and the lines are glued with *joiner*."""
    # str methods replace the legacy string-module functions
    # (string.strip/split/lstrip/join), which were removed in Python 3;
    # behaviour is identical.
    stripped_lines = [line.lstrip() for line in text.strip().split('\n')]
    return joiner.join(stripped_lines)
def _leftDrawParaLine( tx, offset, extraspace, words, last=0):
tx.setXPos(offset)
tx._textOut(string.join(words),1)
def _centerDrawParaLine( tx, offset, extraspace, words, last=0):
m = offset + 0.5 * extraspace
tx.setXPos(m)
tx._textOut(string.join(words),1)
def _rightDrawParaLine( tx, offset, extraspace, words, last=0):
m = offset + extraspace
tx.setXPos(m)
tx._textOut(string.join(words),1)
def _justifyDrawParaLine( tx, offset, extraspace, words, last=0):
tx.setXPos(offset)
text = string.join(words)
if last:
#last one, left align
tx._textOut(text,1)
else:
tx.setWordSpace(extraspace / float(len(words)-1))
tx._textOut(text,1)
tx.setWordSpace(0)
def _putFragLine(tx,words):
    # Emit one styled (multi-frag) line into text object tx.  Font, fill
    # colour and rise are only switched when a frag differs from the state
    # cached on tx/tx.XtraState, so runs of same-styled frags are cheap.
    for f in words:
        if hasattr(f,'cbDefn'):
            # "callback" frag: instead of text, invoke the named method on
            # the canvas (used e.g. for index entries via <onDraw>).
            func = getattr(tx._canvas,f.cbDefn.name,None)
            if not func:
                raise AttributeError, "Missing %s callback attribute '%s'" % (f.cbDefn.kind,f.cbDefn.name)
            func(tx._canvas,f.cbDefn.kind,f.cbDefn.label)
            # a trailing callback must still terminate the line
            if f is words[-1]: tx._textOut('',1)
        else:
            if (tx._fontname,tx._fontsize)!=(f.fontName,f.fontSize):
                tx._setFont(f.fontName, f.fontSize)
            if tx.XtraState.textColor!=f.textColor:
                tx.XtraState.textColor = f.textColor
                tx.setFillColor(f.textColor)
            if tx.XtraState.rise!=f.rise:
                tx.XtraState.rise=f.rise
                tx.setRise(f.rise)
            # second arg: newline only after the last frag of the line
            tx._textOut(f.text,f is words[-1]) # cheap textOut
def _leftDrawParaLineX(tx, offset, line, last=0):
    """Left-align one styled (multi-frag) line at the given offset."""
    tx.setXPos(offset)
    _putFragLine(tx, line.words)
def _centerDrawParaLineX(tx, offset, line, last=0):
    """Centre one styled (multi-frag) line: indent by half its unused space."""
    tx.setXPos(offset + 0.5 * line.extraSpace)
    _putFragLine(tx, line.words)
def _rightDrawParaLineX(tx, offset, line, last=0):
    """Right-align one styled (multi-frag) line: indent by its full unused space."""
    tx.setXPos(offset + line.extraSpace)
    _putFragLine(tx, line.words)
def _justifyDrawParaLineX(tx, offset, line, last=0):
    """Justify one styled (multi-frag) line by widening the inter-word
    gaps; the paragraph's last line (or a line with no gaps) is drawn
    left-aligned."""
    tx.setXPos(offset)
    gaps = line.wordCount - 1
    if last or gaps <= 0:
        # last line, or single-word line: no gaps to widen (the original
        # divided by zero when wordCount was 1); left align
        _putFragLine(tx, line.words)
    else:
        tx.setWordSpace(line.extraSpace / float(gaps))
        _putFragLine(tx, line.words)
        tx.setWordSpace(0)
def _sameFrag(f,g):
'returns 1 if two ParaFrags map out the same'
if hasattr(f,'cbDefn') or hasattr(g,'cbDefn'): return 0
for a in ('fontName', 'fontSize', 'textColor', 'rise'):
if getattr(f,a)!=getattr(g,a): return 0
return 1
def _getFragWords(frags):
    ''' given a Parafrag list return a list of lists
    [[size, (f00,w00), ..., (f0n,w0n)],....,[size, (fm0,wm0), ..., (f0n,wmn)]]
    each pair f,w represents a style and some string
    each sublist represents a word

    W accumulates the (frag, text) pieces of the word currently being
    built; n accumulates its width.  A word is flushed to R (with its
    width inserted at position 0) whenever a space boundary is reached;
    words may span several frags when a frag ends mid-word.
    '''
    R = []
    W = []
    n = 0
    for f in frags:
        text = f.text
        #del f.text # we can't do this until we sort out splitting
        #            of paragraphs
        if text!='':
            S = string.split(text,' ')
            # a trailing space yields a final empty chunk; drop it
            if S[-1]=='': del S[-1]
            # leading space means the pending word from the previous frag
            # is complete: flush it
            if W!=[] and text[0] in [' ','\t']:
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
            # all chunks but the last are complete words: flush each
            for w in S[:-1]:
                W.append((f,w))
                n = n + stringWidth(w, f.fontName, f.fontSize)
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
            # the last chunk may continue into the next frag...
            w = S[-1]
            W.append((f,w))
            n = n + stringWidth(w, f.fontName, f.fontSize)
            # ...unless this frag ends with whitespace, which closes it
            if text[-1] in [' ','\t']:
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
        elif hasattr(f,'cbDefn'):
            # zero-width callback frag: flush any pending word, then emit
            # the callback as its own zero-size "word"
            if W!=[]:
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
            R.append([0,(f,'')])
    # flush whatever word is still pending at the end of the frag list
    if W!=[]:
        W.insert(0,n)
        R.append(W)
    return R
def _split_blParaSimple(blPara,start,stop):
f = blPara.clone()
for a in ('lines', 'kind', 'text'):
if hasattr(f,a): delattr(f,a)
f.words = []
for l in blPara.lines[start:stop]:
for w in l[1]:
f.words.append(w)
return [f]
def _split_blParaHard(blPara,start,stop):
f = []
lines = blPara.lines[start:stop]
for l in lines:
for w in l.words:
f.append(w)
if l is not lines[-1]:
f[-1].text = f[-1].text+' '
return f
def _drawBullet(canvas, offset, cur_y, bulletText, style):
    '''draw a bullet text could be a simple string or a frag list

    Draws at the style's bulletIndent and returns the (possibly enlarged)
    first-line offset so the paragraph text clears the bullet.'''
    tx2 = canvas.beginText(style.bulletIndent, cur_y)
    tx2.setFont(style.bulletFontName, style.bulletFontSize)
    # bulletColor is optional on older styles; fall back to textColor
    tx2.setFillColor(hasattr(style,'bulletColor') and style.bulletColor or style.textColor)
    if type(bulletText) is StringType:
        tx2.textOut(bulletText)
    else:
        # frag list: honour each fragment's own font and colour
        for f in bulletText:
            tx2.setFont(f.fontName, f.fontSize)
            tx2.setFillColor(f.textColor)
            tx2.textOut(f.text)
    bulletEnd = tx2.getX()
    # if the bullet runs past the left indent, push line 1 to the right
    offset = max(offset, bulletEnd - style.leftIndent)
    canvas.drawText(tx2)
    return offset
def _handleBulletWidth(bulletText, style, maxWidths):
    '''work out bullet width and adjust maxWidths[0] if neccessary

    Mutates maxWidths in place: when the bullet overruns the first-line
    indent, line 1 loses the overlapped width.'''
    # != replaces the long-obsolete <> operator (removed in Python 3)
    if bulletText != None:
        if type(bulletText) is StringType:
            bulletWidth = stringWidth(
                bulletText,
                style.bulletFontName, style.bulletFontSize)
        else:
            #it's a list of fragments
            bulletWidth = 0
            for f in bulletText:
                bulletWidth = bulletWidth + stringWidth(f.text, f.fontName, f.fontSize)
        bulletRight = style.bulletIndent + bulletWidth
        if bulletRight > style.firstLineIndent:
            #..then it overruns, and we have less space available on line 1
            maxWidths[0] = maxWidths[0] - (bulletRight - style.firstLineIndent)
class Paragraph(Flowable):
    """ Paragraph(text, style, bulletText=None)
        text a string of stuff to go into the paragraph.
        style is a style definition as in reportlab.lib.styles.
        bulletText is an optional bullet defintion.

        This class is a flowable that can format a block of text
        into a paragraph with a given style.

        The paragraph Text can contain XML-like markup including the tags:
        <b> ... </b> - bold
        <i> ... </i> - italics
        <u> ... </u> - underline
        <super> ... </super> - superscript
        <sub> ... </sub> - subscript
        <font name=fontfamily/fontname color=colorname size=float>

        The whole may be surrounded by <para> </para> tags

        It will also be able to handle any MathML specified Greek characters.
    """
    def __init__(self, text, style, bulletText = None, frags=None):
        self._setup(text, style, bulletText, frags, cleanBlockQuotedText)

    def _setup(self, text, style, bulletText, frags, cleaner):
        # Parse the markup into style/frag lists, unless pre-parsed frags
        # were supplied (as happens when split() rebuilds paragraphs).
        if frags is None:
            text = cleaner(text)
            style, frags, bulletTextFrags = _parser.parse(text,style)
            if frags is None:
                # NOTE(review): string exception -- legal only on old Python 2
                raise "xml parser error (%s) in paragraph beginning\n'%s'"\
                      % (_parser.errors[0],text[:min(30,len(text))])
            if bulletTextFrags: bulletText = bulletTextFrags

        #AR hack
        self.text = text
        self.frags = frags
        self.style = style
        self.bulletText = bulletText
        self.debug = 0  #turn this on to see a pretty one with all the margins etc.

    def wrap(self, availWidth, availHeight):
        # work out widths array for breaking
        self.width = availWidth
        first_line_width = availWidth - self.style.firstLineIndent - self.style.rightIndent
        later_widths = availWidth - self.style.leftIndent - self.style.rightIndent
        self.blPara = self.breakLines([first_line_width, later_widths])
        self.height = len(self.blPara.lines) * self.style.leading
        #estimate the size
        return (self.width, self.height)

    def _get_split_blParaFunc(self):
        # kind 0 (single style) and kind 1 (multi style) need different splitters
        return self.blPara.kind==0 and _split_blParaSimple or _split_blParaHard

    def split(self,availWidth, availHeight):
        # Split into [head, tail] paragraphs when the wrapped lines do not
        # all fit in availHeight; returns [] when nothing fits, [self]
        # when everything does.
        if len(self.frags)<=0: return []

        #the split information is all inside self.blPara
        if not hasattr(self,'blPara'):
            self.wrap(availWidth,availHeight)
        blPara = self.blPara
        style = self.style
        leading = style.leading
        lines = blPara.lines
        n = len(lines)
        s = int(availHeight/leading)   # number of whole lines that fit
        if s<=1: return []
        if n<=s: return [self]
        func = self._get_split_blParaFunc()

        P1=self.__class__(None,style,bulletText=self.bulletText,frags=func(blPara,0,s))
        # the head's last line must not be stretched by justification
        P1._JustifyLast = 1
        # the tail continues the paragraph, so it loses the first-line indent
        if style.firstLineIndent != style.leftIndent:
            style = deepcopy(style)
            style.firstLineIndent = style.leftIndent
        P2=self.__class__(None,style,bulletText=None,frags=func(blPara,s,n))
        return [P1,P2]

    def draw(self):
        #call another method for historical reasons.  Besides, I
        #suspect I will be playing with alternate drawing routines
        #so not doing it here makes it easier to switch.
        self.drawPara(self.debug)

    def breakLines(self, width):
        """
        Returns a broken line structure. There are two cases

        A) For the simple case of a single formatting input fragment the output is
            A fragment specifier with
                kind = 0
                fontName, fontSize, leading, textColor
                lines=  A list of lines
                        Each line has two items.
                        1) unused width in points
                        2) word list

        B) When there is more than one input formatting fragment the out put is
            A fragment specifier with
                kind = 1
                lines=  A list of fragments each having fields
                            extraspace (needed for justified)
                            fontSize
                            words=word list
                                each word is itself a fragment with
                                various settings

        This structure can be used to easily draw paragraphs with the various alignments.
        You can supply either a single width or a list of widths; the latter will have its
        last item repeated until necessary. A 2-element list is useful when there is a
        different first line indent; a longer list could be created to facilitate custom wraps
        around irregular objects."""

        if type(width) <> ListType: maxWidths = [width]
        else: maxWidths = width
        lines = []
        lineno = 0
        maxWidth = maxWidths[lineno]
        style = self.style
        fFontSize = float(style.fontSize)   # NOTE(review): currently unused

        #for bullets, work out width and ensure we wrap the right amount onto line one
        _handleBulletWidth(self.bulletText,style,maxWidths)

        self.height = 0
        frags = self.frags
        nFrags= len(frags)
        if nFrags==1:
            # simple case: one style for the whole paragraph (kind 0)
            f = frags[0]
            fontSize = f.fontSize
            fontName = f.fontName
            words = hasattr(f,'text') and string.split(f.text, ' ') or f.words
            spaceWidth = stringWidth(' ', fontName, fontSize)
            cLine = []
            currentWidth = - spaceWidth   # hack to get around extra space for word 1
            for word in words:
                wordWidth = stringWidth(word, fontName, fontSize)
                space_available = maxWidth - (currentWidth + spaceWidth + wordWidth)
                if space_available > 0 or len(cLine)==0:
                    # fit one more on this line
                    cLine.append(word)
                    currentWidth = currentWidth + spaceWidth + wordWidth
                else:
                    if currentWidth>self.width: self.width = currentWidth
                    #end of line
                    lines.append((maxWidth - currentWidth, cLine))
                    cLine = [word]
                    currentWidth = wordWidth
                    lineno = lineno + 1
                    try:
                        maxWidth = maxWidths[lineno]
                    except IndexError:
                        maxWidth = maxWidths[-1]  # use the last one

            #deal with any leftovers on the final line
            if cLine!=[]:
                if currentWidth>self.width: self.width = currentWidth
                lines.append((maxWidth - currentWidth, cLine))
            return f.clone(kind=0, lines=lines)
        elif nFrags<=0:
            # empty paragraph: an empty kind-0 structure styled from the stylesheet
            return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
                             textColor=style.textColor, lines=[])
        else:
            # complex case: multiple styles (kind 1); words come from
            # _getFragWords as [width, (frag, text), ...] lists
            n = 0
            for w in _getFragWords(frags):
                spaceWidth = stringWidth(' ',w[-1][0].fontName, w[-1][0].fontSize)
                if n==0:
                    currentWidth = -spaceWidth   # hack to get around extra space for word 1
                    words = []
                    maxSize = 0

                wordWidth = w[0]
                f = w[1][0]
                space_available = maxWidth - (currentWidth + spaceWidth + wordWidth)
                if space_available > 0 or n==0:
                    # fit one more on this line
                    n = n + 1
                    maxSize = max(maxSize,f.fontSize)
                    nText = w[1][1]
                    if words==[]:
                        words = [f.clone()]
                        words[-1].text = nText
                    elif not _sameFrag(words[-1],f):
                        # style changed: close the previous frag (restoring
                        # the inter-word space) and start a new one
                        if nText!='' and nText[0]!=' ':
                            words[-1].text = words[-1].text + ' '
                        words.append(f.clone())
                        words[-1].text = nText
                    else:
                        # same style: just extend the previous frag's text
                        if nText!='' and nText[0]!=' ':
                            words[-1].text = words[-1].text + ' ' + nText

                    # remaining (frag, text) pieces of a multi-style word
                    for i in w[2:]:
                        f = i[0].clone()
                        f.text=i[1]
                        words.append(f)
                        maxSize = max(maxSize,f.fontSize)

                    currentWidth = currentWidth + spaceWidth + wordWidth
                else:
                    if currentWidth>self.width: self.width = currentWidth
                    #end of line
                    lines.append(FragLine(extraSpace=(maxWidth - currentWidth),wordCount=n,
                                          words=words, fontSize=maxSize))

                    #start new line
                    lineno = lineno + 1
                    try:
                        maxWidth = maxWidths[lineno]
                    except IndexError:
                        maxWidth = maxWidths[-1]  # use the last one
                    currentWidth = wordWidth
                    n = 1
                    maxSize = f.fontSize
                    words = [f.clone()]
                    words[-1].text = w[1][1]

                    for i in w[2:]:
                        f = i[0].clone()
                        f.text=i[1]
                        words.append(f)
                        maxSize = max(maxSize,f.fontSize)

            #deal with any leftovers on the final line
            if words<>[]:
                if currentWidth>self.width: self.width = currentWidth
                # NOTE(review): ParaLines here where FragLine is used above --
                # presumably interchangeable ABag subclasses; confirm
                lines.append(ParaLines(extraSpace=(maxWidth - currentWidth),wordCount=n,
                                       words=words, fontSize=maxSize))
            return ParaLines(kind=1, lines=lines)

        return lines   # NOTE(review): unreachable; all branches return above

    def drawPara(self,debug=0):
        """Draws a paragraph according to the given style.
        Returns the final y position at the bottom. Not safe for
        paragraphs without spaces e.g. Japanese; wrapping
        algorithm will go infinite."""

        #stash the key facts locally for speed
        canvas = self.canv
        style = self.style
        blPara = self.blPara
        lines = blPara.lines

        #work out the origin for line 1
        cur_x = style.leftIndent

        if debug:
            # This boxes and shades stuff to show how the paragraph
            # uses its space.  Useful for self-documentation so
            # the debug code stays!
            # box the lot
            canvas.rect(0, 0, self.width, self.height)
            #left and right margins
            canvas.saveState()
            canvas.setFillColor(Color(0.9,0.9,0.9))
            canvas.rect(0, 0, style.leftIndent, self.height)
            canvas.rect(self.width - style.rightIndent, 0, style.rightIndent, self.height)
            # shade above and below
            canvas.setFillColor(Color(1.0,1.0,0.0))
            canvas.restoreState()
            #self.drawLine(x + style.leftIndent, y, x + style.leftIndent, cur_y)

        nLines = len(lines)
        bulletText = self.bulletText
        if nLines > 0:
            canvas.saveState()
            canvas.addLiteral('%% %s.drawPara' % _className(self))
            alignment = style.alignment
            offset = style.firstLineIndent - style.leftIndent
            lim = nLines-1
            # a paragraph head produced by split() sets _JustifyLast so its
            # final line is still stretched when justified
            noJustifyLast = not (hasattr(self,'_JustifyLast') and self._JustifyLast)
            if blPara.kind==0:
                # simple single-style paragraph
                # NOTE(review): no else branch -- an unknown alignment leaves
                # dpl unbound and raises NameError below
                if alignment == TA_LEFT:
                    dpl = _leftDrawParaLine
                elif alignment == TA_CENTER:
                    dpl = _centerDrawParaLine
                elif self.style.alignment == TA_RIGHT:
                    dpl = _rightDrawParaLine
                elif self.style.alignment == TA_JUSTIFY:
                    dpl = _justifyDrawParaLine
                f = blPara
                cur_y = self.height - f.fontSize
                if bulletText <> None:
                    offset = _drawBullet(canvas,offset,cur_y,bulletText,style)

                #set up the font etc.
                canvas._code.append('%s %s %s rg' % (f.textColor.red, f.textColor.green, f.textColor.blue))

                tx = canvas.beginText(cur_x, cur_y)

                #now the font for the rest of the paragraph
                tx.setFont(f.fontName, f.fontSize, style.leading)
                dpl( tx, offset, lines[0][0], lines[0][1], noJustifyLast and nLines==1)

                #now the middle of the paragraph, aligned with the left margin which is our origin.
                for i in range(1, nLines):
                    dpl( tx, 0, lines[i][0], lines[i][1], noJustifyLast and i==lim)
            else:
                # complex multi-style paragraph
                f = lines[0]
                cur_y = self.height - f.fontSize
                # default?
                dpl = _leftDrawParaLineX
                if bulletText <> None:
                    offset = _drawBullet(canvas,offset,cur_y,bulletText,style)
                if alignment == TA_LEFT:
                    dpl = _leftDrawParaLineX
                elif alignment == TA_CENTER:
                    dpl = _centerDrawParaLineX
                elif self.style.alignment == TA_RIGHT:
                    dpl = _rightDrawParaLineX
                elif self.style.alignment == TA_JUSTIFY:
                    dpl = _justifyDrawParaLineX
                else:
                    raise ValueError, "bad align %s" % repr(alignment)

                #set up the font etc.
                tx = canvas.beginText(cur_x, cur_y)
                # XtraState caches the colour/rise last written so
                # _putFragLine can skip redundant state changes
                tx.XtraState=ABag()
                tx.XtraState.textColor=None
                tx.XtraState.rise=0
                tx.setLeading(style.leading)
                #f = lines[0].words[0]
                #tx._setFont(f.fontName, f.fontSize)
                tx._fontname,tx._fontsize = None, None
                dpl( tx, offset, lines[0], noJustifyLast and nLines==1)

                #now the middle of the paragraph, aligned with the left margin which is our origin.
                for i in range(1, nLines):
                    f = lines[i]
                    dpl( tx, 0, f, noJustifyLast and i==lim)

            canvas.drawText(tx)
            canvas.restoreState()

    def getPlainText(self):
        """Convenience function for templates which want access
        to the raw text, without XML tags. """
        plains = []
        for frag in self.frags:
            plains.append(frag.text)
        return string.join(plains, '')

    def getActualLineWidths0(self):
        """Convenience function; tells you how wide each line
        actually is.  For justified styles, this will be
        the same as the wrap width; for others it might be
        useful for seeing if paragraphs will fit in spaces."""
        assert hasattr(self, 'width'), "Cannot call this method before wrap()"
        w = []
        for frag in self.blPara.lines:
            w.append(self.width - frag.extraSpace)
        return w
if __name__=='__main__':    #NORUNTESTS
    # Ad-hoc visual/debugging exercises for the Paragraph machinery; the
    # NORUNTESTS marker keeps this out of the automated test run.
    def dumpParagraphLines(P):
        # Print each wrapped line of P with its word count and word texts.
        print 'dumpParagraphLines(%s)' % str(P)
        lines = P.blPara.lines
        n =len(lines)
        for l in range(n):
            line = lines[l]
            words = line.words
            nwords = len(words)
            print 'line%d: %d(%d)\n ' % (l,nwords,line.wordCount),
            for w in range(nwords):
                print "%d:'%s'"%(w,words[w].text),
            print
    def dumpParagraphFrags(P):
        # Print each raw fragment, then the fragment-word decomposition.
        print 'dumpParagraphLines(%s)' % str(P)
        frags = P.frags
        n =len(frags)
        for l in range(n):
            print "frag%d: '%s'" % (l, frags[l].text)
        l = 0
        for W in _getFragWords(frags):
            print "fragword%d: size=%d" % (l, W[0]),
            for w in W[1:]:
                print "'%s'" % w[1],
            print
            l = l + 1
    from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
    styleSheet = getSampleStyleSheet()
    B = styleSheet['BodyText']
    style = ParagraphStyle("discussiontext", parent=B)
    style.fontName= 'Helvetica'
    # Multi-style paragraph exercising <font> markup and wrapping/splitting.
    text='''The <font name=courier color=green>CMYK</font> or subtractive method follows the way a printer
mixes three pigments (cyan, magenta, and yellow) to form colors.
Because mixing chemicals is more difficult than combining light there
is a fourth parameter for darkness.  For example a chemical
combination of the <font name=courier color=green>CMY</font> pigments generally never makes a perfect
black -- instead producing a muddy color -- so, to get black printers
don't use the <font name=courier color=green>CMY</font> pigments but use a direct black ink.  Because
<font name=courier color=green>CMYK</font> maps more directly to the way printer hardware works it may
be the case that &| & | colors specified in <font name=courier color=green>CMYK</font> will provide better fidelity
and better control when printed.
'''
    P=Paragraph(text,style)
    dumpParagraphFrags(P)
    aW, aH = 456.0, 42.8
    w,h = P.wrap(aW, aH)
    dumpParagraphLines(P)
    S = P.split(aW,aH)
    for s in S:
        s.wrap(aW,aH)
        dumpParagraphLines(s)
    aH = 500
    # Superscript markup exercise at a very narrow wrap width.
    P=Paragraph("""Price<super><font color="red">*</font></super>""", styleSheet['Normal'])
    dumpParagraphFrags(P)
    w,h = P.wrap(24, 200)
    dumpParagraphLines(P)
    # <onDraw> callback frags mixed with styled text (index-entry use case).
    text = """Dieses Kapitel bietet eine schnelle <b><font color=red>Programme :: starten</font></b>
<onDraw name=myIndex label="Programme :: starten">
<b><font color=red>Eingabeaufforderung :: (>>>)</font></b>
<onDraw name=myIndex label="Eingabeaufforderung :: (>>>)">
<b><font color=red>>>> (Eingabeaufforderung)</font></b>
<onDraw name=myIndex label=">>> (Eingabeaufforderung)">
Einfhrung in Python <b><font color=red>Python :: Einfhrung</font></b>
<onDraw name=myIndex label="Python :: Einfhrung">.
Das Ziel ist, die grundlegenden Eigenschaften von Python darzustellen, ohne
sich zu sehr in speziellen Regeln oder Details zu verstricken. Dazu behandelt
dieses Kapitel kurz die wesentlichen Konzepte wie Variablen, Ausdrcke,
Kontrollfluss, Funktionen sowie Ein- und Ausgabe. Es erhebt nicht den Anspruch,
umfassend zu sein."""
    P=Paragraph(text, styleSheet['Code'])
    dumpParagraphFrags(P)
    w,h = P.wrap(6*72, 9.7*72)
    dumpParagraphLines(P)
|
# -*- coding: utf-8 -*-
from meregistro.registro.models.EstablecimientoInformacionEdilicia import EstablecimientoInformacionEdilicia
from meregistro.registro.models.Nivel import Nivel
from meregistro.registro.models.TipoCompartido import TipoCompartido
from meregistro.registro.models.TipoDominio import TipoDominio
from django.core.exceptions import ValidationError
from django import forms
class EstablecimientoInformacionEdiliciaForm(forms.ModelForm):
    """ModelForm for a school's building information.

    Cross-field rules: a shared building ('Compartido' dominio) must say
    what it is shared with; sharing with another educational level must
    list those levels."""
    niveles = forms.ModelMultipleChoiceField(queryset = Nivel.objects.all().order_by('nombre'), widget = forms.CheckboxSelectMultiple, required = False)

    class Meta:
        model = EstablecimientoInformacionEdilicia
        exclude = ['establecimiento']

    def clean_tipo_compartido(self):
        """Require tipo_compartido whenever the building's dominio is 'Compartido'."""
        # .get() keeps tipo_compartido bound even when the field is absent;
        # the original hit an UnboundLocalError at the return when the
        # KeyError fired before the assignment.
        tipo_compartido = self.cleaned_data.get('tipo_compartido')
        try:
            tipo_dominio = self.cleaned_data['tipo_dominio']
            if tipo_dominio.id == TipoDominio.objects.get(descripcion = 'Compartido').id:
                if tipo_compartido is None:
                    raise ValidationError('Si el uso del edificio es compartido, debe detallar lo siguiente.')
        except KeyError:
            # tipo_dominio failed its own validation; nothing to cross-check
            pass
        return tipo_compartido

    def clean_niveles(self):
        """Require at least one nivel when sharing with another educational level."""
        niveles = self.cleaned_data['niveles']
        try:
            tipo_compartido = self.cleaned_data['tipo_compartido']
            # tipo_compartido may legitimately be None (blank optional field);
            # guard before dereferencing .id or this crashes with AttributeError
            if tipo_compartido is not None and tipo_compartido.id == TipoCompartido.objects.get(descripcion = 'Establecimiento de otro nivel').id:
                if len(niveles) == 0:
                    raise ValidationError('Debe especificar con qué niveles comparte el edificio.')
        except KeyError:
            pass
        return niveles
Se agrega chequeo en establecimiento información edilicia
# -*- coding: utf-8 -*-
from meregistro.registro.models.EstablecimientoInformacionEdilicia import EstablecimientoInformacionEdilicia
from meregistro.registro.models.Nivel import Nivel
from meregistro.registro.models.TipoCompartido import TipoCompartido
from meregistro.registro.models.TipoDominio import TipoDominio
from django.core.exceptions import ValidationError
from django import forms
class EstablecimientoInformacionEdiliciaForm(forms.ModelForm):
    """ModelForm for a school's building information.

    Cross-field rules: a shared building ('Compartido' dominio) must say
    what it is shared with; sharing with another educational level must
    list those levels."""
    niveles = forms.ModelMultipleChoiceField(queryset = Nivel.objects.all().order_by('nombre'), widget = forms.CheckboxSelectMultiple, required = False)

    class Meta:
        model = EstablecimientoInformacionEdilicia
        exclude = ['establecimiento']

    def clean_tipo_compartido(self):
        """Require tipo_compartido whenever the building's dominio is 'Compartido'."""
        # .get() keeps tipo_compartido bound even when the field is absent;
        # previously a KeyError on 'tipo_dominio' left it unassigned and the
        # final return raised UnboundLocalError.
        tipo_compartido = self.cleaned_data.get('tipo_compartido')
        try:
            tipo_dominio = self.cleaned_data['tipo_dominio']
            if tipo_dominio.id == TipoDominio.objects.get(descripcion = 'Compartido').id:
                if tipo_compartido is None:
                    raise ValidationError('Si el uso del edificio es compartido, debe detallar lo siguiente.')
        except KeyError:
            # tipo_dominio failed its own validation; nothing to cross-check
            pass
        return tipo_compartido

    def clean_niveles(self):
        """Require at least one nivel when sharing with another educational level."""
        niveles = self.cleaned_data['niveles']
        try:
            tipo_compartido = self.cleaned_data['tipo_compartido']
            # tipo_compartido may legitimately be None (blank optional field)
            if tipo_compartido is not None and tipo_compartido.id == TipoCompartido.objects.get(descripcion = 'Establecimiento de otro nivel').id:
                if len(niveles) == 0:
                    raise ValidationError('Debe especificar con qué niveles comparte el edificio.')
        except KeyError:
            pass
        return niveles
|
def format_av(output, result):
    """Summarise an antivirus probe result into output['result'] and
    output['version'].

    result['data'] is expected to carry 'scan_results' (per-engine
    verdicts), and optionally 'name'/'version' of the engine."""
    if 'data' in result:
        data = result['data']
        if 'scan_results' in data:
            # list() so indexing works on Python 3 dict views too
            res_list = list(data['scan_results'].values())
            if len(res_list) > 1:
                # if multiple output, filter None results
                res = [item for item in res_list if item is not None]
                output['result'] = " - ".join(res)
            else:
                output['result'] = res_list[0]
        else:
            output['result'] = "not parsed"
        # bug fix: the engine name lives in data, not in the outer result;
        # the old "'name' in result" test let data['name'] raise KeyError
        if 'name' in data and 'version' in data:
            output['version'] = "{0} {1}".format(data['name'], data['version'])
        elif 'name' in data:
            output['version'] = data['name']
    else:
        output['result'] = "Error"
    return
def format_vt(output, result):
    """Summarise a VirusTotal probe result: selected engines' verdicts in
    output['result'] and the scan date in output['version']."""
    if 'data' in result:
        # single-entry dict keyed by file hash; list() for Python 3 views
        data = list(result['data'].values())[0]
        if type(data) is int:
            # an int payload is a VT error code; return early -- the old
            # code fell through and crashed on "'response_code' in data"
            output['result'] = "error {0}".format(data)
            return
        if 'response_code' in data and data['response_code'] == 0:
            output['result'] = "file never scanned"
        if 'scans' in data:
            scan = data['scans']
            av_res = []
            for av in ['ClamAV', 'Kaspersky', 'Symantec', 'McAfee', 'Sophos']:
                if av in scan:
                    av_res.append("{0}:{1}".format(av, scan[av]['result']))
            output['result'] = " - ".join(av_res)
        if 'scan_date' in data:
            output['version'] = data['scan_date']
    else:
        output['result'] = "Error"
    return
def format_static(output, result):
    """Summarise a static-analysis probe result: one 'key:value' token per
    entry (lists are rendered by length), joined with ' / '."""
    if 'data' in result:
        # single-entry dict; list() so [0] works on Python 3 dict views
        data = list(result['data'].values())[0]
        if type(data) == dict:
            res = []
            for (k, v) in data.items():
                if v is None:
                    res.append("{0}:none".format(k))
                elif type(v) == list:
                    # lists are summarised by their element count
                    res.append("{0}:{1}".format(k, len(v)))
                elif type(v) == int or type(v) == str:
                    res.append("{0}:{1}".format(k, v))
            output['result'] = " / ".join(res)
        else:
            output['result'] = "no results"
    else:
        output['result'] = "Error"
    return
def format_nsrl(output, _):
    """Placeholder: Nsrl results have no dedicated rendering yet."""
    output.update({'result': "no formatter", 'version': "unknown"})
def format_default(output, _):
    """Fallback for probes without a dedicated formatter."""
    output.update({'result': "no formatter", 'version': "unknown"})
def sanitize_dict(d):
    """Return a copy of *d* with '.' and '$' stripped from keys
    ('.'->'_', '$' removed), recursing into nested dicts.  Values other
    than dicts are kept as-is (lists of dicts are NOT descended into)."""
    new = {}
    # items() works on both Python 2 and 3; iteritems() is Python-2-only
    for k, v in d.items():
        if isinstance(v, dict):
            v = sanitize_dict(v)
        newk = k.replace('.', '_').replace('$', '')
        new[newk] = v
    return new
# Maps a probe name to its formatter callable; format_result falls back
# to format_default for probes not listed here.
probe_formatter = {
    'Kaspersky': format_av,
    'Sophos': format_av,
    'McAfeeVSCL': format_av,
    'ClamAV': format_av,
    'Symantec': format_av,
    'VirusTotal': format_vt,
    'StaticAnalyzer': format_static,
    'Nsrl': format_nsrl
}
def format_result(probe, raw_result):
    """Wrap a raw probe result: sanitized copy under 'probe_res', plus the
    per-probe summary fields added by the matching formatter."""
    formatted = {'probe_res': sanitize_dict(raw_result)}
    handler = probe_formatter.get(probe, format_default)
    handler(formatted, raw_result)
    return formatted
add formatter support for FProt, EsetNod32, ComodoCAVL
def format_av(output, result):
    """Summarise an antivirus probe result into output['result'] and
    output['version'].

    result['data'] is expected to carry 'scan_results' (per-engine
    verdicts), and optionally 'name'/'version' of the engine."""
    if 'data' in result:
        data = result['data']
        if 'scan_results' in data:
            # list() so indexing works on Python 3 dict views too
            res_list = list(data['scan_results'].values())
            if len(res_list) > 1:
                # if multiple output, filter None results
                res = [item for item in res_list if item is not None]
                output['result'] = " - ".join(res)
            else:
                output['result'] = res_list[0]
        else:
            output['result'] = "not parsed"
        # bug fix: the engine name lives in data, not in the outer result;
        # the old "'name' in result" test let data['name'] raise KeyError
        if 'name' in data and 'version' in data:
            output['version'] = "{0} {1}".format(data['name'], data['version'])
        elif 'name' in data:
            output['version'] = data['name']
    else:
        output['result'] = "Error"
    return
def format_vt(output, result):
    """Summarise a VirusTotal probe result: selected engines' verdicts in
    output['result'] and the scan date in output['version']."""
    if 'data' in result:
        # single-entry dict keyed by file hash; list() for Python 3 views
        data = list(result['data'].values())[0]
        if type(data) is int:
            # an int payload is a VT error code; return early -- the old
            # code fell through and crashed on "'response_code' in data"
            output['result'] = "error {0}".format(data)
            return
        if 'response_code' in data and data['response_code'] == 0:
            output['result'] = "file never scanned"
        if 'scans' in data:
            scan = data['scans']
            av_res = []
            for av in ['ClamAV', 'Kaspersky', 'Symantec', 'McAfee', 'Sophos']:
                if av in scan:
                    av_res.append("{0}:{1}".format(av, scan[av]['result']))
            output['result'] = " - ".join(av_res)
        if 'scan_date' in data:
            output['version'] = data['scan_date']
    else:
        output['result'] = "Error"
    return
def format_static(output, result):
    """Summarise a static-analysis probe result: one 'key:value' token per
    entry (lists are rendered by length), joined with ' / '."""
    if 'data' in result:
        # single-entry dict; list() so [0] works on Python 3 dict views
        data = list(result['data'].values())[0]
        if type(data) == dict:
            res = []
            for (k, v) in data.items():
                if v is None:
                    res.append("{0}:none".format(k))
                elif type(v) == list:
                    # lists are summarised by their element count
                    res.append("{0}:{1}".format(k, len(v)))
                elif type(v) == int or type(v) == str:
                    res.append("{0}:{1}".format(k, v))
            output['result'] = " / ".join(res)
        else:
            output['result'] = "no results"
    else:
        output['result'] = "Error"
    return
def format_nsrl(output, _):
    """Placeholder: Nsrl results have no dedicated rendering yet."""
    output.update({'result': "no formatter", 'version': "unknown"})
def format_default(output, _):
    """Fallback for probes without a dedicated formatter."""
    output.update({'result': "no formatter", 'version': "unknown"})
def sanitize_dict(d):
    """Return a copy of *d* with '.' and '$' stripped from keys
    ('.'->'_', '$' removed), recursing into nested dicts.  Values other
    than dicts are kept as-is (lists of dicts are NOT descended into)."""
    new = {}
    # items() works on both Python 2 and 3; iteritems() is Python-2-only
    for k, v in d.items():
        if isinstance(v, dict):
            v = sanitize_dict(v)
        newk = k.replace('.', '_').replace('$', '')
        new[newk] = v
    return new
# Maps a probe name to its formatter callable, grouped by probe category;
# format_result falls back to format_default for probes not listed here.
probe_formatter = {
    # antivirus
    'ClamAV': format_av,
    'ComodoCAVL': format_av,
    'EsetNod32': format_av,
    'FProt': format_av,
    'Kaspersky': format_av,
    'McAfeeVSCL': format_av,
    'Sophos': format_av,
    'Symantec': format_av,
    # database
    'Nsrl': format_nsrl,
    # information
    'StaticAnalyzer': format_static,
    # web
    'VirusTotal': format_vt,
}
def format_result(probe, raw_result):
    """Wrap a raw probe result: sanitized copy under 'probe_res', plus the
    per-probe summary fields added by the matching formatter."""
    formatted = {'probe_res': sanitize_dict(raw_result)}
    handler = probe_formatter.get(probe, format_default)
    handler(formatted, raw_result)
    return formatted
|
#from django.contrib.gis.db import models
from django.db import models
from django import forms
import datetime
from fumblerooski.coaches.models import Coach, CoachingJob
# Season the site currently displays; bumped manually each year.
CURRENT_SEASON = 2008

# Player class standing.
STATUS_CHOICES = (
    ('FR', 'Freshman'),
    ('SO', 'Sophomore'),
    ('JR', 'Junior'),
    ('SR', 'Senior'),
)

# Which unit a position belongs to.
POSITION_TYPE_CHOICES = (
    ('O', 'Offense'),
    ('D', 'Defense'),
    ('S', 'Special Teams'),
)

# Which side of the field a drive/play stat is recorded against.
SIDE_CHOICES = (
    ('O', 'Own'),
    ('P', 'Opponents'),
)

# Game outcome from the team's perspective.
RESULT_CHOICES = (
    ('W', 'Win'),
    ('L', 'Loss'),
    ('T', 'Tie'),
)

# Venue classification for a game.
GAME_TYPE_CHOICES = (
    ('H', 'Home'),
    ('A', 'Away'),
    ('N', 'Neutral Site'),
)

# Whether a ranking applies to a team or an individual player.
RANKINGTYPE_CHOICES = (
    ('T', 'Team'),
    ('P', 'Player'),
)

# Play-by-play play classification.
PLAY_CHOICES = (
    ('R', 'Run'),
    ('P', 'Pass'),
    ('F', 'Field Goal'),
    ('X', 'Extra Point'),
    ('N', 'Penalty'),
    ('K', 'Kickoff'),
    ('U', 'Punt'),
    ('T', 'Turnover'),
)

# NCAA division of the program.
DIVISION_CHOICES = (
    ('B', 'Bowl Subdivision'),
    ('C', 'Championship Subdivision'),
    ('D', 'Division II'),
    ('T', 'Division III'),
)
class State(models.Model):
    """U.S. state, keyed by its two-letter postal abbreviation."""
    id = models.CharField(max_length=2, editable=False, primary_key=True)
    name = models.CharField(max_length=50)
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        return "/states/%s/" % self.id.lower()
class StateForm(forms.Form):
    """Form exposing a single state picker, ordered alphabetically by name."""
    name = forms.ModelChoiceField(queryset=State.objects.all().order_by('name'))
class City(models.Model):
    """A city, optionally tied to a state."""
    name = models.CharField(max_length=75)
    slug = models.SlugField(max_length=75)
    state = models.ForeignKey(State, null=True, blank=True)
    # GeoDjango support disabled; see the commented import at module top.
    # point = models.PointField()
    # objects = models.GeoManager()
    def __unicode__(self):
        if self.state:
            return "%s, %s" % (self.name, self.state.id)
        else:
            return self.name
    def get_absolute_url(self):
        # NOTE(review): raises AttributeError when state is None --
        # confirm every city routed through here has a state.
        return "/college/states/%s/%s/" % (self.state.id.lower(), self.slug)
    class Meta:
        verbose_name_plural = 'cities'
class Week(models.Model):
    """A numbered week within a season, bounded by its end date."""
    year = models.IntegerField()
    week_num = models.IntegerField()
    end_date = models.DateField()
    def __unicode__(self):
        return "Week %s, %s" % (self.week_num, self.year)
    def week_games_url(self):
        # Page listing all games played in this week.
        return "/college/seasons/%s/week/%s/" % (self.year, self.week_num)
class Conference(models.Model):
    """An athletic conference, addressed by its lowercased abbreviation."""
    abbrev = models.CharField(max_length=10)
    name = models.CharField(max_length=90)
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        return '/college/conferences/%s/' % self.abbrev.lower()
class College(models.Model):
    """A school fielding a college football team."""
    name = models.CharField(max_length=90)
    slug = models.SlugField(max_length=90)
    drive_slug = models.CharField(max_length=90)
    state = models.ForeignKey(State, blank=True)
    official_url = models.CharField(max_length=120, blank=True)
    official_rss = models.CharField(max_length=120, blank=True)
    updated = models.BooleanField()
    # objects = models.GeoManager()
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        return '/college/teams/%s/' % self.slug
    def current_record(self):
        """Return this calendar year's record as "(wins-losses)".

        Raises CollegeYear.DoesNotExist when no season row exists for
        the current year.
        """
        # Fetch the season row once; the original issued the identical
        # ORM query twice (once for wins, once for losses).
        season = self.collegeyear_set.get(year=datetime.date.today().year)
        return "(%d-%d)" % (season.wins, season.losses)
class CollegeYear(models.Model):
    """One college's season: record, class counts and conference membership."""
    college = models.ForeignKey(College)
    year = models.IntegerField()
    wins = models.IntegerField(default=0)
    losses = models.IntegerField(default=0)
    ties = models.IntegerField(default=0)
    conference_wins = models.IntegerField(default=0)
    conference_losses = models.IntegerField(default=0)
    conference_ties = models.IntegerField(default=0)
    freshmen = models.IntegerField(default=0)
    sophomores = models.IntegerField(default=0)
    juniors = models.IntegerField(default=0)
    seniors = models.IntegerField(default=0)
    conference = models.ForeignKey(Conference, null=True, blank=True)
    division = models.CharField(max_length=1, choices=DIVISION_CHOICES)
    def __unicode__(self):
        return "%s - %s" % (self.college.name, str(self.year))
    def game_count(self):
        """Total games played: wins + losses + ties."""
        return self.wins+self.losses+self.ties
    def get_ncaa_week_url(self):
        """NCAA ranking-summary URL for this team/season (week left blank)."""
        return 'http://web1.ncaa.org/football/exec/rankingSummary?year=%d&org=%d&week=' % (self.year, self.college.id)
    def get_absolute_url(self):
        return "/college/teams/%s/%s/" % (self.college.slug, self.year)
    def get_conference_url(self):
        """Link to this season's conference page; None for independents.

        Bug fix: path segment is the plural "conferences", matching
        Conference.get_absolute_url (the original said "conference").
        """
        if self.conference:
            return "/college/conferences/%s/%s/" % (self.conference.abbrev, self.year)
    def record(self):
        """Record string: "W-L-T" when ties exist, otherwise "W-L"."""
        if self.ties:
            return "%s-%s-%s" % (self.wins, self.losses, self.ties)
        else:
            return "%s-%s" % (self.wins, self.losses)
    class Meta:
        ordering = ['college', '-year']
class CollegeCoach(models.Model):
    """A coach's stint (one or more jobs) at a college in a given season."""
    coach = models.ForeignKey(Coach)
    collegeyear = models.ForeignKey(CollegeYear)
    jobs = models.ManyToManyField(CoachingJob)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(null=True, blank=True)
    def __unicode__(self):
        return "%s - %s" % (self.coach, self.collegeyear)
    def jobs_display(self):
        """Comma-separated list of this stint's job titles."""
        return ", ".join([x.name for x in self.jobs.all()])
    def is_current_job(self):
        """True if the stint is in the current season and has not ended."""
        return self.collegeyear.year == CURRENT_SEASON and self.end_date is None
    def partial_season(self):
        """True if the coach left before the season ended (end_date set).

        Bug fix: the original tested the bare name ``end_date`` — a
        NameError at runtime — instead of ``self.end_date``.
        """
        return bool(self.end_date)
    class Meta:
        verbose_name_plural = 'College coaches'
class CollegeTotal(models.Model):
    """Season-long cumulative team statistics for a college.

    Mirrors the per-game counting fields on GameOffense, accumulated
    over one year.
    """
    college = models.ForeignKey(College)
    year = models.IntegerField()
    third_down_attempts = models.IntegerField(default=0)
    third_down_conversions = models.IntegerField(default=0)
    fourth_down_attempts = models.IntegerField(default=0)
    fourth_down_conversions = models.IntegerField(default=0)
    first_downs_rushing = models.IntegerField(default=0)
    first_downs_passing = models.IntegerField(default=0)
    first_downs_penalty = models.IntegerField(default=0)
    first_downs_total = models.IntegerField(default=0)
    penalties = models.IntegerField(default=0)
    penalty_yards = models.IntegerField(default=0)
    fumbles = models.IntegerField(default=0)
    fumbles_lost = models.IntegerField(default=0)
    rushes = models.IntegerField(default=0)
    rush_gain = models.IntegerField(default=0)
    rush_loss = models.IntegerField(default=0)
    rush_net = models.IntegerField(default=0)
    rush_touchdowns = models.IntegerField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    pass_attempts = models.IntegerField(default=0)
    pass_completions = models.IntegerField(default=0)
    pass_interceptions = models.IntegerField(default=0)
    pass_yards = models.IntegerField(default=0)
    pass_touchdowns = models.IntegerField(default=0)
    receptions = models.IntegerField(default=0)
    receiving_yards = models.IntegerField(default=0)
    receiving_touchdowns = models.IntegerField(default=0)
    punts = models.IntegerField(default=0)
    punt_yards = models.IntegerField(default=0)
    punt_returns = models.IntegerField(default=0)
    punt_return_yards = models.IntegerField(default=0)
    punt_return_touchdowns = models.IntegerField(default=0)
    kickoff_returns = models.IntegerField(default=0)
    kickoff_return_yards = models.IntegerField(default=0)
    kickoff_return_touchdowns = models.IntegerField(default=0)
    touchdowns = models.IntegerField(default=0)
    pat_attempts = models.IntegerField(default=0)
    pat_made = models.IntegerField(default=0)
    two_point_conversion_attempts = models.IntegerField(default=0)
    two_point_conversions = models.IntegerField(default=0)
    field_goal_attempts = models.IntegerField(default=0)
    field_goals_made = models.IntegerField(default=0)
    points = models.IntegerField(default=0)
class Position(models.Model):
    """A player position, grouped by offense/defense/special teams."""
    abbrev = models.CharField(max_length=5)
    name = models.CharField(max_length=25)
    plural_name = models.CharField(max_length=25)
    position_type = models.CharField(max_length=1, choices=POSITION_TYPE_CHOICES)
    def __unicode__(self):
        return self.abbrev
    def get_absolute_url(self):
        return '/recruits/positions/%s/' % self.abbrev.lower()
class BowlGame(models.Model):
    """A named bowl game and its host city."""
    name = models.CharField(max_length=75)
    slug = models.CharField(max_length=75)
    city = models.ForeignKey(City)
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        return '/college/bowl-games/%s/' % self.slug
class Game(models.Model):
    """A single game between two teams, recorded from team1's perspective."""
    season = models.IntegerField()
    team1 = models.ForeignKey(College, related_name='first_team')
    team2 = models.ForeignKey(College, related_name='second_team')
    date = models.DateField()
    week = models.ForeignKey(Week)
    t1_game_type = models.CharField(max_length=1, choices=GAME_TYPE_CHOICES)
    t1_result = models.CharField(max_length=1, choices=RESULT_CHOICES, blank=True)
    team1_score = models.IntegerField(null=True, blank=True)
    team2_score = models.IntegerField(null=True, blank=True)
    site = models.CharField(max_length=90, blank=True)
    attendance = models.IntegerField(null=True, blank=True)
    overtime = models.CharField(max_length=5, blank=True)
    ncaa_xml = models.CharField(max_length=120, blank=True)
    duration = models.TimeField(null=True, blank=True)
    has_drives = models.BooleanField()
    has_stats = models.BooleanField()
    has_player_stats = models.BooleanField()
    is_conference_game = models.BooleanField()
    is_bowl_game = models.BooleanField()
    bowl_game = models.ForeignKey(BowlGame, null=True, blank=True)
    def __unicode__(self):
        return '%s vs. %s, %s' % (self.team1, self.team2, self.date)
    def get_absolute_url(self):
        return '/college/teams/%s/vs/%s/%s/%s/%s/' % (self.team1.slug, self.team2.slug, self.date.year, self.date.month, self.date.day)
    def get_matchup_url(self):
        # All-time matchup page (no date component).
        return '/college/teams/%s/vs/%s/' % (self.team1.slug, self.team2.slug)
    def get_reverse_url(self):
        # Same game URL with the two teams swapped.
        return '/college/teams/%s/vs/%s/%s/%s/%s/' % (self.team2.slug, self.team1.slug, self.date.year, self.date.month, self.date.day)
    def get_ncaa_xml_url(self):
        # Official NCAA worksheet XML for this game.
        return 'http://web1.ncaa.org/d1mfb/%s/Internet/worksheets/%s.xml' % (self.season, self.ncaa_xml.strip())
    def get_ncaa_drive_url(self):
        # NCAA drive-summary page; date formatted like "01-SEP-08".
        return "http://web1.ncaa.org/mfb/driveSummary.jsp?acadyr=%s&h=%s&v=%s&date=%s&game=%s" % (self.season, self.team1.id, self.team2.id, self.date.strftime("%d-%b-%y").upper(), self.ncaa_xml.strip())
    def get_play_by_play_url(self):
        # Same drive summary expanded to full play-by-play (expand=A).
        return "http://web1.ncaa.org/mfb/driveSummary.jsp?expand=A&acadyr=%s&h=%s&v=%s&date=%s&game=%s" % (self.season, self.team1.id, self.team2.id, self.date.strftime("%d-%b-%y").upper(), self.ncaa_xml.strip())
    def margin(self):
        # team1's margin of victory.  NOTE(review): raises TypeError when
        # either score is still NULL -- confirm callers use completed games.
        return self.team1_score-self.team2_score
    def display(self):
        # Winner listed first; ties fall through to the second branch.
        if self.margin() > 0:
            return "%s %s, %s %s" % (self.team1, self.team1_score, self.team2, self.team2_score)
        else:
            return "%s %s, %s %s" % (self.team2, self.team2_score, self.team1, self.team1_score)
class DriveOutcome(models.Model):
    """How a drive ended; referenced by GameDrive.end_result."""
    abbrev = models.CharField(max_length=10)
    name = models.CharField(max_length=50, null=True)
    slug = models.SlugField(max_length=50, null=True)
    def __unicode__(self):
        return self.name
class GameDrive(models.Model):
    """A single possession in a game: start/end context plus totals."""
    game = models.ForeignKey(Game)
    team = models.ForeignKey(College)
    # Sequence number of the drive within the game.
    drive = models.IntegerField()
    quarter = models.PositiveSmallIntegerField()
    # Free-text description of how the possession began.
    start_how = models.CharField(max_length=25)
    start_time = models.TimeField()
    start_position = models.IntegerField()
    start_side = models.CharField(max_length=1, choices=SIDE_CHOICES)
    end_result = models.ForeignKey(DriveOutcome)
    end_time = models.TimeField()
    end_position = models.IntegerField(null=True)
    end_side = models.CharField(max_length=1, choices=SIDE_CHOICES)
    plays = models.IntegerField()
    yards = models.IntegerField()
    time_of_possession = models.TimeField()
    def __unicode__(self):
        return "%s: %s drive %s" % (self.game, self.team, self.drive)
class GameOffense(models.Model):
    """Single-game offensive statistics for one team."""
    game = models.ForeignKey(Game)
    team = models.ForeignKey(College)
    year = models.IntegerField()
    third_down_attempts = models.IntegerField(default=0)
    third_down_conversions = models.IntegerField(default=0)
    fourth_down_attempts = models.IntegerField(default=0)
    fourth_down_conversions = models.IntegerField(default=0)
    time_of_possession = models.TimeField(null=True)
    first_downs_rushing = models.IntegerField(default=0)
    first_downs_passing = models.IntegerField(default=0)
    first_downs_penalty = models.IntegerField(default=0)
    first_downs_total = models.IntegerField(default=0)
    penalties = models.IntegerField(default=0)
    penalty_yards = models.IntegerField(default=0)
    fumbles = models.IntegerField(default=0)
    fumbles_lost = models.IntegerField(default=0)
    rushes = models.IntegerField(default=0)
    rush_gain = models.IntegerField(default=0)
    rush_loss = models.IntegerField(default=0)
    rush_net = models.IntegerField(default=0)
    rush_touchdowns = models.IntegerField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    pass_attempts = models.IntegerField(default=0)
    pass_completions = models.IntegerField(default=0)
    pass_interceptions = models.IntegerField(default=0)
    pass_yards = models.IntegerField(default=0)
    pass_touchdowns = models.IntegerField(default=0)
    receptions = models.IntegerField(default=0)
    receiving_yards = models.IntegerField(default=0)
    receiving_touchdowns = models.IntegerField(default=0)
    punts = models.IntegerField(default=0)
    punt_yards = models.IntegerField(default=0)
    punt_returns = models.IntegerField(default=0)
    punt_return_yards = models.IntegerField(default=0)
    punt_return_touchdowns = models.IntegerField(default=0)
    kickoff_returns = models.IntegerField(default=0)
    kickoff_return_yards = models.IntegerField(default=0)
    kickoff_return_touchdowns = models.IntegerField(default=0)
    touchdowns = models.IntegerField(default=0)
    pat_attempts = models.IntegerField(default=0)
    pat_made = models.IntegerField(default=0)
    two_point_conversion_attempts = models.IntegerField(default=0)
    two_point_conversions = models.IntegerField(default=0)
    field_goal_attempts = models.IntegerField(default=0)
    field_goals_made = models.IntegerField(default=0)
    points = models.IntegerField(default=0)
    def __unicode__(self):
        return '%s - %s' % (self.game, self.team)
    def third_down_rate(self):
        """Third-down conversion rate (ZeroDivisionError if no attempts)."""
        return float(self.third_down_conversions)/float(self.third_down_attempts)
    def field_goal_rate(self):
        """Field-goal success rate (ZeroDivisionError if no attempts)."""
        return float(self.field_goals_made)/float(self.field_goal_attempts)
    def penalty_yard_ratio(self):
        """Penalty yards as a fraction of total yards."""
        return float(self.penalty_yards)/float(self.total_yards)
    def yards_per_reception(self):
        """Average yards gained per reception."""
        return float(self.receiving_yards)/float(self.receptions)
    def yards_per_pass_attempt(self):
        """Yards gained per pass attempt.

        NOTE(review): divides receiving_yards (not pass_yards) by
        pass_attempts; for team totals these are usually equal, but
        confirm this is intended.
        """
        return float(self.receiving_yards)/(self.pass_attempts)
    def rushing_first_downs_pct(self):
        """Percentage of first downs gained by rushing."""
        return float(self.first_downs_rushing)/float(self.first_downs_total)*100
    def touchdowns_per_rushes(self):
        """Rushing touchdowns per rushing attempt for a single game,
        expressed as a percentage.

        (The original file carried this text as a stray class-level
        string; it is now a proper docstring.)
        """
        return float(self.rush_touchdowns)/float(self.rushes)*100
    def opponent(self):
        """Return the opposing College for this record's game."""
        if self.team == self.game.team2:
            return self.game.team1
        else:
            return self.game.team2
class GameDefense(models.Model):
    """Single-game defensive statistics for one team."""
    game = models.ForeignKey(Game)
    team = models.ForeignKey(College)
    safeties = models.IntegerField(default=0)
    unassisted_tackles = models.IntegerField(default=0)
    assisted_tackles = models.IntegerField(default=0)
    unassisted_tackles_for_loss = models.IntegerField(default=0)
    assisted_tackles_for_loss = models.IntegerField(default=0)
    tackles_for_loss_yards = models.IntegerField(default=0)
    unassisted_sacks = models.IntegerField(default=0)
    assisted_sacks = models.IntegerField(default=0)
    sack_yards = models.IntegerField(default=0)
    defensive_interceptions = models.IntegerField(default=0)
    defensive_interception_yards = models.IntegerField(default=0)
    defensive_interception_touchdowns = models.IntegerField(default=0)
    pass_breakups = models.IntegerField(default=0)
    fumbles_forced = models.IntegerField(default=0)
    fumbles_number = models.IntegerField(default=0)
    fumbles_yards = models.IntegerField(default=0)
    fumbles_touchdowns = models.IntegerField(default=0)
    def __unicode__(self):
        return '%s - %s' % (self.game, self.team)
class Player(models.Model):
    """A roster entry: one player on one team in one season."""
    name = models.CharField(max_length=120)
    slug = models.SlugField(max_length=120)
    team = models.ForeignKey(College)
    year = models.IntegerField()
    position = models.ForeignKey(Position)
    # Jersey number; CharField to allow non-numeric entries.
    number = models.CharField(max_length=4)
    games_played = models.PositiveIntegerField(default=0)
    status = models.CharField(max_length=2, choices=STATUS_CHOICES)
    def __unicode__(self):
        return "%s - %s" % (self.name, self.team)
    def get_absolute_url(self):
        return '/college/teams/%s/%s/players/%s/' % (self.team.slug, self.year, self.slug)
class PlayerCollegeCareer(models.Model):
    """Span of a player's college career, from first to last season."""
    player = models.ForeignKey(Player)
    first_season = models.ForeignKey(CollegeYear, related_name='first_season')
    last_season = models.ForeignKey(CollegeYear, related_name='last_season')
    total_games = models.IntegerField(null=True, blank=True)
    def __unicode__(self):
        # Bug fix: Player.name is a CharField, so the original
        # self.player.name.full_name() raised AttributeError on str.
        return self.player.name
class PlayerGame(models.Model):
    """Per-game participation summary for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    played = models.BooleanField()
    total_plays = models.IntegerField()
    total_yards = models.IntegerField()
    def __unicode__(self):
        return self.player.name
class PlayerRush(models.Model):
    """Per-game rushing statistics for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    rushes = models.IntegerField(default=0)
    gain = models.IntegerField(default=0)
    loss = models.IntegerField(default=0)
    net = models.IntegerField(default=0)
    td = models.IntegerField(default=0)
    long_yards = models.IntegerField(default=0)
    average = models.FloatField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    class Meta:
        verbose_name_plural = "player rushing"
class PlayerPass(models.Model):
    """Per-game passing statistics for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    attempts = models.IntegerField(default=0)
    completions = models.IntegerField(default=0)
    interceptions = models.IntegerField(default=0)
    yards = models.IntegerField(default=0)
    td = models.IntegerField(default=0)
    conversions = models.IntegerField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    pass_efficiency = models.FloatField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    def comp_att(self):
        # Display string like "12 of 20".
        return "%d of %d" % (self.completions, self.attempts)
    class Meta:
        verbose_name_plural = 'player passing'
class PlayerReceiving(models.Model):
    """Per-game receiving statistics for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    receptions = models.IntegerField(default=0)
    yards = models.IntegerField(default=0)
    td = models.IntegerField(default=0)
    long_yards = models.IntegerField(default=0)
    average = models.FloatField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerScoring(models.Model):
    """Per-game scoring statistics (kicking, conversions, safeties)."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    td = models.IntegerField(default=0)
    fg_att = models.IntegerField(default=0)
    fg_made = models.IntegerField(default=0)
    pat_att = models.IntegerField(default=0)
    pat_made = models.IntegerField(default=0)
    two_pt_att = models.IntegerField(default=0)
    two_pt_made = models.IntegerField(default=0)
    def_pat_att = models.IntegerField(default=0)
    def_pat_made = models.IntegerField(default=0)
    def_two_pt_att = models.IntegerField(default=0)
    def_two_pt_made = models.IntegerField(default=0)
    safeties = models.IntegerField(default=0)
    points = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerTackle(models.Model):
    """Per-game tackle counts for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    unassisted_tackles = models.IntegerField(default=0)
    assisted_tackles = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    def total_tackles(self):
        """Combined unassisted and assisted tackles."""
        return self.unassisted_tackles+self.assisted_tackles
class PlayerTacklesLoss(models.Model):
    """Per-game tackles-for-loss and sack statistics for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    unassisted_tackles_for_loss = models.IntegerField(default=0)
    assisted_tackles_for_loss = models.IntegerField(default=0)
    tackles_for_loss_yards = models.IntegerField(default=0)
    unassisted_sacks = models.IntegerField(default=0)
    assisted_sacks = models.IntegerField(default=0)
    sack_yards = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    def total_sacks(self):
        """Combined unassisted and assisted sacks."""
        return self.unassisted_sacks+self.assisted_sacks
    def total_tackles_for_loss(self):
        """Combined unassisted and assisted tackles for loss."""
        return self.unassisted_tackles_for_loss+self.assisted_tackles_for_loss
    class Meta:
        verbose_name_plural = 'player tackles for loss'
class PlayerPassDefense(models.Model):
    """Per-game pass-defense statistics for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    interceptions = models.IntegerField(default=0)
    interception_yards = models.IntegerField(default=0)
    interception_td = models.IntegerField(default=0)
    pass_breakups = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerFumble(models.Model):
    """Per-game fumble statistics for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    fumbles_forced = models.IntegerField(default=0)
    fumbles_number = models.IntegerField(default=0)
    fumbles_yards = models.IntegerField(default=0)
    fumbles_td = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerReturn(models.Model):
    """Per-game punt and kickoff return statistics for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    punt_returns = models.IntegerField(default=0)
    punt_return_yards = models.IntegerField(default=0)
    punt_return_td = models.IntegerField(default=0)
    kickoff_returns = models.IntegerField(default=0)
    kickoff_return_yards = models.IntegerField(default=0)
    kickoff_return_td = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerSummary(models.Model):
    """Season-level offensive totals for a player (nullable until loaded)."""
    player = models.ForeignKey(Player)
    rushes = models.IntegerField(null=True)
    rush_gain = models.IntegerField(null=True)
    rush_loss = models.IntegerField(null=True)
    rush_net = models.IntegerField(null=True)
    rush_td = models.IntegerField(null=True)
    pass_attempts = models.IntegerField(null=True)
    pass_complete = models.IntegerField(null=True)
    pass_intercept = models.IntegerField(null=True)
    pass_yards = models.IntegerField(null=True)
    pass_td = models.IntegerField(null=True)
    conversions = models.IntegerField(null=True)
    offense_plays = models.IntegerField(null=True)
    offense_yards = models.IntegerField(null=True)
    receptions = models.IntegerField(null=True)
    reception_yards = models.IntegerField(null=True)
    reception_td = models.IntegerField(null=True)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.player.year)
class RankingType(models.Model):
    """A ranking category (team or player) as named by the NCAA."""
    name = models.CharField(max_length=75)
    slug = models.SlugField(max_length=75)
    typename = models.CharField(max_length=1, choices=RANKINGTYPE_CHOICES)
    ncaa_name = models.CharField(max_length=75)
    def __unicode__(self):
        return self.name
    def get_current_url(self):
        # Rankings page for the season in progress.
        return "/college/rankings/%s/%s/" % (self.slug, CURRENT_SEASON)
    def get_partial_url(self):
        # Year-less prefix; callers append a season.
        return "/college/rankings/%s/" % self.slug
class Ranking(models.Model):
    """A team's position in one ranking type for one week."""
    ranking_type = models.ForeignKey(RankingType)
    college = models.ForeignKey(College)
    year = models.IntegerField()
    week = models.ForeignKey(Week)
    rank = models.PositiveIntegerField()
    is_tied = models.BooleanField()
    # The underlying statistic value the rank is based on.
    actual = models.FloatField()
    conference_rank = models.PositiveIntegerField(null=True)
    is_conf_tied = models.BooleanField()
    division = models.CharField(max_length=1)
    def __unicode__(self):
        return "%s - %s, %s (%s)" % (self.ranking_type, self.college, self.year, self.week)
    def get_week_url(self):
        return "/college/rankings/%s/%s/week/%s/" % (self.ranking_type.slug, self.year, self.week.week_num)
class RushingSummary(models.Model):
    """Weekly rushing leaderboard entry for a player."""
    player = models.ForeignKey(Player)
    year = models.IntegerField()
    week = models.ForeignKey(Week)
    rank = models.PositiveIntegerField()
    is_tied = models.BooleanField()
    carries = models.PositiveIntegerField()
    net = models.PositiveIntegerField()
    td = models.PositiveIntegerField()
    average = models.FloatField()
    yards_per_game = models.FloatField()
    def __unicode__(self):
        # Bug fix: the original omitted the % operator, so the format
        # string was *called* with the tuple (TypeError at runtime).
        return "%s - %s, %s" % (self.player, self.year, self.yards_per_game)
class Poll(models.Model):
    """A named ranking poll (e.g. identified by its slug)."""
    name = models.CharField(max_length=50)
    slug = models.SlugField(max_length=50)
    def __unicode__(self):
        return self.name
class PollResults(models.Model):
    """One team's rank in one poll for one week."""
    poll = models.ForeignKey(Poll)
    week = models.ForeignKey(Week)
    team = models.ForeignKey(College)
    rank = models.IntegerField()
    def __unicode__(self):
        return "%s: %s %s" % (self.poll, self.week, self.team)
# fixed conference url (stray commit message from the source archive; the
# revised module below changes get_conference_url to /college/conferences/)
#from django.contrib.gis.db import models
from django.db import models
from django import forms
import datetime
from fumblerooski.coaches.models import Coach, CoachingJob
# Season currently being tracked; drives "current" URLs and coach-tenure checks.
CURRENT_SEASON = 2008
# Player academic year.
STATUS_CHOICES = (
    ('FR', 'Freshman'),
    ('SO', 'Sophomore'),
    ('JR', 'Junior'),
    ('SR', 'Senior'),
)
# Which unit a position belongs to.
POSITION_TYPE_CHOICES = (
    ('O', 'Offense'),
    ('D', 'Defense'),
    ('S', 'Special Teams'),
)
# Which side of the field a yard line refers to.
SIDE_CHOICES = (
    ('O', 'Own'),
    ('P', 'Opponents'),
)
# Game outcome from a team's perspective.
RESULT_CHOICES = (
    ('W', 'Win'),
    ('L', 'Loss'),
    ('T', 'Tie'),
)
# Venue relative to the team.
GAME_TYPE_CHOICES = (
    ('H', 'Home'),
    ('A', 'Away'),
    ('N', 'Neutral Site'),
)
# Whether a ranking applies to teams or players.
RANKINGTYPE_CHOICES = (
    ('T', 'Team'),
    ('P', 'Player'),
)
# Play classification codes.
PLAY_CHOICES = (
    ('R', 'Run'),
    ('P', 'Pass'),
    ('F', 'Field Goal'),
    ('X', 'Extra Point'),
    ('N', 'Penalty'),
    ('K', 'Kickoff'),
    ('U', 'Punt'),
    ('T', 'Turnover'),
)
# NCAA divisions.
DIVISION_CHOICES = (
    ('B', 'Bowl Subdivision'),
    ('C', 'Championship Subdivision'),
    ('D', 'Division II'),
    ('T', 'Division III'),
)
class State(models.Model):
    """U.S. state, keyed by its two-letter postal abbreviation."""
    id = models.CharField(max_length=2, editable=False, primary_key=True)
    name = models.CharField(max_length=50)
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        return "/states/%s/" % self.id.lower()
class StateForm(forms.Form):
    """Form exposing a single state picker, ordered alphabetically by name."""
    name = forms.ModelChoiceField(queryset=State.objects.all().order_by('name'))
class City(models.Model):
    """A city, optionally tied to a state."""
    name = models.CharField(max_length=75)
    slug = models.SlugField(max_length=75)
    state = models.ForeignKey(State, null=True, blank=True)
    # GeoDjango support disabled; see the commented import at module top.
    # point = models.PointField()
    # objects = models.GeoManager()
    def __unicode__(self):
        if self.state:
            return "%s, %s" % (self.name, self.state.id)
        else:
            return self.name
    def get_absolute_url(self):
        # NOTE(review): raises AttributeError when state is None --
        # confirm every city routed through here has a state.
        return "/college/states/%s/%s/" % (self.state.id.lower(), self.slug)
    class Meta:
        verbose_name_plural = 'cities'
class Week(models.Model):
    """A numbered week within a season, bounded by its end date."""
    year = models.IntegerField()
    week_num = models.IntegerField()
    end_date = models.DateField()
    def __unicode__(self):
        return "Week %s, %s" % (self.week_num, self.year)
    def week_games_url(self):
        # Page listing all games played in this week.
        return "/college/seasons/%s/week/%s/" % (self.year, self.week_num)
class Conference(models.Model):
    """An athletic conference, addressed by its lowercased abbreviation."""
    abbrev = models.CharField(max_length=10)
    name = models.CharField(max_length=90)
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        return '/college/conferences/%s/' % self.abbrev.lower()
class College(models.Model):
    """A school fielding a college football team."""
    name = models.CharField(max_length=90)
    slug = models.SlugField(max_length=90)
    drive_slug = models.CharField(max_length=90)
    state = models.ForeignKey(State, blank=True)
    official_url = models.CharField(max_length=120, blank=True)
    official_rss = models.CharField(max_length=120, blank=True)
    updated = models.BooleanField()
    # objects = models.GeoManager()
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        return '/college/teams/%s/' % self.slug
    def current_record(self):
        """Return this calendar year's record as "(wins-losses)".

        Raises CollegeYear.DoesNotExist when no season row exists for
        the current year.
        """
        # Fetch the season row once; the original issued the identical
        # ORM query twice (once for wins, once for losses).
        season = self.collegeyear_set.get(year=datetime.date.today().year)
        return "(%d-%d)" % (season.wins, season.losses)
class CollegeYear(models.Model):
    """One college's season: record, class counts and conference membership."""
    college = models.ForeignKey(College)
    year = models.IntegerField()
    wins = models.IntegerField(default=0)
    losses = models.IntegerField(default=0)
    ties = models.IntegerField(default=0)
    conference_wins = models.IntegerField(default=0)
    conference_losses = models.IntegerField(default=0)
    conference_ties = models.IntegerField(default=0)
    freshmen = models.IntegerField(default=0)
    sophomores = models.IntegerField(default=0)
    juniors = models.IntegerField(default=0)
    seniors = models.IntegerField(default=0)
    conference = models.ForeignKey(Conference, null=True, blank=True)
    division = models.CharField(max_length=1, choices=DIVISION_CHOICES)
    def __unicode__(self):
        return "%s - %s" % (self.college.name, str(self.year))
    def game_count(self):
        """Total games played: wins + losses + ties."""
        return self.wins+self.losses+self.ties
    def get_ncaa_week_url(self):
        """NCAA ranking-summary URL for this team/season (week left blank)."""
        return 'http://web1.ncaa.org/football/exec/rankingSummary?year=%d&org=%d&week=' % (self.year, self.college.id)
    def get_absolute_url(self):
        return "/college/teams/%s/%s/" % (self.college.slug, self.year)
    def get_conference_url(self):
        # Returns None for independents (no conference set); plural
        # "conferences" matches Conference.get_absolute_url.
        if self.conference:
            return "/college/conferences/%s/%s/" % (self.conference.abbrev, self.year)
    def record(self):
        """Record string: "W-L-T" when ties exist, otherwise "W-L"."""
        if self.ties:
            return "%s-%s-%s" % (self.wins, self.losses, self.ties)
        else:
            return "%s-%s" % (self.wins, self.losses)
    class Meta:
        ordering = ['college', '-year']
class CollegeCoach(models.Model):
    """A coach's stint (one or more jobs) at a college in a given season."""
    coach = models.ForeignKey(Coach)
    collegeyear = models.ForeignKey(CollegeYear)
    jobs = models.ManyToManyField(CoachingJob)
    start_date = models.DateField(null=True, blank=True)
    end_date = models.DateField(null=True, blank=True)
    def __unicode__(self):
        return "%s - %s" % (self.coach, self.collegeyear)
    def jobs_display(self):
        """Comma-separated list of this stint's job titles."""
        return ", ".join([x.name for x in self.jobs.all()])
    def is_current_job(self):
        """True if the stint is in the current season and has not ended."""
        return self.collegeyear.year == CURRENT_SEASON and self.end_date is None
    def partial_season(self):
        """True if the coach left before the season ended (end_date set).

        Bug fix: the original tested the bare name ``end_date`` — a
        NameError at runtime — instead of ``self.end_date``.
        """
        return bool(self.end_date)
    class Meta:
        verbose_name_plural = 'College coaches'
class CollegeTotal(models.Model):
    """Season-long cumulative team statistics for a college."""
    college = models.ForeignKey(College)
    year = models.IntegerField()
    third_down_attempts = models.IntegerField(default=0)
    third_down_conversions = models.IntegerField(default=0)
    fourth_down_attempts = models.IntegerField(default=0)
    fourth_down_conversions = models.IntegerField(default=0)
    first_downs_rushing = models.IntegerField(default=0)
    first_downs_passing = models.IntegerField(default=0)
    first_downs_penalty = models.IntegerField(default=0)
    first_downs_total = models.IntegerField(default=0)
    penalties = models.IntegerField(default=0)
    penalty_yards = models.IntegerField(default=0)
    fumbles = models.IntegerField(default=0)
    fumbles_lost = models.IntegerField(default=0)
    rushes = models.IntegerField(default=0)
    rush_gain = models.IntegerField(default=0)
    rush_loss = models.IntegerField(default=0)
    rush_net = models.IntegerField(default=0)
    rush_touchdowns = models.IntegerField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    pass_attempts = models.IntegerField(default=0)
    pass_completions = models.IntegerField(default=0)
    pass_interceptions = models.IntegerField(default=0)
    pass_yards = models.IntegerField(default=0)
    pass_touchdowns = models.IntegerField(default=0)
    receptions = models.IntegerField(default=0)
    receiving_yards = models.IntegerField(default=0)
    receiving_touchdowns = models.IntegerField(default=0)
    punts = models.IntegerField(default=0)
    punt_yards = models.IntegerField(default=0)
    punt_returns = models.IntegerField(default=0)
    punt_return_yards = models.IntegerField(default=0)
    punt_return_touchdowns = models.IntegerField(default=0)
    kickoff_returns = models.IntegerField(default=0)
    kickoff_return_yards = models.IntegerField(default=0)
    kickoff_return_touchdowns = models.IntegerField(default=0)
    touchdowns = models.IntegerField(default=0)
    pat_attempts = models.IntegerField(default=0)
    pat_made = models.IntegerField(default=0)
    two_point_conversion_attempts = models.IntegerField(default=0)
    two_point_conversions = models.IntegerField(default=0)
    field_goal_attempts = models.IntegerField(default=0)
    field_goals_made = models.IntegerField(default=0)
    points = models.IntegerField(default=0)
class Position(models.Model):
    """A player position, grouped by offense/defense/special teams."""
    abbrev = models.CharField(max_length=5)
    name = models.CharField(max_length=25)
    plural_name = models.CharField(max_length=25)
    position_type = models.CharField(max_length=1, choices=POSITION_TYPE_CHOICES)
    def __unicode__(self):
        return self.abbrev
    def get_absolute_url(self):
        return '/recruits/positions/%s/' % self.abbrev.lower()
class BowlGame(models.Model):
    """A named bowl game and its host city."""
    name = models.CharField(max_length=75)
    slug = models.CharField(max_length=75)
    city = models.ForeignKey(City)
    def __unicode__(self):
        return self.name
    def get_absolute_url(self):
        return '/college/bowl-games/%s/' % self.slug
class Game(models.Model):
    """A single game between two teams, recorded from team1's perspective."""
    season = models.IntegerField()
    team1 = models.ForeignKey(College, related_name='first_team')
    team2 = models.ForeignKey(College, related_name='second_team')
    date = models.DateField()
    week = models.ForeignKey(Week)
    t1_game_type = models.CharField(max_length=1, choices=GAME_TYPE_CHOICES)
    t1_result = models.CharField(max_length=1, choices=RESULT_CHOICES, blank=True)
    team1_score = models.IntegerField(null=True, blank=True)
    team2_score = models.IntegerField(null=True, blank=True)
    site = models.CharField(max_length=90, blank=True)
    attendance = models.IntegerField(null=True, blank=True)
    overtime = models.CharField(max_length=5, blank=True)
    ncaa_xml = models.CharField(max_length=120, blank=True)
    duration = models.TimeField(null=True, blank=True)
    has_drives = models.BooleanField()
    has_stats = models.BooleanField()
    has_player_stats = models.BooleanField()
    is_conference_game = models.BooleanField()
    is_bowl_game = models.BooleanField()
    bowl_game = models.ForeignKey(BowlGame, null=True, blank=True)
    def __unicode__(self):
        return '%s vs. %s, %s' % (self.team1, self.team2, self.date)
    def get_absolute_url(self):
        return '/college/teams/%s/vs/%s/%s/%s/%s/' % (self.team1.slug, self.team2.slug, self.date.year, self.date.month, self.date.day)
    def get_matchup_url(self):
        # All-time matchup page (no date component).
        return '/college/teams/%s/vs/%s/' % (self.team1.slug, self.team2.slug)
    def get_reverse_url(self):
        # Same game URL with the two teams swapped.
        return '/college/teams/%s/vs/%s/%s/%s/%s/' % (self.team2.slug, self.team1.slug, self.date.year, self.date.month, self.date.day)
    def get_ncaa_xml_url(self):
        # Official NCAA worksheet XML for this game.
        return 'http://web1.ncaa.org/d1mfb/%s/Internet/worksheets/%s.xml' % (self.season, self.ncaa_xml.strip())
    def get_ncaa_drive_url(self):
        # NCAA drive-summary page; date formatted like "01-SEP-08".
        return "http://web1.ncaa.org/mfb/driveSummary.jsp?acadyr=%s&h=%s&v=%s&date=%s&game=%s" % (self.season, self.team1.id, self.team2.id, self.date.strftime("%d-%b-%y").upper(), self.ncaa_xml.strip())
    def get_play_by_play_url(self):
        # Same drive summary expanded to full play-by-play (expand=A).
        return "http://web1.ncaa.org/mfb/driveSummary.jsp?expand=A&acadyr=%s&h=%s&v=%s&date=%s&game=%s" % (self.season, self.team1.id, self.team2.id, self.date.strftime("%d-%b-%y").upper(), self.ncaa_xml.strip())
    def margin(self):
        # team1's margin of victory.  NOTE(review): raises TypeError when
        # either score is still NULL -- confirm callers use completed games.
        return self.team1_score-self.team2_score
    def display(self):
        # Winner listed first; ties fall through to the second branch.
        if self.margin() > 0:
            return "%s %s, %s %s" % (self.team1, self.team1_score, self.team2, self.team2_score)
        else:
            return "%s %s, %s %s" % (self.team2, self.team2_score, self.team1, self.team1_score)
class DriveOutcome(models.Model):
    """Lookup table of ways a drive can end (touchdown, punt, ...)."""
    abbrev = models.CharField(max_length=10)
    name = models.CharField(max_length=50, null=True)
    slug = models.SlugField(max_length=50, null=True)
    def __unicode__(self):
        # NOTE(review): name is nullable, so this may return None for rows
        # that only carry an abbrev -- confirm that is acceptable.
        return self.name
class GameDrive(models.Model):
    """One offensive drive in a game, with start/end field position."""
    game = models.ForeignKey(Game)
    team = models.ForeignKey(College)
    drive = models.IntegerField()  # ordinal of the drive within the game
    quarter = models.PositiveSmallIntegerField()
    start_how = models.CharField(max_length=25)
    start_time = models.TimeField()
    start_position = models.IntegerField()
    start_side = models.CharField(max_length=1, choices=SIDE_CHOICES)
    end_result = models.ForeignKey(DriveOutcome)
    end_time = models.TimeField()
    end_position = models.IntegerField(null=True)
    end_side = models.CharField(max_length=1, choices=SIDE_CHOICES)
    plays = models.IntegerField()
    yards = models.IntegerField()
    time_of_possession = models.TimeField()
    def __unicode__(self):
        return "%s: %s drive %s" % (self.game, self.team, self.drive)
class GameOffense(models.Model):
    """Single-game offensive box score for one team.

    One row per (game, team); counting stats default to 0 so partially
    loaded box scores remain usable. Rate methods raise
    ZeroDivisionError when the relevant attempt count is 0.
    """
    game = models.ForeignKey(Game)
    team = models.ForeignKey(College)
    year = models.IntegerField()
    third_down_attempts = models.IntegerField(default=0)
    third_down_conversions = models.IntegerField(default=0)
    fourth_down_attempts = models.IntegerField(default=0)
    fourth_down_conversions = models.IntegerField(default=0)
    time_of_possession = models.TimeField(null=True)
    first_downs_rushing = models.IntegerField(default=0)
    first_downs_passing = models.IntegerField(default=0)
    first_downs_penalty = models.IntegerField(default=0)
    first_downs_total = models.IntegerField(default=0)
    penalties = models.IntegerField(default=0)
    penalty_yards = models.IntegerField(default=0)
    fumbles = models.IntegerField(default=0)
    fumbles_lost = models.IntegerField(default=0)
    rushes = models.IntegerField(default=0)
    rush_gain = models.IntegerField(default=0)
    rush_loss = models.IntegerField(default=0)
    rush_net = models.IntegerField(default=0)
    rush_touchdowns = models.IntegerField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    pass_attempts = models.IntegerField(default=0)
    pass_completions = models.IntegerField(default=0)
    pass_interceptions = models.IntegerField(default=0)
    pass_yards = models.IntegerField(default=0)
    pass_touchdowns = models.IntegerField(default=0)
    receptions = models.IntegerField(default=0)
    receiving_yards = models.IntegerField(default=0)
    receiving_touchdowns = models.IntegerField(default=0)
    punts = models.IntegerField(default=0)
    punt_yards = models.IntegerField(default=0)
    punt_returns = models.IntegerField(default=0)
    punt_return_yards = models.IntegerField(default=0)
    punt_return_touchdowns = models.IntegerField(default=0)
    kickoff_returns = models.IntegerField(default=0)
    kickoff_return_yards = models.IntegerField(default=0)
    kickoff_return_touchdowns = models.IntegerField(default=0)
    touchdowns = models.IntegerField(default=0)
    pat_attempts = models.IntegerField(default=0)
    pat_made = models.IntegerField(default=0)
    two_point_conversion_attempts = models.IntegerField(default=0)
    two_point_conversions = models.IntegerField(default=0)
    field_goal_attempts = models.IntegerField(default=0)
    field_goals_made = models.IntegerField(default=0)
    points = models.IntegerField(default=0)
    def __unicode__(self):
        return '%s - %s' % (self.game, self.team)
    def third_down_rate(self):
        """Third-down conversion rate as a fraction (0..1)."""
        return float(self.third_down_conversions)/float(self.third_down_attempts)
    def field_goal_rate(self):
        """Field-goal success rate as a fraction (0..1)."""
        return float(self.field_goals_made)/float(self.field_goal_attempts)
    def penalty_yard_ratio(self):
        """Penalty yards as a fraction of total yards."""
        return float(self.penalty_yards)/float(self.total_yards)
    def yards_per_reception(self):
        """Average receiving yards per catch."""
        return float(self.receiving_yards)/float(self.receptions)
    def yards_per_pass_attempt(self):
        """Average passing yards per pass attempt.

        Fixed: previously divided receiving_yards by pass_attempts,
        mixing two different stat categories.
        """
        return float(self.pass_yards)/float(self.pass_attempts)
    def rushing_first_downs_pct(self):
        """Percentage of first downs gained by rushing."""
        return float(self.first_downs_rushing)/float(self.first_downs_total)*100
    def touchdowns_per_rushes(self):
        """Rushing touchdowns per carry, expressed as a percentage.

        (Moved the stray module-level docstring that preceded this def
        into the method itself.)
        """
        return float(self.rush_touchdowns)/float(self.rushes)*100
    def opponent(self):
        """Return the opposing College for this box-score row."""
        if self.team == self.game.team2:
            return self.game.team1
        else:
            return self.game.team2
class GameDefense(models.Model):
    """Single-game defensive box score for one team."""
    game = models.ForeignKey(Game)
    team = models.ForeignKey(College)
    safeties = models.IntegerField(default=0)
    unassisted_tackles = models.IntegerField(default=0)
    assisted_tackles = models.IntegerField(default=0)
    unassisted_tackles_for_loss = models.IntegerField(default=0)
    assisted_tackles_for_loss = models.IntegerField(default=0)
    tackles_for_loss_yards = models.IntegerField(default=0)
    unassisted_sacks = models.IntegerField(default=0)
    assisted_sacks = models.IntegerField(default=0)
    sack_yards = models.IntegerField(default=0)
    defensive_interceptions = models.IntegerField(default=0)
    defensive_interception_yards = models.IntegerField(default=0)
    defensive_interception_touchdowns = models.IntegerField(default=0)
    pass_breakups = models.IntegerField(default=0)
    fumbles_forced = models.IntegerField(default=0)
    fumbles_number = models.IntegerField(default=0)
    fumbles_yards = models.IntegerField(default=0)
    fumbles_touchdowns = models.IntegerField(default=0)
    def __unicode__(self):
        return '%s - %s' % (self.game, self.team)
class Player(models.Model):
    """A roster entry for one player in one season (team + year)."""
    name = models.CharField(max_length=120)
    slug = models.SlugField(max_length=120)
    team = models.ForeignKey(College)
    year = models.IntegerField()
    position = models.ForeignKey(Position)
    number = models.CharField(max_length=4)  # jersey number (may be non-numeric)
    games_played = models.PositiveIntegerField(default=0)
    status = models.CharField(max_length=2, choices=STATUS_CHOICES)
    def __unicode__(self):
        return "%s - %s" % (self.name, self.team)
    def get_absolute_url(self):
        """URL of this player's page within the team/season."""
        return '/college/teams/%s/%s/players/%s/' % (self.team.slug, self.year, self.slug)
class PlayerCollegeCareer(models.Model):
    """Span of a player's college career (first/last season, games)."""
    player = models.ForeignKey(Player)
    first_season = models.ForeignKey(CollegeYear, related_name='first_season')
    last_season = models.ForeignKey(CollegeYear, related_name='last_season')
    total_games = models.IntegerField(null=True, blank=True)
    def __unicode__(self):
        # Fixed: Player.name is a CharField (a plain string), so the old
        # ``self.player.name.full_name()`` raised AttributeError.
        return self.player.name
class PlayerGame(models.Model):
    """Per-game participation and total output for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    played = models.BooleanField()
    total_plays = models.IntegerField()
    total_yards = models.IntegerField()
    def __unicode__(self):
        return self.player.name
class PlayerRush(models.Model):
    """Per-game rushing line for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    rushes = models.IntegerField(default=0)
    gain = models.IntegerField(default=0)
    loss = models.IntegerField(default=0)
    net = models.IntegerField(default=0)
    td = models.IntegerField(default=0)
    long_yards = models.IntegerField(default=0)  # longest single carry
    average = models.FloatField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    class Meta:
        verbose_name_plural = "player rushing"
class PlayerPass(models.Model):
    """Per-game passing line for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    attempts = models.IntegerField(default=0)
    completions = models.IntegerField(default=0)
    interceptions = models.IntegerField(default=0)
    yards = models.IntegerField(default=0)
    td = models.IntegerField(default=0)
    conversions = models.IntegerField(default=0)
    total_plays = models.IntegerField(default=0)
    total_yards = models.IntegerField(default=0)
    pass_efficiency = models.FloatField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    def comp_att(self):
        """Completions/attempts as display text, e.g. "12 of 20"."""
        return "%d of %d" % (self.completions, self.attempts)
    class Meta:
        verbose_name_plural = 'player passing'
class PlayerReceiving(models.Model):
    """Per-game receiving line for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    receptions = models.IntegerField(default=0)
    yards = models.IntegerField(default=0)
    td = models.IntegerField(default=0)
    long_yards = models.IntegerField(default=0)  # longest single reception
    average = models.FloatField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerScoring(models.Model):
    """Per-game scoring line (TDs, kicks, conversions, safeties)."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    td = models.IntegerField(default=0)
    fg_att = models.IntegerField(default=0)
    fg_made = models.IntegerField(default=0)
    pat_att = models.IntegerField(default=0)
    pat_made = models.IntegerField(default=0)
    two_pt_att = models.IntegerField(default=0)
    two_pt_made = models.IntegerField(default=0)
    def_pat_att = models.IntegerField(default=0)
    def_pat_made = models.IntegerField(default=0)
    def_two_pt_att = models.IntegerField(default=0)
    def_two_pt_made = models.IntegerField(default=0)
    safeties = models.IntegerField(default=0)
    points = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerTackle(models.Model):
    """Per-game tackle counts for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    unassisted_tackles = models.IntegerField(default=0)
    assisted_tackles = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    def total_tackles(self):
        """Combined solo + assisted tackles."""
        return self.unassisted_tackles+self.assisted_tackles
class PlayerTacklesLoss(models.Model):
    """Per-game tackles-for-loss and sack counts for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    unassisted_tackles_for_loss = models.IntegerField(default=0)
    assisted_tackles_for_loss = models.IntegerField(default=0)
    tackles_for_loss_yards = models.IntegerField(default=0)
    unassisted_sacks = models.IntegerField(default=0)
    assisted_sacks = models.IntegerField(default=0)
    sack_yards = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
    def total_sacks(self):
        """Combined solo + assisted sacks."""
        return self.unassisted_sacks+self.assisted_sacks
    def total_tackles_for_loss(self):
        """Combined solo + assisted tackles for loss."""
        return self.unassisted_tackles_for_loss+self.assisted_tackles_for_loss
    class Meta:
        verbose_name_plural = 'player tackles for loss'
class PlayerPassDefense(models.Model):
    """Per-game pass-defense line (interceptions, breakups)."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    interceptions = models.IntegerField(default=0)
    interception_yards = models.IntegerField(default=0)
    interception_td = models.IntegerField(default=0)
    pass_breakups = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerFumble(models.Model):
    """Per-game fumble line (forced, recovered, return yards/TDs)."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    fumbles_forced = models.IntegerField(default=0)
    fumbles_number = models.IntegerField(default=0)
    fumbles_yards = models.IntegerField(default=0)
    fumbles_td = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerReturn(models.Model):
    """Per-game punt and kickoff return line for a player."""
    player = models.ForeignKey(Player)
    game = models.ForeignKey(Game)
    punt_returns = models.IntegerField(default=0)
    punt_return_yards = models.IntegerField(default=0)
    punt_return_td = models.IntegerField(default=0)
    kickoff_returns = models.IntegerField(default=0)
    kickoff_return_yards = models.IntegerField(default=0)
    kickoff_return_td = models.IntegerField(default=0)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.game)
class PlayerSummary(models.Model):
    """Season aggregate totals for a player (nullable: not all players
    accumulate stats in every category)."""
    player = models.ForeignKey(Player)
    rushes = models.IntegerField(null=True)
    rush_gain = models.IntegerField(null=True)
    rush_loss = models.IntegerField(null=True)
    rush_net = models.IntegerField(null=True)
    rush_td = models.IntegerField(null=True)
    pass_attempts = models.IntegerField(null=True)
    pass_complete = models.IntegerField(null=True)
    pass_intercept = models.IntegerField(null=True)
    pass_yards = models.IntegerField(null=True)
    pass_td = models.IntegerField(null=True)
    conversions = models.IntegerField(null=True)
    offense_plays = models.IntegerField(null=True)
    offense_yards = models.IntegerField(null=True)
    receptions = models.IntegerField(null=True)
    reception_yards = models.IntegerField(null=True)
    reception_td = models.IntegerField(null=True)
    def __unicode__(self):
        return "%s - %s" % (self.player.name, self.player.year)
class RankingType(models.Model):
    """A statistical ranking category published by the NCAA."""
    name = models.CharField(max_length=75)
    slug = models.SlugField(max_length=75)
    typename = models.CharField(max_length=1, choices=RANKINGTYPE_CHOICES)
    ncaa_name = models.CharField(max_length=75)  # category name as the NCAA spells it
    def __unicode__(self):
        return self.name
    def get_current_url(self):
        """URL of this ranking for the current season."""
        return "/college/rankings/%s/%s/" % (self.slug, CURRENT_SEASON)
    def get_partial_url(self):
        """Season-agnostic URL prefix for this ranking."""
        return "/college/rankings/%s/" % self.slug
class Ranking(models.Model):
    """One team's position in a ranking category for a given week."""
    ranking_type = models.ForeignKey(RankingType)
    college = models.ForeignKey(College)
    year = models.IntegerField()
    week = models.ForeignKey(Week)
    rank = models.PositiveIntegerField()
    is_tied = models.BooleanField()
    actual = models.FloatField()  # underlying statistic the rank is based on
    conference_rank = models.PositiveIntegerField(null=True)
    is_conf_tied = models.BooleanField()
    division = models.CharField(max_length=1)
    def __unicode__(self):
        return "%s - %s, %s (%s)" % (self.ranking_type, self.college, self.year, self.week)
    def get_week_url(self):
        """URL of this ranking category for the specific week."""
        return "/college/rankings/%s/%s/week/%s/" % (self.ranking_type.slug, self.year, self.week.week_num)
class RushingSummary(models.Model):
    """Weekly rushing leaderboard entry for a player."""
    player = models.ForeignKey(Player)
    year = models.IntegerField()
    week = models.ForeignKey(Week)
    rank = models.PositiveIntegerField()
    is_tied = models.BooleanField()
    carries = models.PositiveIntegerField()
    net = models.PositiveIntegerField()
    td = models.PositiveIntegerField()
    average = models.FloatField()
    yards_per_game = models.FloatField()
    def __unicode__(self):
        # Fixed: the format string was missing the % operator, so this
        # raised TypeError ("'str' object is not callable") when rendered.
        return "%s - %s, %s" % (self.player, self.year, self.yards_per_game)
class Poll(models.Model):
    """A named poll (e.g. AP, Coaches)."""
    name = models.CharField(max_length=50)
    slug = models.SlugField(max_length=50)
    def __unicode__(self):
        return self.name
class PollResults(models.Model):
    """A team's rank in a poll for one week."""
    poll = models.ForeignKey(Poll)
    week = models.ForeignKey(Week)
    team = models.ForeignKey(College)
    rank = models.IntegerField()
    def __unicode__(self):
        return "%s: %s %s" % (self.poll, self.week, self.team)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import time
import pexpect
import hashlib
import tornado.web
import tornado.gen
import tornado.ioloop
import pexpect.popen_spawn as pspawn
WINDOWS = 'nt'
class TermReader(object):
    """Periodically drains output from a pexpect tty into a socket."""

    def __init__(self, tty, socket):
        self.tty = tty
        self.socket = socket
        # Poll the tty for fresh output every 10 ms on the IOLoop.
        self.p_callback = tornado.ioloop.PeriodicCallback(self.consume_lines,
                                                          callback_time=10)
        self.p_callback.start()

    @tornado.gen.coroutine
    def consume_lines(self):
        """Forward any pending tty output to the socket (best effort)."""
        try:
            timeout = 0
            if os.name == WINDOWS:
                # NOTE(review): PopenSpawn apparently needs a non-zero read
                # timeout plus a priming expect('') on Windows -- confirm.
                timeout = 100
                self.tty.expect('')
            _in = self.tty.read_nonblocking(timeout=timeout, size=1000)
            self.socket.notify(_in)
        except Exception:
            # read_nonblocking raises pexpect TIMEOUT/EOF when no data is
            # pending, so reads stay best-effort; narrowed from a bare
            # `except:` so KeyboardInterrupt/SystemExit still propagate.
            pass
class TermManager(object):
    """Wrapper around pexpect to execute local commands."""

    def __init__(self):
        self.os = os.name
        if self.os == WINDOWS:
            # No pty support on Windows; drive cmd.exe through a pipe.
            self.cmd = 'cmd'
            self.pty_fork = pspawn.PopenSpawn
        else:
            self.cmd = '/usr/bin/env bash'
            self.pty_fork = pexpect.spawnu
        self.sockets = {}   # pid -> client socket
        self.consoles = {}  # pid -> {'tty': spawn, 'read': TermReader}

    @tornado.gen.coroutine
    def create_term(self, rows, cols):
        """Spawn a new shell and return its short hex id."""
        pid = hashlib.md5(str(time.time()).encode('utf-8')).hexdigest()[0:6]
        tty = self.pty_fork(self.cmd)
        self.consoles[pid] = {'tty': tty, 'read': None}
        self.resize_term(pid, rows, cols)
        raise tornado.gen.Return(pid)

    @tornado.gen.coroutine
    def start_term(self, pid, socket):
        """Attach *socket* to console *pid* and begin streaming output."""
        term = self.consoles[pid]
        self.sockets[pid] = socket
        term['tty'].expect('')
        term['read'] = TermReader(term['tty'], socket)

    @tornado.gen.coroutine
    def stop_term(self, pid):
        """Close console *pid* and drop all references to it."""
        term = self.consoles[pid]
        term['tty'].close()
        del self.consoles[pid]
        del self.sockets[pid]

    @tornado.gen.coroutine
    def execute(self, pid, cmd):
        """Send *cmd* to the console identified by *pid*.

        Fixed: the Windows branch previously called ``term.send(cmd)``
        and then fell through to a second ``term.send(cmd)``, sending
        every command twice on Windows.
        """
        term = self.consoles[pid]['tty']
        if self.os == WINDOWS:
            print(cmd)
            if cmd == '\n':
                # cmd.exe needs a CRLF to execute the buffered line.
                cmd = '\r\n'
        term.send(cmd)

    @tornado.gen.coroutine
    def resize_term(self, pid, rows, cols):
        """Resize the pty; no-op on Windows (pipes have no winsize)."""
        if self.os != WINDOWS:
            term = self.consoles[pid]['tty']
            term.setwinsize(rows, cols)
Windows console doesn't respond to command input
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import time
import pexpect
import hashlib
import tornado.web
import tornado.gen
import tornado.ioloop
import pexpect.popen_spawn as pspawn
WINDOWS = 'nt'
class TermReader(object):
    """Periodically drains output from a pexpect tty into a socket."""
    def __init__(self, tty, socket):
        self.tty = tty
        self.socket = socket
        # Poll the tty for fresh output every 10 ms on the IOLoop.
        self.p_callback = tornado.ioloop.PeriodicCallback(self.consume_lines,
                                                          callback_time=10)
        self.p_callback.start()
    @tornado.gen.coroutine
    def consume_lines(self):
        """Forward any pending tty output to the socket (best effort)."""
        try:
            timeout = 0
            if os.name == WINDOWS:
                # NOTE(review): PopenSpawn apparently needs a non-zero read
                # timeout plus a priming expect('') on Windows -- confirm.
                timeout = 100
                self.tty.expect('')
            _in = self.tty.read_nonblocking(timeout=timeout, size=1000)
            # if len(_in) > 0:
            # print(_in)
            self.socket.notify(_in)
        except:
            # Bare except deliberately swallows pexpect TIMEOUT/EOF when no
            # output is pending; it also hides real errors.
            pass
class TermManager(object):
    """Wrapper around pexpect to execute local commands."""

    def __init__(self):
        self.os = os.name
        if self.os == WINDOWS:
            # No pty support on Windows; drive cmd.exe through a pipe.
            self.cmd = 'cmd'
            self.pty_fork = pspawn.PopenSpawn
        else:
            self.cmd = '/usr/bin/env bash'
            self.pty_fork = pexpect.spawnu
        self.sockets = {}   # pid -> client socket
        self.consoles = {}  # pid -> {'tty': spawn, 'read': TermReader}

    @tornado.gen.coroutine
    def create_term(self, rows, cols):
        """Spawn a new shell and return its short hex id."""
        pid = hashlib.md5(str(time.time()).encode('utf-8')).hexdigest()[0:6]
        tty = self.pty_fork(self.cmd)
        self.consoles[pid] = {'tty': tty, 'read': None}
        self.resize_term(pid, rows, cols)
        raise tornado.gen.Return(pid)

    @tornado.gen.coroutine
    def start_term(self, pid, socket):
        """Attach *socket* to console *pid* and begin streaming output."""
        term = self.consoles[pid]
        self.sockets[pid] = socket
        term['tty'].expect('')
        term['read'] = TermReader(term['tty'], socket)

    @tornado.gen.coroutine
    def stop_term(self, pid):
        """Close console *pid* and drop all references to it."""
        term = self.consoles[pid]
        term['tty'].close()
        del self.consoles[pid]
        del self.sockets[pid]

    @tornado.gen.coroutine
    def execute(self, pid, cmd):
        """Send *cmd* to the console identified by *pid*.

        Fixed: the newline test used assignment (``cmd = '\\r\\n'``)
        inside the condition, which is a SyntaxError; it now compares
        with ``==``.
        """
        term = self.consoles[pid]['tty']
        if self.os == WINDOWS:
            print(cmd)
            if cmd == '\n' or cmd == '\r\n':
                # NOTE(review): sendline() emits a line ending before the
                # send() below -- apparently needed to make cmd.exe execute
                # the buffered line; confirm behavior on Windows.
                term.sendline()
        term.send(cmd)

    @tornado.gen.coroutine
    def resize_term(self, pid, rows, cols):
        """Resize the pty; no-op on Windows (pipes have no winsize)."""
        if self.os != WINDOWS:
            term = self.consoles[pid]['tty']
            term.setwinsize(rows, cols)
|
# -*- coding: utf-8 -*-
#
# python-github2 documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 11 16:16:25 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from sphinx.util import inspect
import cloud_sptheme as csp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.%s" % ext
for ext in ["autodoc", "todo", "intersphinx", "viewcode",
"coverage"]] + \
["sphinxcontrib.%s" % ext for ext in ["cheeseshop", ]]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'github2'
copyright = u'2009-2012, Ask Solem'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import github2
# The short X.Y version.
version = ".".join(map(str, github2.VERSION[:2]))
# The full version, including alpha/beta/rc tags.
release = github2.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'cloud'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"externalrefs": True,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [csp.get_theme_dir(), ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'github2doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'github2.tex', u'github2 Documentation',
u'Ask Solem', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'github2', u'github2 Documentation',
[u'Ask Solem'], 1)
]
autoclass_content = "init"
autodoc_default_flags = ['members', ]
intersphinx_mapping = {
'python': ('http://docs.python.org/', os.getenv('SPHINX_PYTHON_OBJECTS'))
}
# Horrific nastiness to generate correct function signature for decorated
# objects. Close your eyes... Now!
orig_getargspec = inspect.getargspec
def getargspec(func):
    """Resolve the argspec of *func*, looking through decorator wrappers.

    Decorated objects expose the wrapped callable as ``__orig_func__``;
    inspect that one so documented signatures match the real function.
    """
    target = getattr(func, '__orig_func__', func)
    return orig_getargspec(target)
inspect.getargspec = getargspec
[QA] Fix PEP-8 compliance in Sphinx config.
# -*- coding: utf-8 -*-
#
# python-github2 documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 11 16:16:25 2011.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from sphinx.util import inspect
import cloud_sptheme as csp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.%s" % ext
for ext in ["autodoc", "todo", "intersphinx", "viewcode",
"coverage"]] + \
["sphinxcontrib.%s" % ext for ext in ["cheeseshop", ]]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'github2'
copyright = u'2009-2012, Ask Solem'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import github2
# The short X.Y version.
version = ".".join(map(str, github2.VERSION[:2]))
# The full version, including alpha/beta/rc tags.
release = github2.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'cloud'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# Theme tweaks understood by the custom CSP theme loaded below.
html_theme_options = {
    "externalrefs": True,
}
# Add any paths that contain custom themes here, relative to this directory.
# The theme directory is supplied by the (project-local) ``csp`` helper.
html_theme_path = [csp.get_theme_dir(), ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'github2doc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
    ('index', 'github2.tex', u'github2 Documentation',
     u'Ask Solem', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'github2', u'github2 Documentation',
     [u'Ask Solem'], 1)
]
# Pull class documentation from ``__init__`` docstrings.
autoclass_content = "init"
autodoc_default_flags = ['members', ]
# Cross-link to the Python docs; the objects inventory location may be
# overridden via the SPHINX_PYTHON_OBJECTS environment variable.
intersphinx_mapping = {
    'python': ('http://docs.python.org/', os.getenv('SPHINX_PYTHON_OBJECTS'))
}
# Horrific nastiness to generate correct function signature for decorated
# objects. Close your eyes... Now!
orig_getargspec = inspect.getargspec
def getargspec(func):
    # Decorators in the documented project stash the wrapped callable on
    # ``__orig_func__``; report that original's signature so autodoc shows
    # the real parameters instead of ``*args, **kwargs``.
    if hasattr(func, '__orig_func__'):
        return orig_getargspec(func.__orig_func__)
    else:
        return orig_getargspec(func)
# Monkey-patch inspect so Sphinx autodoc picks up the shim above.
inspect.getargspec = getargspec
|
from dateutil.relativedelta import relativedelta
import datetime
import json
from django.db.models import Avg, Max, Min, Sum, Count
from django.http import JsonResponse
from django.core.cache import cache
from django.db import connection
from django.db.models import Q
from apps.utils.models import (Channel, River,
Feed, AggregateMonthlyFeed,
AggregateDailyFeed, ChannelField)
from util.scripts.timing_decorator import time_usage
def getFeeds(request):
    """
    Feed listing API.

    GET parameters (all optional):
      channel     -- channel id to filter by
      start, end  -- bound the feed timestamps (e.g. '2015-09-09')
      limit       -- maximum number of feed records returned
      data        -- aggregation level: 'raw' (default), 'daily' or 'monthly'
      field       -- restrict to one channel field (e.g. temperature)
      stationtype -- station type; defaults to WEATHER_STATION
      river       -- river id; selects data points on that river

    Example:
      /mamase/api/feed/?channel=1&start='2015-09-09'&end='2015-09-10'&limit=10

    Returns a JsonResponse with 'channel', 'feed' and 'river' keys; 'feed'
    is a nested structure (time level -> aggregation level -> location
    level -> data points). Responses are cached for ten minutes, keyed on
    the full request path.
    """
    channel = request.GET.get('channel', None)  # Select among list of channels
    start = request.GET.get('start', None)      # Data should be after this date
    end = request.GET.get('end', None)          # Data should be before this date
    limit = request.GET.get('limit', None)      # Maximum number of records
    data = request.GET.get('data', 'raw')       # Raw, Daily or Monthly
    field = request.GET.get('field', None)      # A specific field e.g temp
    station_type = request.GET.get('stationtype', "WEATHER_STATION")
    river = request.GET.get('river', None)      # All data points on a river

    # Serve a cached copy of this exact request when one exists.
    cache_key = request.get_full_path()
    result = cache.get(cache_key)
    if result:
        return result

    kwargs = {}
    args = {}
    excludeargs = {}
    complexargs = {}
    if channel:
        kwargs['channelfield__channel_id'] = channel
        args['id'] = channel
    if field:
        kwargs['channelfield__field_id'] = field
    if start:
        kwargs['timestamp__gte'] = start
    if end:
        kwargs['timestamp__lte'] = end
    if station_type:
        station_type = station_type.upper()
        if station_type == "RAIN_TEMP":
            # Rain/temperature values only; exclude 'soil temperature',
            # which would otherwise match the 'temp' substring.
            complexargs = (Q(channelfield__field__name__icontains='temp') |
                           Q(channelfield__field__name__icontains='rain'),)
            excludeargs['channelfield__field__name__icontains'] = 'soil temperature'
        elif station_type == "RIVER_DEPTH":
            if not channel and river:
                r = River.objects.filter(id=river)
                if r:
                    river = r[0]
                    # Get data for just this river's latest station.
                    kwargs['channelfield__channel_id'] = river.rivers.last().id
            args['type'] = station_type
        else:
            args['type'] = station_type
    if river:
        kwargs['channelfield__channel__river_id'] = river
        args['river_id'] = river

    feed = {}
    feed_without_null = []
    if data.lower() == "raw":
        feed_without_null = aggregateRawData(station_type, kwargs, complexargs, excludeargs)
    elif data.lower() == "daily":
        data = aggregateDailyFeedData(station_type, kwargs, complexargs, excludeargs)
        feed['daily'] = {'avg': list(data[0]),
                         'min': list(data[3]),
                         'max': list(data[4]),
                         'count': list(data[2]),
                         'sum': list(data[1])}
        feed_without_null.append(feed)
    elif data.lower() == "monthly":
        data = aggregateMonthlyFeedData(station_type, kwargs, complexargs, excludeargs)
        feed['monthly'] = {'avg': list(data[0]),
                           'min': list(data[3]),
                           'max': list(data[4]),
                           'count': list(data[2]),
                           'sum': list(data[1])}
        feed_without_null.append(feed)

    ch = Channel.objects.filter(**args).order_by('-id')
    channels = []
    for i in ch:
        values = i.channels.values('field__name', 'name',
                                   'id', 'field__id').distinct()
        # NOTE(review): the 'desciption' key typo is kept -- API consumers
        # may already depend on it.
        valuesdict = {'id': i.id, 'name': i.name,
                      'desciption': i.description,
                      'latitude': i.latitude,
                      'longitude': i.longitude,
                      'data_id': i.data_id}
        if i.river:
            valuesdict['river'] = i.river.id
        valuesdict['fields'] = list(values)
        channels.append(valuesdict)

    rivers = River.objects.all()
    riverdata = []
    for i in rivers:
        riverdata.append({'name': i.name, 'id': i.id})

    # BUG FIX: the limit used to be applied to ``feed`` (a dict), which
    # raised a TypeError that a bare except silently swallowed -- so the
    # limit never took effect. Apply it to the returned feed list instead.
    if limit:
        try:
            feed_without_null = feed_without_null[:int(limit)]
        except (TypeError, ValueError):
            pass

    result = JsonResponse(dict(channel=channels,
                               feed=feed_without_null,
                               river=riverdata))
    # BUG FIX: cache for ten minutes as documented above (previously the
    # backend's default timeout was silently used).
    cache.set(cache_key, result, 600)
    return result
@time_usage
def getAllData(request):
    """
    Return daily and monthly aggregates for every channel.

    The response maps 'daily' and 'monthly' to dicts keyed by aggregation
    ('avg', 'sum', 'count', 'min', 'max'); each value is a list of
    {channel_id: [aggregate rows]} entries, one per channel.
    """
    order = ('avg', 'sum', 'count', 'min', 'max')
    daily = dict((key, []) for key in order)
    monthly = dict((key, []) for key in order)
    excludeargs = {}
    complexargs = {}
    for item in Channel.objects.all():
        # BUG FIX: the rows iterated here are Channel instances, so the
        # station type is ``item.type`` -- ``item.channel.type`` raised
        # AttributeError (ChannelField rows have ``.channel``, not Channel).
        chd_data = aggregateDailyFeedData(item.type, {"channelfield__channel": item},
                                          complexargs, excludeargs)
        chm_data = aggregateMonthlyFeedData(item.type, {"channelfield__channel": item},
                                            complexargs, excludeargs)
        # Both helpers return (avg, sum, count, min, max) in this order.
        for idx, key in enumerate(order):
            daily[key].append({item.id: list(chd_data[idx])})
            monthly[key].append({item.id: list(chm_data[idx])})
    return JsonResponse({'daily': daily, 'monthly': monthly})
def aggregateRawData(station_type, kwargs, complexargs, excludeargs):
    """
    Return raw feed rows grouped by entry id.

    Rows arrive one-per-field from the ORM; consecutive rows that share an
    entry_id are folded into a single dict whose 'fields' key maps field
    name -> reading.
    """
    # NOTE(review): both branches of the original if/else selected the same
    # column; station_type is kept in the signature for interface
    # compatibility but is currently unused here.
    reading = 'reading'
    feed = (Feed.objects.filter(*complexargs).filter(**kwargs).exclude(**excludeargs)
            .extra(select={'timestamp_formatted': "to_char(timestamp, 'YYYY-MM-DD HH24:MI:SS')"})
            .values('entry_id', 'channelfield__channel_id',
                    'channelfield__name', 'timestamp_formatted',
                    reading, 'id').order_by('entry_id'))
    entryid_tracker = None
    data = []
    field_readings = {}
    for item in list(feed):
        if item['entry_id'] == entryid_tracker:
            # Same entry as the previous row: just record this field.
            field_readings[item['channelfield__name']] = item[reading]
        else:
            # New entry id: the readings accumulated so far belong to the
            # previous entry (each appended dict keeps a reference to the
            # bundle built AFTER it), then start a fresh bundle seeded with
            # this row's own reading.
            r = item[reading]
            cfn = item['channelfield__name']
            item.pop(reading)  # Drop per-row fields not wanted in output
            item.pop('id')
            item.pop('channelfield__name')
            item['fields'] = field_readings
            data.append(item)
            field_readings = {cfn: r}
            entryid_tracker = item['entry_id']
    # The loop attaches readings one entry late; fix up the last entry.
    # BUG FIX: replaces a bare ``except:`` around ``data[-1]`` with an
    # explicit emptiness check.
    if data:
        data[-1]['fields'] = field_readings
    else:
        print("No data found during that time")
    return data
def aggregateDailyFeedData(station_type, kwargs, complexargs, excludeargs):
    """
    Compute per-day aggregates of the matching feed rows.

    Returns a 5-tuple ``(avg, sum, count, min, max)``; each element is a
    list of dicts keyed by channel, field, field name and day (days are
    pinned to noon by the to_char format). Null aggregates and zero counts
    are stripped.
    """
    # NOTE(review): both branches of the original if/else selected the same
    # column; station_type is kept for interface compatibility only.
    reading = 'reading'

    def _daily(aggregate):
        # One queryset per aggregation over the identical base filter;
        # previously this block was copy-pasted five times.
        return (Feed.objects.filter(*complexargs).filter(**kwargs)
                .exclude(**excludeargs)
                .extra(select={'timestamp': "to_char(timestamp, 'YYYY-MM-DD 12:00:00')"})
                .values('channelfield__channel', 'channelfield__field',
                        'channelfield__name', 'timestamp')
                .annotate(aggregate(reading)))

    d_count = removeZeroValue(_daily(Count))
    d_avg = removeNullValue(_daily(Avg))
    d_max = removeNullValue(_daily(Max))
    d_min = removeNullValue(_daily(Min))
    d_sum = removeNullValue(_daily(Sum))
    return d_avg, d_sum, d_count, d_min, d_max
def aggregateMonthlyFeedData(station_type, kwargs, complexargs, excludeargs):
    """
    Compute per-month aggregates of the matching feed rows.

    Returns a 5-tuple ``(avg, sum, count, min, max)``; each element is a
    list of dicts keyed by field, channel name, field name and month
    (months are pinned to the 15th by the to_char format), ordered by
    timestamp then field. Null aggregates and zero counts are stripped.
    """
    # NOTE(review): both branches of the original if/else selected the same
    # column; station_type is kept for interface compatibility only.
    reading = 'reading'
    month_filter = connection.ops.date_trunc_sql('month', 'timestamp')

    def _monthly(aggregate):
        # One queryset per aggregation over the identical base filter;
        # previously this block was copy-pasted five times.
        return (Feed.objects.filter(*complexargs).filter(**kwargs)
                .exclude(**excludeargs)
                .extra({'date': month_filter})
                .extra(select={'timestamp': "to_char(timestamp, 'YYYY-MM-15 00:00:00')"})
                .values('channelfield__field', 'channelfield__channel__name',
                        'channelfield__name', 'timestamp')
                .annotate(aggregate(reading))
                .order_by('timestamp', 'channelfield__field'))

    m_count = removeZeroValue(_monthly(Count))
    m_avg = removeNullValue(_monthly(Avg))
    m_max = removeNullValue(_monthly(Max))
    m_min = removeNullValue(_monthly(Min))
    m_sum = removeNullValue(_monthly(Sum))
    return m_avg, m_sum, m_count, m_min, m_max
'''
Maybe ignore the loop for now (Approach 3). It would take up a lot of
precious time designing the logic and would most probably be slower. The
above works great -- like a charm -- but it is not sorted by channels. I
might have to loop over all channels, get data for each specific channel,
and return that as a dictionary. A loop might be unavoidable, but it shall
not run more than about 10 times (the number of channels), which is
acceptable by my books. Have this as two processes: calculate and store the
result as a JSON string, then query for the latest JSON string.
'''
def storeAggregatedData(channel=None, start=None, end=None):
    """
    Refresh the stored daily and monthly aggregates.

    When *start* is omitted, only the latest day/month is (re)computed for
    each channel field; otherwise every aggregate between *start* and *end*
    is rebuilt. *channel* optionally restricts the work to one channel.
    """
    filters = {}
    if channel:
        filters['channel_id'] = channel
    channelfields = ChannelField.objects.filter(**filters)
    if start:
        aggregateMultipleMonthlyData(channelfields, start, end)
        aggregateMultipleDailyData(channelfields, start, end)
    else:
        aggregateLatestMonthData(channelfields)
        aggregateLatestDailyData(channelfields)
def aggregateMultipleMonthlyData(channelfields, start, end):
    """Rebuild monthly aggregates for each channel field in [start, end]."""
    if not end:
        end = datetime.datetime.now()
    for chfield in channelfields:
        window = {'channelfield': chfield,
                  'timestamp__gte': start,
                  'timestamp__lte': end}
        monthly = aggregateMonthlyFeedData(chfield.channel.type, window, {}, {})
        createAggregateMonthlyData(monthly, chfield)
def aggregateMultipleDailyData(channelfields, start, end):
    """Rebuild daily aggregates for each channel field in [start, end]."""
    if not end:
        end = datetime.datetime.now()
    for chfield in channelfields:
        window = {'channelfield': chfield,
                  'timestamp__gte': start,
                  'timestamp__lte': end}
        daily = aggregateDailyFeedData(chfield.channel.type, window, {}, {})
        createAggregateDailyData(daily, chfield)
def aggregateLatestMonthData(channelfields):
    """
    Bring the monthly aggregates up to date for each channel field.

    If the newest stored aggregate is for the current month it is updated
    in place; if it is older a fresh record is created (timestamped on the
    15th); if none exists at all the full history is built.
    """
    excludeargs = {}
    complexargs = {}
    for item in channelfields:
        currentmonthly = (AggregateMonthlyFeed.objects.filter(channelfield=item)
                          .order_by('-timestamp').first())
        if not currentmonthly:
            # No record exists yet -- probably a fresh database.
            mdata = aggregateMonthlyFeedData(item.channel.type,
                                             {'channelfield': item},
                                             complexargs, excludeargs)
            createAggregateMonthlyData(mdata, item)
            continue
        now = datetime.datetime.now()
        thismonth = now.replace(day=1, hour=0, minute=0, second=0,
                                microsecond=0)
        nextmonth = thismonth + relativedelta(months=1)
        midmonth = now.replace(day=15, hour=0, minute=0, second=0,
                               microsecond=0)
        # BUG FIX: a missing comma after the kwargs dict in the stale-month
        # branch made this whole module a SyntaxError; the computation is
        # identical for both branches, so do it once.
        data = aggregateMonthlyFeedData(item.channel.type,
                                        {'channelfield': item,
                                         'timestamp__gte': thismonth,
                                         'timestamp__lte': nextmonth},
                                        complexargs, excludeargs)
        if currentmonthly.timestamp.month == now.month:
            updateAggregateMonthlyData(data, item)
        else:
            newAggregateMonthlyData(data, item, midmonth)
def createAggregateMonthlyData(mdata, item):
    """
    Store a full set of monthly aggregates for *item* (initial setup only).

    *mdata* is the ``(avg, sum, count, min, max)`` tuple returned by
    aggregateMonthlyFeedData; one AggregateMonthlyFeed row is
    get_or_create'd per aggregation per month. Previously this body was
    copy-pasted five times, once per aggregation.
    """
    # Order must match aggregateMonthlyFeedData's return tuple.
    aggregations = ('AVG', 'SUM', 'COUNT', 'MIN', 'MAX')
    for aggregation, rows in zip(aggregations, mdata):
        for row in rows:
            AggregateMonthlyFeed.objects.get_or_create(
                data=row,
                channel=item.channel,
                channelfield=item,
                aggregation=aggregation,
                timestamp=row['timestamp'])
def updateAggregateMonthlyData(data, item):
    """
    Overwrite the most recent monthly record of each aggregation with the
    freshly computed values in *data* (``(avg, sum, count, min, max)``).

    Assumes one record already exists per aggregation -- the caller only
    invokes this when the current month already has stored aggregates.
    """
    # Order must match the (avg, sum, count, min, max) return tuple.
    for aggregation, values in zip(('AVG', 'SUM', 'COUNT', 'MIN', 'MAX'), data):
        record = (AggregateMonthlyFeed.objects
                  .filter(aggregation=aggregation,
                          channel=item.channel,
                          channelfield=item)
                  .order_by('-timestamp').first())
        record.data = list(values)
        record.lastupdate = datetime.datetime.now()
        record.save()
def newAggregateMonthlyData(data, item, midmonth):
    """
    Create the first records for a new month, timestamped at *midmonth*.

    *data* is the ``(avg, sum, count, min, max)`` tuple; empty aggregate
    lists are skipped. Previously this body was copy-pasted five times.
    """
    # Order must match the (avg, sum, count, min, max) return tuple.
    for aggregation, values in zip(('AVG', 'SUM', 'COUNT', 'MIN', 'MAX'), data):
        values = list(values)
        if values:
            AggregateMonthlyFeed(data=values,
                                 channel=item.channel,
                                 channelfield=item,
                                 aggregation=aggregation,
                                 timestamp=midmonth).save()
def aggregateLatestDailyData(channelfields):
    """
    Bring the daily aggregates up to date for each channel field
    (daily counterpart of aggregateLatestMonthData).
    """
    excludeargs = {}
    complexargs = {}
    # BUG FIX: *today* was only bound inside the record-exists branch, so
    # the fresh-database path raised NameError; compute it once up front.
    today = datetime.datetime.now().replace(hour=0, minute=0, second=0,
                                            microsecond=0)
    for item in channelfields:
        currentdaily = (AggregateDailyFeed.objects.filter(channelfield=item)
                        .order_by('-timestamp').first())
        if not currentdaily:
            # No record exists yet -- probably a fresh database.
            ddata = aggregateDailyFeedData(item.channel.type,
                                           {'channelfield': item},
                                           complexargs, excludeargs)
            createAggregateDailyData(ddata, item, today)
            continue
        # Both branches need today's aggregates; compute once.
        data = aggregateDailyFeedData(item.channel.type,
                                      {'channelfield': item,
                                       'timestamp__gte': today},
                                      complexargs, excludeargs)
        if currentdaily.timestamp.day == datetime.datetime.now().day:
            updateAggregateDailyData(data, item, today)
        else:
            newAggregateDailyData(data, item, today)
def createAggregateDailyData(ddata, item, today=None):
    """
    Store a full set of daily aggregates for *item* (initial setup only).

    *ddata* is the ``(avg, sum, count, min, max)`` tuple returned by
    aggregateDailyFeedData; one AggregateDailyFeed row is get_or_create'd
    per aggregation per day (each row carries its own timestamp).

    BUG FIX: *today* now defaults to None -- aggregateMultipleDailyData
    calls this with only two arguments, which used to raise TypeError. The
    parameter is unused and kept only for call-site compatibility.
    """
    # Order must match aggregateDailyFeedData's return tuple.
    aggregations = ('AVG', 'SUM', 'COUNT', 'MIN', 'MAX')
    for aggregation, rows in zip(aggregations, ddata):
        for row in rows:
            AggregateDailyFeed.objects.get_or_create(
                data=row,
                channel=item.channel,
                channelfield=item,
                aggregation=aggregation,
                timestamp=row['timestamp'])
def updateAggregateDailyData(data, item, today):
    """
    Overwrite the most recent daily record of each aggregation with the
    values in *data* (``(avg, sum, count, min, max)``). *today* is unused
    and kept for call-site compatibility.

    BUG FIX: the MAX record used to be assigned ``data[3]`` (the minima)
    and the MIN record ``data[4]`` (the maxima); indices now follow the
    (avg, sum, count, min, max) return order of aggregateDailyFeedData, as
    the monthly counterpart already did.
    """
    for aggregation, values in zip(('AVG', 'SUM', 'COUNT', 'MIN', 'MAX'), data):
        record = (AggregateDailyFeed.objects
                  .filter(aggregation=aggregation,
                          channel=item.channel,
                          channelfield=item)
                  .order_by('-timestamp').first())
        record.data = list(values)
        record.lastupdate = datetime.datetime.now()
        record.save()
def newAggregateDailyData(data, item, today):
    """
    Create the first records for a new day, timestamped at *today*.

    *data* is the ``(avg, sum, count, min, max)`` tuple; empty aggregate
    lists are skipped. Previously this body was copy-pasted five times.
    """
    # Order must match the (avg, sum, count, min, max) return tuple.
    for aggregation, values in zip(('AVG', 'SUM', 'COUNT', 'MIN', 'MAX'), data):
        values = list(values)
        if values:
            AggregateDailyFeed(data=values,
                               channel=item.channel,
                               channelfield=item,
                               aggregation=aggregation,
                               timestamp=today).save()
def removeNullValue(data):
    """Return *data* with None-valued entries dropped from every mapping."""
    return [dict((key, value) for key, value in row.items()
                 if value is not None)
            for row in data]
def removeZeroValue(data):
    """
    Return *data* with zero-valued entries dropped from every mapping
    (used to discard empty Count aggregates).

    BUG FIX: the original used ``v is not 0`` -- an identity comparison
    that only works by accident via CPython's small-int cache and is a
    SyntaxWarning on modern Python; use an equality test instead.
    """
    data_without_zero = []
    for item in data:
        data_without_zero.append(dict((k, v) for (k, v) in item.items()
                                      if v != 0))
    return data_without_zero
def removeEmptyString(data):
    """Return *data* with empty-string entries dropped from every mapping."""
    return [dict((key, value) for key, value in row.items() if value != "")
            for row in data]
# Added missing comma (fixes the aggregateMonthlyFeedData call syntax in
# aggregateLatestMonthData).
from dateutil.relativedelta import relativedelta
import datetime
import json
from django.db.models import Avg, Max, Min, Sum, Count
from django.http import JsonResponse
from django.core.cache import cache
from django.db import connection
from django.db.models import Q
from apps.utils.models import (Channel, River,
Feed, AggregateMonthlyFeed,
AggregateDailyFeed, ChannelField)
from util.scripts.timing_decorator import time_usage
def getFeeds(request):
    """
    Feed listing API.

    GET parameters (all optional):
      channel     -- channel id to filter by
      start, end  -- bound the feed timestamps (e.g. '2015-09-09')
      limit       -- maximum number of feed records returned
      data        -- aggregation level: 'raw' (default), 'daily' or 'monthly'
      field       -- restrict to one channel field (e.g. temperature)
      stationtype -- station type; defaults to WEATHER_STATION
      river       -- river id; selects data points on that river

    Example:
      /mamase/api/feed/?channel=1&start='2015-09-09'&end='2015-09-10'&limit=10

    Returns a JsonResponse with 'channel', 'feed' and 'river' keys; 'feed'
    is a nested structure (time level -> aggregation level -> location
    level -> data points). Responses are cached for ten minutes, keyed on
    the full request path.
    """
    channel = request.GET.get('channel', None)  # Select among list of channels
    start = request.GET.get('start', None)      # Data should be after this date
    end = request.GET.get('end', None)          # Data should be before this date
    limit = request.GET.get('limit', None)      # Maximum number of records
    data = request.GET.get('data', 'raw')       # Raw, Daily or Monthly
    field = request.GET.get('field', None)      # A specific field e.g temp
    station_type = request.GET.get('stationtype', "WEATHER_STATION")
    river = request.GET.get('river', None)      # All data points on a river

    # Serve a cached copy of this exact request when one exists.
    cache_key = request.get_full_path()
    result = cache.get(cache_key)
    if result:
        return result

    kwargs = {}
    args = {}
    excludeargs = {}
    complexargs = {}
    if channel:
        kwargs['channelfield__channel_id'] = channel
        args['id'] = channel
    if field:
        kwargs['channelfield__field_id'] = field
    if start:
        kwargs['timestamp__gte'] = start
    if end:
        kwargs['timestamp__lte'] = end
    if station_type:
        station_type = station_type.upper()
        if station_type == "RAIN_TEMP":
            # Rain/temperature values only; exclude 'soil temperature',
            # which would otherwise match the 'temp' substring.
            complexargs = (Q(channelfield__field__name__icontains='temp') |
                           Q(channelfield__field__name__icontains='rain'),)
            excludeargs['channelfield__field__name__icontains'] = 'soil temperature'
        elif station_type == "RIVER_DEPTH":
            if not channel and river:
                r = River.objects.filter(id=river)
                if r:
                    river = r[0]
                    # Get data for just this river's latest station.
                    kwargs['channelfield__channel_id'] = river.rivers.last().id
            args['type'] = station_type
        else:
            args['type'] = station_type
    if river:
        kwargs['channelfield__channel__river_id'] = river
        args['river_id'] = river

    feed = {}
    feed_without_null = []
    if data.lower() == "raw":
        feed_without_null = aggregateRawData(station_type, kwargs, complexargs, excludeargs)
    elif data.lower() == "daily":
        data = aggregateDailyFeedData(station_type, kwargs, complexargs, excludeargs)
        feed['daily'] = {'avg': list(data[0]),
                         'min': list(data[3]),
                         'max': list(data[4]),
                         'count': list(data[2]),
                         'sum': list(data[1])}
        feed_without_null.append(feed)
    elif data.lower() == "monthly":
        data = aggregateMonthlyFeedData(station_type, kwargs, complexargs, excludeargs)
        feed['monthly'] = {'avg': list(data[0]),
                           'min': list(data[3]),
                           'max': list(data[4]),
                           'count': list(data[2]),
                           'sum': list(data[1])}
        feed_without_null.append(feed)

    ch = Channel.objects.filter(**args).order_by('-id')
    channels = []
    for i in ch:
        values = i.channels.values('field__name', 'name',
                                   'id', 'field__id').distinct()
        # NOTE(review): the 'desciption' key typo is kept -- API consumers
        # may already depend on it.
        valuesdict = {'id': i.id, 'name': i.name,
                      'desciption': i.description,
                      'latitude': i.latitude,
                      'longitude': i.longitude,
                      'data_id': i.data_id}
        if i.river:
            valuesdict['river'] = i.river.id
        valuesdict['fields'] = list(values)
        channels.append(valuesdict)

    rivers = River.objects.all()
    riverdata = []
    for i in rivers:
        riverdata.append({'name': i.name, 'id': i.id})

    # BUG FIX: the limit used to be applied to ``feed`` (a dict), which
    # raised a TypeError that a bare except silently swallowed -- so the
    # limit never took effect. Apply it to the returned feed list instead.
    if limit:
        try:
            feed_without_null = feed_without_null[:int(limit)]
        except (TypeError, ValueError):
            pass

    result = JsonResponse(dict(channel=channels,
                               feed=feed_without_null,
                               river=riverdata))
    # BUG FIX: cache for ten minutes as documented above (previously the
    # backend's default timeout was silently used).
    cache.set(cache_key, result, 600)
    return result
@time_usage
def getAllData(request):
    """
    Return daily and monthly aggregates for every channel.

    The response maps 'daily' and 'monthly' to dicts keyed by aggregation
    ('avg', 'sum', 'count', 'min', 'max'); each value is a list of
    {channel_id: [aggregate rows]} entries, one per channel.
    """
    order = ('avg', 'sum', 'count', 'min', 'max')
    daily = dict((key, []) for key in order)
    monthly = dict((key, []) for key in order)
    excludeargs = {}
    complexargs = {}
    for item in Channel.objects.all():
        # BUG FIX: the rows iterated here are Channel instances, so the
        # station type is ``item.type`` -- ``item.channel.type`` raised
        # AttributeError (ChannelField rows have ``.channel``, not Channel).
        chd_data = aggregateDailyFeedData(item.type, {"channelfield__channel": item},
                                          complexargs, excludeargs)
        chm_data = aggregateMonthlyFeedData(item.type, {"channelfield__channel": item},
                                            complexargs, excludeargs)
        # Both helpers return (avg, sum, count, min, max) in this order.
        for idx, key in enumerate(order):
            daily[key].append({item.id: list(chd_data[idx])})
            monthly[key].append({item.id: list(chm_data[idx])})
    return JsonResponse({'daily': daily, 'monthly': monthly})
def aggregateRawData(station_type, kwargs, complexargs, excludeargs):
'''
Initially I would just pass the raw query to the API. But that does not
suffice since we separated channel and field as items we can filter by.
Now we shall have to group the data and bundle elements of the same
entryid together.
'''
if station_type == "RIVER_DEPTH":
reading = 'reading'
else:
reading = 'reading'
feed = (Feed.objects.filter(*complexargs).filter(**kwargs).exclude(**excludeargs)
.extra(select={'timestamp_formatted': "to_char(timestamp, 'YYYY-MM-DD HH24:MI:SS')"})
.values('entry_id', 'channelfield__channel_id',
'channelfield__name', 'timestamp_formatted',
reading, 'id').order_by('entry_id'))
feed = list(feed)
'''
Could call the removeNullValue method but would rather just clean up
everything in one loop. But there will be no nulls per se. Given we now
only store data on known fields. Create tracker variable and group by
entry id
'''
entryid_tracker = None
data = []
field_readings = {}
for item in feed:
if item['entry_id'] == entryid_tracker: # We already have a record for this entry. Append the field data
field_readings[item['channelfield__name']] = item[reading]
else:
'''
At this point, we have moved from one entry_id to another.
During the loop we have been bundling all data of one entry id into
the field readings dict. We now need to appen that to the field
readings dict and empty it for the next entry id. As such we remove
the unneeded fields but backup reading and channel name since they
are needed to create the dict. We then add the dict as is into the
data list and start the loop again.
'''
r = item[reading]
cfn = item['channelfield__name']
item.pop(reading) # Remove unneded fields
item.pop('id')
item.pop('channelfield__name')
item['fields'] = field_readings
data.append(item)
field_readings = {}
field_readings[cfn] = r
entryid_tracker = item['entry_id']
try:
data[-1]['fields'] = field_readings
except:
print "No data found during that time"
return data
def aggregateDailyFeedData(station_type, kwargs, complexargs, excludeargs):
    '''
    Compute per-day AVG/SUM/COUNT/MIN/MAX of readings for the feed rows
    selected by the filter arguments.

    Returns a (avg, sum, count, min, max) tuple of row lists; nulls are
    stripped from the value aggregates and zeros from the counts.
    ``station_type`` is kept for interface compatibility (the original
    if/else selected the same column in both branches).
    '''
    reading = 'reading'  # same column for every station type

    def _daily(aggregate):
        # One aggregation over the filtered feed, bucketed at midday of
        # each calendar day (so the bucket label is a stable timestamp).
        return (Feed.objects.filter(*complexargs).filter(**kwargs).exclude(**excludeargs)
                .extra(select={'timestamp': "to_char(timestamp, 'YYYY-MM-DD 12:00:00')"})
                .values('channelfield__channel', 'channelfield__field',
                        'channelfield__name', 'timestamp')
                .annotate(aggregate(reading)))

    d_avg = removeNullValue(_daily(Avg))
    d_sum = removeNullValue(_daily(Sum))
    d_min = removeNullValue(_daily(Min))
    d_max = removeNullValue(_daily(Max))
    d_count = removeZeroValue(_daily(Count))
    return d_avg, d_sum, d_count, d_min, d_max
def aggregateMonthlyFeedData(station_type, kwargs, complexargs, excludeargs):
    '''
    Compute per-month AVG/SUM/COUNT/MIN/MAX of readings for the feed rows
    selected by the filter arguments.

    Returns a (avg, sum, count, min, max) tuple of row lists; nulls are
    stripped from the value aggregates and zeros from the counts.
    ``station_type`` is kept for interface compatibility (the original
    if/else selected the same column in both branches).
    '''
    reading = 'reading'  # same column for every station type
    month_filter = connection.ops.date_trunc_sql('month', 'timestamp')

    def _monthly(aggregate):
        # One aggregation over the filtered feed, bucketed at the 15th of
        # each month (a stable mid-month label for charting).
        return (Feed.objects.filter(*complexargs).filter(**kwargs).exclude(**excludeargs)
                .extra({'date': month_filter})
                .extra(select={'timestamp': "to_char(timestamp, 'YYYY-MM-15 00:00:00')"})
                .values('channelfield__field', 'channelfield__channel__name',
                        'channelfield__name', 'timestamp')
                .annotate(aggregate(reading))
                .order_by('timestamp', 'channelfield__field'))

    m_avg = removeNullValue(_monthly(Avg))
    m_max = removeNullValue(_monthly(Max))
    m_min = removeNullValue(_monthly(Min))
    m_sum = removeNullValue(_monthly(Sum))
    m_count = removeZeroValue(_monthly(Count))
    return m_avg, m_sum, m_count, m_min, m_max
'''
Maybe ignore the loop for now (Approach 3). It would take up a lot of
precious time designing the logic, and most probably it would be slower.
The approach above works great -- like a charm -- but it is not sorted by
channels. I might have to loop over all channels, get the data for each
specific channel, and return that as a dictionary. A loop might be
unavoidable, but it will run at most about 10 times (the number of
channels), which is acceptable in my book. Run this as two processes:
calculate and store the result as a JSON string, then query for the
latest JSON string.
'''
def storeAggregatedData(channel=None, start=None, end=None):
    '''
    Refresh the stored monthly and daily aggregates.

    Without ``start`` only the most recent period is (re)computed for each
    field; with ``start`` (and an optional ``end``) the aggregates for the
    whole window are rebuilt.  ``channel`` limits the work to one
    channel's fields.
    '''
    filters = {}
    if channel:
        filters['channel_id'] = channel
    fields = ChannelField.objects.filter(**filters)
    if start:
        aggregateMultipleMonthlyData(fields, start, end)
        aggregateMultipleDailyData(fields, start, end)
    else:
        aggregateLatestMonthData(fields)
        aggregateLatestDailyData(fields)
def aggregateMultipleMonthlyData(channelfields, start, end):
    '''Rebuild the monthly aggregates of every field between start and end
    (``end`` defaults to now when falsy).'''
    end = end or datetime.datetime.now()
    for field in channelfields:
        window = {'channelfield': field,
                  'timestamp__gte': start,
                  'timestamp__lte': end}
        monthly = aggregateMonthlyFeedData(field.channel.type, window, {}, {})
        createAggregateMonthlyData(monthly, field)
def aggregateMultipleDailyData(channelfields, start, end):
    '''Rebuild the daily aggregates of every field between start and end
    (``end`` defaults to now when falsy).

    Bug fix: createAggregateDailyData() takes a third argument; the old
    call passed only two and raised TypeError.
    '''
    end = end or datetime.datetime.now()
    for field in channelfields:
        window = {'channelfield': field,
                  'timestamp__gte': start,
                  'timestamp__lte': end}
        daily = aggregateDailyFeedData(field.channel.type, window, {}, {})
        # ``today`` is not used by createAggregateDailyData (timestamps
        # come from the aggregated rows), so pass None explicitly.
        createAggregateDailyData(daily, field, None)
def aggregateLatestMonthData(channelfields):
    '''
    Bring AggregateMonthlyFeed up to date for each field.

    If the newest record belongs to the current month it is overwritten;
    if it belongs to a previous month a new record is created; if the
    field has no records at all (fresh database) everything is built from
    scratch.
    '''
    for field in channelfields:
        latest = (AggregateMonthlyFeed.objects.filter(channelfield=field)
                  .order_by('-timestamp').first())
        if latest is None:
            '''No record exisits. Probably a new database'''
            mdata = aggregateMonthlyFeedData(field.channel.type,
                                             {'channelfield': field}, {}, {})
            createAggregateMonthlyData(mdata, field)
            continue
        now = datetime.datetime.now()
        thismonth = now.replace(day=1, hour=0, minute=0, second=0,
                                microsecond=0)
        nextmonth = thismonth + relativedelta(months=1)
        midmonth = now.replace(day=15, hour=0, minute=0, second=0,
                               microsecond=0)
        # Both branches aggregate the same window; compute it once
        # (the original duplicated this call in each branch).
        data = aggregateMonthlyFeedData(field.channel.type,
                                        {'channelfield': field,
                                         'timestamp__gte': thismonth,
                                         'timestamp__lte': nextmonth},
                                        {}, {})
        if latest.timestamp.month == now.month:
            updateAggregateMonthlyData(data, field)
        else:
            newAggregateMonthlyData(data, field, midmonth)
def createAggregateMonthlyData(mdata, item):
    '''
    Initial population of AggregateMonthlyFeed for one channel field.

    ``mdata`` is the (avg, sum, count, min, max) tuple returned by
    aggregateMonthlyFeedData(); one record per aggregation type and month
    is created idempotently via get_or_create.  Replaces five copy-pasted
    loops with one.
    '''
    # Tuple order matches aggregateMonthlyFeedData's return value.
    aggregations = ('AVG', 'SUM', 'COUNT', 'MIN', 'MAX')
    for aggregation, rows in zip(aggregations, mdata):
        for row in rows:
            AggregateMonthlyFeed.objects.get_or_create(data=row,
                                                       channel=item.channel,
                                                       channelfield=item,
                                                       aggregation=aggregation,
                                                       timestamp=row['timestamp'])
def updateAggregateMonthlyData(data, item):
    '''
    Overwrite the latest monthly record of each aggregation type.

    ``data`` is the (avg, sum, count, min, max) tuple from
    aggregateMonthlyFeedData().  A missing record of some type is skipped
    (the original dereferenced ``.first()`` unconditionally and raised
    AttributeError on None).
    '''
    now = datetime.datetime.now()
    # Tuple order matches aggregateMonthlyFeedData's return value.
    for aggregation, rows in zip(('AVG', 'SUM', 'COUNT', 'MIN', 'MAX'), data):
        record = (AggregateMonthlyFeed.objects.filter(aggregation=aggregation,
                                                      channel=item.channel,
                                                      channelfield=item)
                  .order_by('-timestamp').first())
        if record is None:
            # No record of this type yet -- nothing to overwrite.
            continue
        record.data = list(rows)
        record.lastupdate = now
        record.save()
def newAggregateMonthlyData(data, item, midmonth):
    '''
    Create records for a new month, one per aggregation type; called when
    the data spills over into a new month.  Empty result sets produce no
    record, matching the original's per-list truthiness checks.
    '''
    # Tuple order matches aggregateMonthlyFeedData's return value.
    for aggregation, rows in zip(('AVG', 'SUM', 'COUNT', 'MIN', 'MAX'), data):
        rows = list(rows)
        if rows:
            AggregateMonthlyFeed(data=rows,
                                 channel=item.channel,
                                 channelfield=item,
                                 aggregation=aggregation,
                                 timestamp=midmonth).save()
def aggregateLatestDailyData(channelfields):
    '''
    Bring AggregateDailyFeed up to date for each field (same strategy as
    aggregateLatestMonthData, at day granularity).

    Bug fix: the original referenced ``today`` in the fresh-database
    branch, where it was never assigned (NameError); it is now computed
    up front.  The duplicated aggregation call is also hoisted.
    '''
    for field in channelfields:
        today = datetime.datetime.now().replace(hour=0, minute=0, second=0,
                                                microsecond=0)
        latest = (AggregateDailyFeed.objects.filter(channelfield=field)
                  .order_by('-timestamp').first())
        if latest is None:
            # No record exists yet -- probably a new database.
            ddata = aggregateDailyFeedData(field.channel.type,
                                           {'channelfield': field}, {}, {})
            createAggregateDailyData(ddata, field, today)
            continue
        # Both branches aggregate today's window; compute it once.
        data = aggregateDailyFeedData(field.channel.type,
                                      {'channelfield': field,
                                       'timestamp__gte': today}, {}, {})
        if latest.timestamp.day == datetime.datetime.now().day:
            updateAggregateDailyData(data, field, today)
        else:
            newAggregateDailyData(data, field, today)
def createAggregateDailyData(ddata, item, today=None):
    '''
    Initial population of AggregateDailyFeed for one channel field.

    ``ddata`` is the (avg, sum, count, min, max) tuple returned by
    aggregateDailyFeedData(); one record per aggregation type and day is
    created idempotently via get_or_create.  ``today`` is unused (record
    timestamps come from the aggregated rows themselves) and now defaults
    to None so the existing two-argument caller no longer raises
    TypeError.
    '''
    # Tuple order matches aggregateDailyFeedData's return value.
    aggregations = ('AVG', 'SUM', 'COUNT', 'MIN', 'MAX')
    for aggregation, rows in zip(aggregations, ddata):
        for row in rows:
            AggregateDailyFeed.objects.get_or_create(data=row,
                                                     channel=item.channel,
                                                     channelfield=item,
                                                     aggregation=aggregation,
                                                     timestamp=row['timestamp'])
def updateAggregateDailyData(data, item, today):
    '''
    Overwrite the latest daily record of each aggregation type.

    ``data`` is the (avg, sum, count, min, max) tuple from
    aggregateDailyFeedData().  Bug fix: the original wrote data[3] (the
    minima) into the MAX record and data[4] (the maxima) into the MIN
    record -- the monthly variant had them the right way round.  A
    missing record of some type is skipped instead of raising
    AttributeError.  ``today`` is unused but kept for the call sites.
    '''
    now = datetime.datetime.now()
    # Tuple order matches aggregateDailyFeedData's return value.
    for aggregation, rows in zip(('AVG', 'SUM', 'COUNT', 'MIN', 'MAX'), data):
        record = (AggregateDailyFeed.objects.filter(aggregation=aggregation,
                                                    channel=item.channel,
                                                    channelfield=item)
                  .order_by('-timestamp').first())
        if record is None:
            # No record of this type yet -- nothing to overwrite.
            continue
        record.data = list(rows)
        record.lastupdate = now
        record.save()
def newAggregateDailyData(data, item, today):
    '''
    Create records for a new day, one per aggregation type; called when
    the data spills over into a new day.  Empty result sets produce no
    record, matching the original's per-list truthiness checks.
    '''
    # Tuple order matches aggregateDailyFeedData's return value.
    for aggregation, rows in zip(('AVG', 'SUM', 'COUNT', 'MIN', 'MAX'), data):
        rows = list(rows)
        if rows:
            AggregateDailyFeed(data=rows,
                               channel=item.channel,
                               channelfield=item,
                               aggregation=aggregation,
                               timestamp=today).save()
def removeNullValue(data):
    '''Return a new list of dicts with every None-valued key dropped.'''
    return [dict((k, v) for (k, v) in entry.items() if v is not None)
            for entry in data]
def removeZeroValue(data):
    '''Return a new list of dicts with every zero-valued key dropped
    (used to strip empty COUNT buckets).

    Bug fix: the original used ``v is not 0`` -- an identity test that
    only works because of CPython's small-int cache -- instead of the
    equality test ``v != 0``.
    '''
    return [dict((k, v) for (k, v) in entry.items() if v != 0)
            for entry in data]
def removeEmptyString(data):
    '''Return a new list of dicts with every empty-string value dropped.'''
    cleaned = []
    for entry in data:
        cleaned.append(dict((k, v) for (k, v) in entry.items() if v != ""))
    return cleaned
|
import os
import re
from shutil import rmtree
from subprocess import check_call, PIPE
from distutils.command.config import config
from citools.version import get_git_describe, compute_version, compute_meta_version
from citools.git import fetch_repository
__all__ = ("Dependency", "ControlParser")
class BuildDebianPackage(config):
    """ After debianization is in place, build a package for it """
    description = "run debian build wrapper dpkg-buildpackage"
    user_options = [
    ]
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        # Bug fix: do not capture stdout.  check_call() never reads the
        # pipe, so dpkg-buildpackage's output eventually fills the OS
        # pipe buffer and the build deadlocks.
        check_call(['dpkg-buildpackage', '-rfakeroot', '-us', '-uc'])
class Dependency(object):
    """
    A single entry of a debian package's Depends: field
    (name plus an optional relation sign and version).
    """
    def __init__(self, name, version=None, sign=None):
        super(Dependency, self).__init__()
        self.name = name
        self.version = version
        self.sign = sign
        # A version given without a relation sign means "exactly".
        if not self.sign and version:
            self.sign = u'='
    def get_dependency_string(self):
        """Render as debian/control syntax, e.g. ``foo (>= 1.2)``."""
        if self.version:
            return u"%(name)s (%(sign)s %(version)s)" % {
                'name' : self.name,
                'version' : self.version,
                'sign' : self.sign or '=',
            }
        else:
            return self.name
    def extract_version_from_debversion(self, debversion):
        """Parse `` (<sign> <version>)`` and store sign and version."""
        # dict.has_key() was removed in Python 3; the ``in`` operator is
        # equivalent and works on Python 2 as well.
        version = re.match(r"\ ?\((?P<sign>[\=\>\<]+)\ (?P<version>[0-9\-\.]+)\)", debversion)
        if version and 'sign' in version.groupdict() \
            and 'version' in version.groupdict():
            self.version = version.groupdict()['version']
            self.sign = version.groupdict()['sign']
    def update_version(self, version_candidate):
        """
        Update my version if I'm allowed to do that (only exact '=' pins
        may be rewritten).
        """
        if self.sign == '=':
            self.version = version_candidate
    def __str__(self):
        return u"%(name)s: %(version)s" % {
            'name' : self.name,
            'version' : self.version or '<unspecified>',
        }
class ControlParser(object):
    """
    Parser for debian/control files.

    Operates on the file's full text (``control_file``); mutating
    operations such as replace_dependencies() rewrite that string.
    """
    def __init__(self, control_file):
        super(ControlParser, self).__init__()
        self.control_file = control_file
    def parse_dependency_line(self, line):
        """ Return list of Dependency objects from a Depends: line """
        #TODO: Better parsing, especially when we will need image-<version> package
        line = line[len("Depends:"):]
        dependencies = []
        dependency_candidates = line.split(",")
        for candidate in dependency_candidates:
            deps = re.findall(r"(?P<name>[a-z0-9\-]+)(?P<version>\ \([\=\>\<]+\ [0-9\-\.]+\))?", candidate)
            for dep in deps:
                new_dep = Dependency(dep[0])
                if dep[1]:
                    new_dep.extract_version_from_debversion(dep[1])
                dependencies.append(new_dep)
        return dependencies
    def get_dependencies(self):
        """ Return list of dependencies from all Depends: lines """
        dependencies = []
        for line in self.control_file.splitlines():
            if line.startswith('Depends:'):
                dependencies.extend(self.parse_dependency_line(line))
        return dependencies
    def get_packages(self):
        """ Return list of packages present in file """
        packages = []
        for line in self.control_file.splitlines():
            if line.startswith('Package:'):
                packages.extend([i.strip() for i in line[len('Package:'):].split(',')])
        return packages
    def check_downgrade(self, current_version, new_version):
        """
        Raise ValueError if new_version is lower then current_version;
        return True otherwise.
        """
        # list(...) so len()/indexing work on Python 3, where map() is
        # lazy; range replaces the Python-2-only xrange.
        curr_tuple = list(map(int, current_version.split(".")))
        new_tuple = list(map(int, new_version.split(".")))
        for i in range(0, len(curr_tuple)):
            if len(new_tuple) < (i+1):
                # new version is a shorter prefix of the current one
                raise ValueError("Attempt to downgrade %s to %s" % (
                    current_version,
                    new_version,
                ))
            elif new_tuple[i] > curr_tuple[i]:
                return True
            elif (new_tuple[i] < curr_tuple[i]):
                raise ValueError("Attempt to downgrade %s to %s" % (
                    current_version,
                    new_version,
                ))
        return True
    def get_dependency_merge(self, current_dependencies, new_dependencies):
        """
        Merge old dependencies with new one. If current dependency has version specified
        and it's in new_dependencies as well, replace it with it's version.
        Otherwise, leave it untouched.
        """
        deps = []
        for current_dep in current_dependencies:
            if current_dep.version:
                candidates = [i for i in new_dependencies if i.name == current_dep.name]
                if len(candidates) > 1:
                    raise ValueError(u"More then one dependency with same name")
                if len(candidates) == 1 and candidates[0].version:
                    if current_dep.version:
                        # refuse to silently move a pinned version backwards
                        self.check_downgrade(current_dep.version, candidates[0].version)
                    current_dep.update_version(candidates[0].version)
            deps.append(current_dep)
        return deps
    def replace_dependencies(self, dependencies):
        """
        In my control file, replace version of dependencies with exact version
        """
        new_control_file = []
        for line in self.control_file.splitlines():
            if line.startswith('Depends:'):
                new_deps = self.get_dependency_merge(current_dependencies=self.parse_dependency_line(line),
                                                     new_dependencies=dependencies)
                dep_string = u", ".join(
                    [i.get_dependency_string() for i in new_deps]
                )
                if dep_string:
                    line = u"Depends: %s" % dep_string
                else:
                    line = u"Depends:"
            new_control_file.append(line)
        self.control_file = u'\n'.join(new_control_file)
        # newline at the and of the file is good manner
        self.control_file += u'\n'
def fetch_new_dependencies(repository):
    """Check out *repository* (a dict with 'url' and 'branch'), return the
    dependencies declared in its debian/control, and remove the temporary
    checkout."""
    checkout = fetch_repository(repository=repository['url'],
                                branch=repository['branch'])
    dependencies = get_new_dependencies(checkout)
    rmtree(checkout)
    return dependencies
def get_new_dependencies(dir):
    """Return one exact-version Dependency per package declared in
    <dir>/debian/control, versioned from ``git describe``."""
    control = open(os.path.join(dir, 'debian', 'control'))
    try:
        parser = ControlParser(control.read())
    finally:
        control.close()  # the original leaked this file handle
    packages = parser.get_packages()
    version = ".".join(map(str, compute_version(get_git_describe(repository_directory=dir, fix_environment=True))))
    deps = [Dependency(package, version) for package in packages]
    return deps
def update_dependency_versions(repositories, control_path, workdir=None):
    """
    Update control_path (presumably debian/control) with package version collected
    by parsing debian/controls in dependencies.
    Also pins this package itself to the computed meta version.

    Uses ``with`` so the control file is closed even when parsing or
    writing raises (the original's manual close leaked on error).
    """
    workdir = workdir or os.curdir
    with open(control_path) as f:
        meta_parser = ControlParser(f.read())
    deps_from_repositories = []
    for repository in repositories:
        deps_from_repositories.extend(fetch_new_dependencies(repository))
    #FIXME: This will download deps again, fix it
    meta_version = compute_meta_version(repositories, workdir=workdir)
    meta_version_string = ".".join(map(str, meta_version))
    # also add myself as dependency, pinned to the meta version
    deps = get_new_dependencies(workdir)
    for dep in deps:
        dep.version = meta_version_string
    deps_from_repositories.extend(deps)
    meta_parser.replace_dependencies(deps_from_repositories)
    with open(control_path, 'w') as f:
        f.write(meta_parser.control_file)
class UpdateDependencyVersions(config):
    """distutils command: pin dependency versions in debian/control."""
    description = "parse and update versions in debian control file"
    user_options = []
    def initialize_options(self):
        """No options to initialize."""
    def finalize_options(self):
        """No options to validate."""
    def run(self):
        update_dependency_versions(
            self.distribution.dependencies_git_repositories,
            os.path.join('debian', 'control'))
Debian seems to freeze with PIPEd stdout
import os
import re
from shutil import rmtree
from subprocess import check_call, PIPE
from distutils.command.config import config
from citools.version import get_git_describe, compute_version, compute_meta_version
from citools.git import fetch_repository
__all__ = ("Dependency", "ControlParser")
class BuildDebianPackage(config):
    """ After debianization is in place, build a package for it """
    description = "run debian build wrapper dpkg-buildpackage"
    user_options = []
    def initialize_options(self):
        """No options to initialize."""
    def finalize_options(self):
        """No options to validate."""
    def run(self):
        # Build an unsigned (-us -uc) package under fakeroot; output is
        # left flowing to the console so dpkg-buildpackage cannot block
        # on a full, undrained pipe.
        check_call(['dpkg-buildpackage', '-rfakeroot', '-us', '-uc'])
class Dependency(object):
    """
    A single entry of a debian package's Depends: field
    (name plus an optional relation sign and version).
    """
    def __init__(self, name, version=None, sign=None):
        super(Dependency, self).__init__()
        self.name = name
        self.version = version
        self.sign = sign
        # A version given without a relation sign means "exactly".
        if not self.sign and version:
            self.sign = u'='
    def get_dependency_string(self):
        """Render as debian/control syntax, e.g. ``foo (>= 1.2)``."""
        if self.version:
            return u"%(name)s (%(sign)s %(version)s)" % {
                'name' : self.name,
                'version' : self.version,
                'sign' : self.sign or '=',
            }
        else:
            return self.name
    def extract_version_from_debversion(self, debversion):
        """Parse `` (<sign> <version>)`` and store sign and version."""
        # dict.has_key() was removed in Python 3; the ``in`` operator is
        # equivalent and works on Python 2 as well.
        version = re.match(r"\ ?\((?P<sign>[\=\>\<]+)\ (?P<version>[0-9\-\.]+)\)", debversion)
        if version and 'sign' in version.groupdict() \
            and 'version' in version.groupdict():
            self.version = version.groupdict()['version']
            self.sign = version.groupdict()['sign']
    def update_version(self, version_candidate):
        """
        Update my version if I'm allowed to do that (only exact '=' pins
        may be rewritten).
        """
        if self.sign == '=':
            self.version = version_candidate
    def __str__(self):
        return u"%(name)s: %(version)s" % {
            'name' : self.name,
            'version' : self.version or '<unspecified>',
        }
class ControlParser(object):
    """
    Parser for debian/control files.

    Operates on the file's full text (``control_file``); mutating
    operations such as replace_dependencies() rewrite that string.
    """
    def __init__(self, control_file):
        super(ControlParser, self).__init__()
        self.control_file = control_file
    def parse_dependency_line(self, line):
        """ Return list of Dependency objects from a Depends: line """
        #TODO: Better parsing, especially when we will need image-<version> package
        line = line[len("Depends:"):]
        dependencies = []
        dependency_candidates = line.split(",")
        for candidate in dependency_candidates:
            deps = re.findall(r"(?P<name>[a-z0-9\-]+)(?P<version>\ \([\=\>\<]+\ [0-9\-\.]+\))?", candidate)
            for dep in deps:
                new_dep = Dependency(dep[0])
                if dep[1]:
                    new_dep.extract_version_from_debversion(dep[1])
                dependencies.append(new_dep)
        return dependencies
    def get_dependencies(self):
        """ Return list of dependencies from all Depends: lines """
        dependencies = []
        for line in self.control_file.splitlines():
            if line.startswith('Depends:'):
                dependencies.extend(self.parse_dependency_line(line))
        return dependencies
    def get_packages(self):
        """ Return list of packages present in file """
        packages = []
        for line in self.control_file.splitlines():
            if line.startswith('Package:'):
                packages.extend([i.strip() for i in line[len('Package:'):].split(',')])
        return packages
    def check_downgrade(self, current_version, new_version):
        """
        Raise ValueError if new_version is lower then current_version;
        return True otherwise.
        """
        # list(...) so len()/indexing work on Python 3, where map() is
        # lazy; range replaces the Python-2-only xrange.
        curr_tuple = list(map(int, current_version.split(".")))
        new_tuple = list(map(int, new_version.split(".")))
        for i in range(0, len(curr_tuple)):
            if len(new_tuple) < (i+1):
                # new version is a shorter prefix of the current one
                raise ValueError("Attempt to downgrade %s to %s" % (
                    current_version,
                    new_version,
                ))
            elif new_tuple[i] > curr_tuple[i]:
                return True
            elif (new_tuple[i] < curr_tuple[i]):
                raise ValueError("Attempt to downgrade %s to %s" % (
                    current_version,
                    new_version,
                ))
        return True
    def get_dependency_merge(self, current_dependencies, new_dependencies):
        """
        Merge old dependencies with new one. If current dependency has version specified
        and it's in new_dependencies as well, replace it with it's version.
        Otherwise, leave it untouched.
        """
        deps = []
        for current_dep in current_dependencies:
            if current_dep.version:
                candidates = [i for i in new_dependencies if i.name == current_dep.name]
                if len(candidates) > 1:
                    raise ValueError(u"More then one dependency with same name")
                if len(candidates) == 1 and candidates[0].version:
                    if current_dep.version:
                        # refuse to silently move a pinned version backwards
                        self.check_downgrade(current_dep.version, candidates[0].version)
                    current_dep.update_version(candidates[0].version)
            deps.append(current_dep)
        return deps
    def replace_dependencies(self, dependencies):
        """
        In my control file, replace version of dependencies with exact version
        """
        new_control_file = []
        for line in self.control_file.splitlines():
            if line.startswith('Depends:'):
                new_deps = self.get_dependency_merge(current_dependencies=self.parse_dependency_line(line),
                                                     new_dependencies=dependencies)
                dep_string = u", ".join(
                    [i.get_dependency_string() for i in new_deps]
                )
                if dep_string:
                    line = u"Depends: %s" % dep_string
                else:
                    line = u"Depends:"
            new_control_file.append(line)
        self.control_file = u'\n'.join(new_control_file)
        # newline at the and of the file is good manner
        self.control_file += u'\n'
def fetch_new_dependencies(repository):
    """Check out *repository* (a dict with 'url' and 'branch'), return the
    dependencies declared in its debian/control, and remove the temporary
    checkout."""
    checkout = fetch_repository(repository=repository['url'],
                                branch=repository['branch'])
    dependencies = get_new_dependencies(checkout)
    rmtree(checkout)
    return dependencies
def get_new_dependencies(dir):
    """Return one exact-version Dependency per package declared in
    <dir>/debian/control, versioned from ``git describe``."""
    control = open(os.path.join(dir, 'debian', 'control'))
    try:
        parser = ControlParser(control.read())
    finally:
        control.close()  # the original leaked this file handle
    packages = parser.get_packages()
    version = ".".join(map(str, compute_version(get_git_describe(repository_directory=dir, fix_environment=True))))
    deps = [Dependency(package, version) for package in packages]
    return deps
def update_dependency_versions(repositories, control_path, workdir=None):
    """
    Update control_path (presumably debian/control) with package version collected
    by parsing debian/controls in dependencies.
    Also pins this package itself to the computed meta version.

    Uses ``with`` so the control file is closed even when parsing or
    writing raises (the original's manual close leaked on error).
    """
    workdir = workdir or os.curdir
    with open(control_path) as f:
        meta_parser = ControlParser(f.read())
    deps_from_repositories = []
    for repository in repositories:
        deps_from_repositories.extend(fetch_new_dependencies(repository))
    #FIXME: This will download deps again, fix it
    meta_version = compute_meta_version(repositories, workdir=workdir)
    meta_version_string = ".".join(map(str, meta_version))
    # also add myself as dependency, pinned to the meta version
    deps = get_new_dependencies(workdir)
    for dep in deps:
        dep.version = meta_version_string
    deps_from_repositories.extend(deps)
    meta_parser.replace_dependencies(deps_from_repositories)
    with open(control_path, 'w') as f:
        f.write(meta_parser.control_file)
class UpdateDependencyVersions(config):
    """distutils command: pin dependency versions in debian/control."""
    description = "parse and update versions in debian control file"
    user_options = []
    def initialize_options(self):
        """No options to initialize."""
    def finalize_options(self):
        """No options to validate."""
    def run(self):
        update_dependency_versions(
            self.distribution.dependencies_git_repositories,
            os.path.join('debian', 'control'))
|
from types import StringTypes
from collections import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_epoch
from sfa.util.sfalogging import logger
from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
from sfa.rspecs.rspec import RSpec
from sfa.planetlab.vlink import VLink
from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname
import time
# Largest 32-bit signed integer.  The ``2L`` long-literal syntax is
# Python-2-only; the plain form is equivalent there (ints auto-promote)
# and also valid on Python 3.
MAXINT = 2**31-1
class PlSlices:
rspec_to_slice_tag = {'max_rate':'net_max_rate'}
def __init__(self, driver):
self.driver = driver
def get_slivers(self, xrn, node=None):
    """
    Return the list of sliver dicts for the slice named by xrn.

    Each sliver carries the slice identity, expiry, the ssh keys of its
    enabled users, and the applicable slice tags (restricted to per-node,
    nodegroup and global tags for the given node when one is passed).

    :param xrn: slice urn/hrn
    :param node: optional node dict with 'node_id' and 'nodegroup_ids';
        when None only global attributes are collected
    """
    hrn, type = urn_to_hrn(xrn)
    slice_name = hrn_to_pl_slicename(hrn)
    # XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
    # of doing all of this?
    #return self.driver.shell.GetSliceTicket(self.auth, slice_name)

    # from PLCAPI.GetSlivers.get_slivers()
    slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
    slices = self.driver.shell.GetSlices(slice_name, slice_fields)
    # Build up list of users and slice attributes
    person_ids = set()
    all_slice_tag_ids = set()
    for slice in slices:
        person_ids.update(slice['person_ids'])
        all_slice_tag_ids.update(slice['slice_tag_ids'])
    person_ids = list(person_ids)
    all_slice_tag_ids = list(all_slice_tag_ids)
    # Get user information
    all_persons_list = self.driver.shell.GetPersons({'person_id': person_ids, 'enabled': True},
                                                    ['person_id', 'enabled', 'key_ids'])
    all_persons = {}
    for person in all_persons_list:
        all_persons[person['person_id']] = person
    # Build up list of keys
    key_ids = set()
    for person in all_persons.values():
        key_ids.update(person['key_ids'])
    key_ids = list(key_ids)
    # Get user account keys
    all_keys_list = self.driver.shell.GetKeys(key_ids, ['key_id', 'key', 'key_type'])
    all_keys = {}
    for key in all_keys_list:
        all_keys[key['key_id']] = key
    # Get slice attributes
    all_slice_tags_list = self.driver.shell.GetSliceTags(all_slice_tag_ids)
    all_slice_tags = {}
    for slice_tag in all_slice_tags_list:
        all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
    slivers = []
    for slice in slices:
        keys = []
        for person_id in slice['person_ids']:
            if person_id in all_persons:
                person = all_persons[person_id]
                # disabled accounts contribute no keys
                if not person['enabled']:
                    continue
                for key_id in person['key_ids']:
                    if key_id in all_keys:
                        key = all_keys[key_id]
                        keys += [{'key_type': key['key_type'],
                                  'key': key['key']}]
        attributes = []
        # All (per-node and global) attributes for this slice
        slice_tags = []
        for slice_tag_id in slice['slice_tag_ids']:
            if slice_tag_id in all_slice_tags:
                slice_tags.append(all_slice_tags[slice_tag_id])
        # Per-node sliver attributes take precedence over global
        # slice attributes, so set them first.
        # Then comes nodegroup slice attributes
        # Followed by global slice attributes
        sliver_attributes = []
        if node is not None:
            for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
                sliver_attributes.append(sliver_attribute['tagname'])
                attributes.append({'tagname': sliver_attribute['tagname'],
                                   'value': sliver_attribute['value']})
            # set nodegroup slice attributes
            for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
                # Do not set any nodegroup slice attributes for
                # which there is at least one sliver attribute
                # already set.
                # BUGFIX: the original tested `slice_tag not in slice_tags`,
                # which is always False because slice_tag is drawn from
                # slice_tags, so nodegroup attributes were never applied.
                # Compare tag names against the per-node attributes instead,
                # mirroring the global-attribute branch below.
                if slice_tag['tagname'] not in sliver_attributes:
                    attributes.append({'tagname': slice_tag['tagname'],
                                       'value': slice_tag['value']})
        for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
            # Do not set any global slice attributes for
            # which there is at least one sliver attribute
            # already set.
            if slice_tag['tagname'] not in sliver_attributes:
                attributes.append({'tagname': slice_tag['tagname'],
                                   'value': slice_tag['value']})
        # XXX Sanity check; though technically this should be a system invariant
        # checked with an assertion
        if slice['expires'] > MAXINT: slice['expires'] = MAXINT
        slivers.append({
            'hrn': hrn,
            'name': slice['name'],
            'slice_id': slice['slice_id'],
            'instantiation': slice['instantiation'],
            'expires': slice['expires'],
            'keys': keys,
            'attributes': attributes
        })
    return slivers
def get_peer(self, xrn):
    """Return the PLC peer record owning this slice, or None if it is local."""
    hrn, type = urn_to_hrn(xrn)
    # Because of myplc federation we must determine whether this slice
    # belongs to our local plc or to a myplc peer; assume local unless a
    # peer record's names match the site authority.
    slice_authority = get_authority(hrn)
    # site authority = sfa root authority or sub authority of the slice
    site_authority = get_authority(slice_authority).lower()
    peers = self.driver.shell.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
    matching_peer = None
    for candidate in peers:
        candidate_names = [value.lower() for value in candidate.values()
                           if isinstance(value, StringTypes)]
        if site_authority in candidate_names:
            matching_peer = candidate
    return matching_peer
def get_sfa_peer(self, xrn):
    """Return the sfa site authority for this xrn, or None if we are it."""
    hrn, type = urn_to_hrn(xrn)
    site_authority = get_authority(get_authority(hrn))
    # when we are the authority ourselves there is no peer to report
    return site_authority if site_authority != self.driver.hrn else None
def verify_slice_leases(self, slice, rspec_requested_leases, peer):
    """
    Reconcile the slice's PLC leases with the ones requested in the rspec.

    Leases present in PLC but not requested are deleted; requested leases
    not yet present are added. Only leases for this slice AND for nodes of
    this aggregate are taken into account. Returns the lease records as
    read from PLC before the update.

    :param slice: PLC slice dict (needs 'name' and 'slice_id')
    :param rspec_requested_leases: lease dicts from the request rspec
        ('slice_id', 'component_id', 'start_time', 'duration')
    :param peer: peer record when the slice is federated, else None
    """
    leases = self.driver.shell.GetLeases({'name': slice['name'], 'clip': int(time.time())},
                                         ['lease_id', 'name', 'hostname', 't_from', 't_until'])
    grain = self.driver.shell.GetLeaseGranularity()
    requested_leases = []
    for lease in rspec_requested_leases:
        requested_lease = {}
        slice_name = hrn_to_pl_slicename(lease['slice_id'])
        if slice_name != slice['name']:
            continue
        # BUGFIX: take into account only the leases on nodes belonging to
        # this aggregate -- skip component_ids under a foreign authority.
        elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn:
            continue
        hostname = xrn_to_hostname(lease['component_id'])
        # fill the requested node with nitos ids
        requested_lease['name'] = slice['name']
        requested_lease['hostname'] = hostname
        requested_lease['t_from'] = int(lease['start_time'])
        requested_lease['t_until'] = int(lease['duration']) * grain + int(lease['start_time'])
        requested_leases.append(requested_lease)

    # prepare actual slice leases by lease_id
    leases_by_id = {}
    for lease in leases:
        leases_by_id[lease['lease_id']] = {'name': lease['name'], 'hostname': lease['hostname'],
                                           't_from': lease['t_from'], 't_until': lease['t_until']}
    added_leases = []
    kept_leases_id = []
    deleted_leases_id = []
    for lease_id in leases_by_id:
        if leases_by_id[lease_id] not in requested_leases:
            deleted_leases_id.append(lease_id)
        else:
            kept_leases_id.append(lease_id)
            requested_leases.remove(leases_by_id[lease_id])
    # whatever was requested and not already present must be added
    added_leases = requested_leases

    try:
        # federated slices must be unbound from their peer before editing
        if peer:
            self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
        self.driver.shell.DeleteLeases(deleted_leases_id)
        for lease in added_leases:
            self.driver.shell.AddLeases(lease['hostname'], slice['name'], lease['t_from'], lease['t_until'])
    except:
        logger.log_exc('Failed to add/remove slice leases')
    return leases
def verify_slice_nodes(self, slice, slivers, peer):
    """
    Reconcile the set of nodes the slice runs on with the requested slivers.

    Currently attached nodes absent from the request are removed, requested
    hostnames not yet attached are added, and client_id tags from the
    request are applied. Returns the node records as read from PLC before
    the update.

    :param slice: PLC slice dict (needs 'name', 'slice_id', 'node_ids')
    :param slivers: sliver node dicts from the request rspec
    :param peer: peer record when the slice is federated, else None
    """
    nodes = self.driver.shell.GetNodes(slice['node_ids'], ['node_id', 'hostname', 'interface_ids'])
    current_slivers = [node['hostname'] for node in nodes]
    requested_slivers = []
    tags = []
    for node in slivers:
        hostname = None
        # the hostname may arrive as component_name or inside component_id
        if node.get('component_name'):
            hostname = node.get('component_name').strip()
        elif node.get('component_id'):
            hostname = xrn_to_hostname(node.get('component_id').strip())
        if node.get('client_id'):
            tags.append({'slicename': slice['name'],
                         'tagname': 'client_id',
                         'value': node['client_id'],
                         'node': hostname})
        if hostname:
            requested_slivers.append(hostname)
    # remove nodes not in rspec
    deleted_nodes = list(set(current_slivers).difference(requested_slivers))
    # add nodes from rspec
    added_nodes = list(set(requested_slivers).difference(current_slivers))
    try:
        # federated slices must be unbound from their peer before editing
        if peer:
            self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
        self.driver.shell.AddSliceToNodes(slice['name'], added_nodes)
        self.driver.shell.DeleteSliceFromNodes(slice['name'], deleted_nodes)
    except:
        logger.log_exc('Failed to add/remove slice from nodes')
    # add tags
    for tag in tags:
        try:
            self.driver.shell.AddSliceTag(tag['slicename'], tag['tagname'], tag['value'], tag['node'])
        except:
            logger.log_exc('Failed to add slice tag')
    return nodes
def free_egre_key(self):
    """
    Return an unused EGRE key in the range 1..255, as a string.

    :raises KeyError: when all 255 keys are already taken
    """
    taken = set(int(tag['value'])
                for tag in self.driver.shell.GetSliceTags({'tagname': 'egre_key'}))
    for candidate in range(1, 256):
        if candidate not in taken:
            return str(candidate)
    raise KeyError("No more EGRE keys available")
def verify_slice_links(self, slice, requested_links, nodes):
    """
    Configure the slice tags needed for the virtual links requested in the
    rspec (egre key, netns, CAP_NET_ADMIN, plus per-node topo_rspec and
    vini_topo tags), then apply them via verify_slice_attributes.

    :param slice: PLC slice dict
    :param requested_links: link dicts from the rspec ('interface1' etc.)
    :param nodes: node records ('node_id', 'interface_ids') for the slice
    """
    # nodes is undefined here
    if not requested_links:
        return

    # build dict of nodes
    nodes_dict = {}
    interface_ids = []
    for node in nodes:
        nodes_dict[node['node_id']] = node
        interface_ids.extend(node['interface_ids'])

    # build dict of interfaces
    interfaces = self.driver.shell.GetInterfaces(interface_ids)
    interfaces_dict = {}
    for interface in interfaces:
        interfaces_dict[interface['interface_id']] = interface

    slice_tags = []
    # set egre key
    slice_tags.append({'name': 'egre_key', 'value': self.free_egre_key()})
    # set netns
    slice_tags.append({'name': 'netns', 'value': '1'})
    # set cap_net_admin
    # need to update the attribute string?
    slice_tags.append({'name': 'capabilities', 'value': 'CAP_NET_ADMIN'})

    for link in requested_links:
        # get the ip address of the first node in the link
        ifname1 = Xrn(link['interface1']['component_id']).get_leaf()
        # leaf is expected to look like 'node<NN>:<device>'
        (node_raw, device) = ifname1.split(':')
        node_id = int(node_raw.replace('node', ''))
        node = nodes_dict[node_id]
        if1 = interfaces_dict[node['interface_ids'][0]]
        ipaddr = if1['ip']
        topo_rspec = VLink.get_topo_rspec(link, ipaddr)
        # set topo_rspec tag
        slice_tags.append({'name': 'topo_rspec', 'value': str([topo_rspec]), 'node_id': node_id})
        # set vini_topo tag
        slice_tags.append({'name': 'vini_topo', 'value': 'manual', 'node_id': node_id})
        #self.driver.shell.AddSliceTag(slice['name'], 'topo_rspec', str([topo_rspec]), node_id)

    self.verify_slice_attributes(slice, slice_tags, {'append': True}, admin=True)
def handle_peer(self, site, slice, persons, peer):
    """
    Bind freshly created site/slice/person/key objects to their remote peer.

    On any bind failure the locally created object is deleted again and the
    exception is re-raised, so callers never keep half-bound records.

    :param site: PLC site dict or None
    :param slice: PLC slice dict
    :param persons: person dicts ('person_id', 'peer_person_id', 'keys',
        'key_ids') created for this slice
    :param peer: peer record ('shortname'), or None for purely local slices
    :returns: the slice dict, unchanged
    """
    if peer:
        # bind site
        try:
            if site:
                self.driver.shell.BindObjectToPeer('site', site['site_id'], peer['shortname'], slice['site_id'])
        except Exception:
            # roll back the site we just created, then propagate
            self.driver.shell.DeleteSite(site['site_id'])
            raise
        # bind slice
        try:
            if slice:
                self.driver.shell.BindObjectToPeer('slice', slice['slice_id'], peer['shortname'], slice['slice_id'])
        except Exception:
            self.driver.shell.DeleteSlice(slice['slice_id'])
            raise
        # bind persons
        for person in persons:
            try:
                self.driver.shell.BindObjectToPeer('person',
                    person['person_id'], peer['shortname'], person['peer_person_id'])
                for (key, remote_key_id) in zip(person['keys'], person['key_ids']):
                    try:
                        self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
                    except Exception:
                        self.driver.shell.DeleteKey(key['key_id'])
                        # BUGFIX: the original called logger(...) directly --
                        # the sfa logger object is not callable, so the log
                        # attempt itself raised and aborted the person loop.
                        logger.error("failed to bind key: %s to peer: %s " % (key['key_id'], peer['shortname']))
            except Exception:
                self.driver.shell.DeletePerson(person['person_id'])
                raise
    return slice
def verify_site(self, slice_xrn, slice_record=None, peer=None, sfa_peer=None, options=None):
    """
    Ensure a PLC site exists for the authority of slice_xrn.

    Creates the site (exempted from monitor policies) when missing; when it
    already exists and the slice is federated, unbinds it from the peer so
    it can be modified (handle_peer re-binds later). Returns the site dict.

    :param slice_xrn: slice urn/hrn whose authority names the site
    :param slice_record: optional slice record; 'site_id' is used as the
        remote peer_site_id for federated sites
    :param peer: peer record when federated, else None
    :param sfa_peer: unused (kept for interface compatibility)
    :param options: unused (kept for interface compatibility)
    """
    # avoid the shared-mutable-default-argument pitfall
    slice_record = slice_record if slice_record is not None else {}
    (slice_hrn, type) = urn_to_hrn(slice_xrn)
    site_hrn = get_authority(slice_hrn)
    # login base can't be longer than 20 characters
    slicename = hrn_to_pl_slicename(slice_hrn)
    authority_name = slicename.split('_')[0]
    login_base = authority_name[:20]
    sites = self.driver.shell.GetSites(login_base)
    if not sites:
        # create new site record
        site = {'name': 'geni.%s' % authority_name,
                'abbreviated_name': authority_name,
                'login_base': login_base,
                'max_slices': 100,
                'max_slivers': 1000,
                'enabled': True,
                'peer_site_id': None}
        if peer:
            site['peer_site_id'] = slice_record.get('site_id', None)
        site['site_id'] = self.driver.shell.AddSite(site)
        # exempt federated sites from monitor policies
        self.driver.shell.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
        # # is this still necessary?
        # # add record to the local registry
        # if sfa_peer and slice_record:
        #     peer_dict = {'type': 'authority', 'hrn': site_hrn, \
        #                  'peer_authority': sfa_peer, 'pointer': site['site_id']}
        #     self.registry.register_peer_object(self.credential, peer_dict)
    else:
        site = sites[0]
        if peer:
            # unbind from peer so we can modify if necessary. Will bind back later
            self.driver.shell.UnBindObjectFromPeer('site', site['site_id'], peer['shortname'])
    return site
def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, options=None):
    """
    Ensure the PLC slice named by slice_hrn exists and is up to date.

    Creates the slice when missing; otherwise unbinds it from its peer (if
    federated) and refreshes its expiry from slice_record. Returns the
    slice dict.

    :param slice_hrn: slice hrn
    :param slice_record: record dict; 'url', 'description', 'slice_id' and
        'expires' are consulted when present
    :param peer: peer record when federated, else None
    :param sfa_peer: unused (kept for interface compatibility)
    :param options: unused (kept for interface compatibility)
    """
    slicename = hrn_to_pl_slicename(slice_hrn)
    slices = self.driver.shell.GetSlices([slicename])
    if not slices:
        slice = {'name': slicename,
                 'url': slice_record.get('url', slice_hrn),
                 'description': slice_record.get('description', slice_hrn)}
        # add the slice
        slice['slice_id'] = self.driver.shell.AddSlice(slice)
        slice['node_ids'] = []
        slice['person_ids'] = []
        if peer:
            slice['peer_slice_id'] = slice_record.get('slice_id', None)
        # mark this slice as an sfa peer record
        # if sfa_peer:
        #     peer_dict = {'type': 'slice', 'hrn': slice_hrn,
        #                  'peer_authority': sfa_peer, 'pointer': slice['slice_id']}
        #     self.registry.register_peer_object(self.credential, peer_dict)
    else:
        slice = slices[0]
        if peer:
            slice['peer_slice_id'] = slice_record.get('slice_id', None)
            # unbind from peer so we can modify if necessary. Will bind back later
            self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
        # Update existing record (e.g. expires field) with the latest info.
        if slice_record.get('expires'):
            requested_expires = int(datetime_to_epoch(utcparse(slice_record['expires'])))
            if requested_expires and slice['expires'] != requested_expires:
                self.driver.shell.UpdateSlice(slice['slice_id'], {'expires': requested_expires})
    return slice
#def get_existing_persons(self, users):
def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, options={}):
    """
    Reconcile the slice's PLC user accounts with the requested users.

    Requested users are matched to existing PLC persons by email when one is
    given, otherwise by site + username prefix (falling back to a fake
    '<username>@geni.net' address). Missing accounts are created, enabled,
    attached to their site and to the slice, and their ssh keys registered;
    stale slice members are removed only when options['append'] is False.
    Keys of already-known users are synced via verify_keys.

    :param slice_hrn: slice hrn (unused directly; kept for interface)
    :param slice_record: slice record dict ('name', 'person_ids')
    :param users: requested user dicts ('urn', optionally 'email', 'keys',
        'key_ids', 'person_id', 'first_name', 'last_name')
    :param peer: peer record when federated, else None
    :param sfa_peer: unused except in commented-out registry code
    :param options: 'append' (default True) keeps unrequested slice members
    :returns: list of newly created person dicts
    """
    users_by_email = {}
    users_by_site = defaultdict(list)
    users_dict = {}
    for user in users:
        user['urn'] = user['urn'].lower()
        hrn, type = urn_to_hrn(user['urn'])
        username = get_leaf(hrn)
        login_base = PlXrn(xrn=user['urn']).pl_login_base()
        user['username'] = username
        user['site'] = login_base
        if 'email' in user:
            user['email'] = user['email'].lower()
            users_by_email[user['email']] = user
            users_dict[user['email']] = user
        else:
            # no email given: will be matched against site members below
            users_by_site[user['site']].append(user)

    # start building a list of existing users
    existing_user_ids = []
    existing_user_ids_filter = []
    if users_by_email:
        existing_user_ids_filter.extend(users_by_email.keys())
    if users_by_site:
        for login_base in users_by_site:
            users = users_by_site[login_base]
            for user in users:
                existing_user_ids_filter.append(user['username'] + '@geni.net')
    if existing_user_ids_filter:
        # get existing users by email
        existing_users = self.driver.shell.GetPersons({'email': existing_user_ids_filter},
                                                      ['person_id', 'key_ids', 'email'])
        existing_user_ids.extend([user['email'] for user in existing_users])

    if users_by_site:
        # get a list of user sites (based on requeste user urns
        site_list = self.driver.shell.GetSites(users_by_site.keys(), \
            ['site_id', 'login_base', 'person_ids'])
        # get all existing users at these sites
        sites = {}
        site_user_ids = []
        for site in site_list:
            sites[site['site_id']] = site
            site_user_ids.extend(site['person_ids'])
        existing_site_persons_list = self.driver.shell.GetPersons(site_user_ids,
                                                                  ['person_id', 'key_ids', 'email', 'site_ids'])
        # all requested users are either existing users or new (added) users
        for login_base in users_by_site:
            requested_site_users = users_by_site[login_base]
            for requested_user in requested_site_users:
                user_found = False
                for existing_user in existing_site_persons_list:
                    for site_id in existing_user['site_ids']:
                        if site_id in sites:
                            site = sites[site_id]
                            # match on site + '<username>@' email prefix
                            if login_base == site['login_base'] and \
                               existing_user['email'].startswith(requested_user['username'] + '@'):
                                existing_user_ids.append(existing_user['email'])
                                requested_user['email'] = existing_user['email']
                                users_dict[existing_user['email']] = requested_user
                                user_found = True
                                break
                    if user_found:
                        break
                if user_found == False:
                    # no existing account: fabricate a placeholder email
                    fake_email = requested_user['username'] + '@geni.net'
                    requested_user['email'] = fake_email
                    users_dict[fake_email] = requested_user

    # requested slice users
    requested_user_ids = users_dict.keys()
    # existing slice users
    existing_slice_users_filter = {'person_id': slice_record.get('person_ids', [])}
    existing_slice_users = self.driver.shell.GetPersons(existing_slice_users_filter,
                                                        ['person_id', 'key_ids', 'email'])
    existing_slice_user_ids = [user['email'] for user in existing_slice_users]

    # users to be added, removed or updated
    added_user_ids = set(requested_user_ids).difference(existing_user_ids)
    added_slice_user_ids = set(requested_user_ids).difference(existing_slice_user_ids)
    removed_user_ids = set(existing_slice_user_ids).difference(requested_user_ids)
    updated_user_ids = set(existing_slice_user_ids).intersection(requested_user_ids)

    # Remove stale users (only if we are not appending).
    # Append by default.
    append = options.get('append', True)
    if append == False:
        for removed_user_id in removed_user_ids:
            self.driver.shell.DeletePersonFromSlice(removed_user_id, slice_record['name'])

    # update_existing users
    updated_users_list = [user for user in users_dict.values() if user['email'] in \
        updated_user_ids]
    self.verify_keys(existing_slice_users, updated_users_list, peer, options)

    added_persons = []
    # add new users
    for added_user_id in added_user_ids:
        added_user = users_dict[added_user_id]
        hrn, type = urn_to_hrn(added_user['urn'])
        person = {
            'first_name': added_user.get('first_name', hrn),
            'last_name': added_user.get('last_name', hrn),
            'email': added_user_id,
            'peer_person_id': None,
            'keys': [],
            'key_ids': added_user.get('key_ids', []),
        }
        person['person_id'] = self.driver.shell.AddPerson(person)
        if peer:
            person['peer_person_id'] = added_user['person_id']
        added_persons.append(person)
        # enable the account
        self.driver.shell.UpdatePerson(person['person_id'], {'enabled': True})
        # add person to site
        self.driver.shell.AddPersonToSite(added_user_id, added_user['site'])
        for key_string in added_user.get('keys', []):
            key = {'key': key_string, 'key_type': 'ssh'}
            key['key_id'] = self.driver.shell.AddPersonKey(person['person_id'], key)
            person['keys'].append(key)
        # add the registry record
        # if sfa_peer:
        #     peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \
        #         'pointer': person['person_id']}
        #     self.registry.register_peer_object(self.credential, peer_dict)

    for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
        # add person to the slice
        self.driver.shell.AddPersonToSlice(added_slice_user_id, slice_record['name'])
        # if this is a peer record then it should already be bound to a peer.
        # no need to return worry about it getting bound later
    return added_persons
def verify_keys(self, persons, users, peer, options=None):
    """
    Register the requested users' ssh keys that PLC does not know yet and,
    unless options['append'] is True (the default), delete existing keys
    that were not requested.

    :param persons: existing PLC person records ('person_id', 'key_ids', 'email')
    :param users: requested user dicts ('email', 'keys', and for federated
        slices 'key_ids' / 'person_id')
    :param peer: peer record when federated, else None
    :param options: 'append' (default True) keeps unrequested keys
    """
    # avoid the shared-mutable-default-argument pitfall
    options = options if options is not None else {}
    # existing keys, indexed by key string -> key_id
    key_ids = []
    for person in persons:
        key_ids.extend(person['key_ids'])
    keylist = self.driver.shell.GetKeys(key_ids, ['key_id', 'key'])
    keydict = {}
    for key in keylist:
        keydict[key['key']] = key['key_id']
    existing_keys = keydict.keys()
    persondict = {}
    for person in persons:
        persondict[person['email']] = person

    # add new keys
    requested_keys = []
    updated_persons = []
    for user in users:
        user_keys = user.get('keys', [])
        updated_persons.append(user)
        for key_string in user_keys:
            requested_keys.append(key_string)
            if key_string not in existing_keys:
                key = {'key': key_string, 'key_type': 'ssh'}
                # BUGFIX: guard so the finally clause cannot hit an unbound
                # 'person' if the persondict lookup raises before assignment
                person = None
                try:
                    if peer:
                        person = persondict[user['email']]
                        self.driver.shell.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
                    key['key_id'] = self.driver.shell.AddPersonKey(user['email'], key)
                    if peer:
                        key_index = user_keys.index(key['key'])
                        remote_key_id = user['key_ids'][key_index]
                        self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
                finally:
                    # always re-bind the person we unbound above
                    if peer and person:
                        self.driver.shell.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])

    # remove old keys (only if we are not appending)
    append = options.get('append', True)
    if append == False:
        removed_keys = set(existing_keys).difference(requested_keys)
        # BUGFIX: the original compared keydict[key_string] (a key_id) against
        # removed_keys (key strings) and passed the key string to DeleteKey,
        # so stale keys were never actually deleted. Compare key strings and
        # delete by id instead.
        for key_string in keydict:
            if key_string in removed_keys:
                stale_key_id = keydict[key_string]
                try:
                    if peer:
                        self.driver.shell.UnBindObjectFromPeer('key', stale_key_id, peer['shortname'])
                    self.driver.shell.DeleteKey(stale_key_id)
                except:
                    # best-effort cleanup, as in the original
                    pass
def verify_slice_attributes(self, slice, requested_slice_attributes, options={}, admin=False):
    """
    Sync the slice's PLC tags with the requested attribute list.

    Requested attributes whose tag type exists (and, for non-admin calls,
    is user-manageable) are added when missing; existing tags that were not
    requested are removed unless options['append'] is True (the default).

    :param slice: PLC slice dict ('slice_id', 'name')
    :param requested_slice_attributes: dicts with 'name', 'value' and
        optionally 'node_id'
    :param options: 'append' (default True) keeps unrequested tags
    :param admin: when True, do not restrict to user-manageable tag types
    """
    append = options.get('append', True)
    # get list of attributes users ar able to manage
    # NOTE(review): the local name 'filter' shadows the builtin (the builtin
    # is not used below, so this is only a readability hazard)
    filter = {'category': '*slice*'}
    if not admin:
        filter['|roles'] = ['user']
    slice_attributes = self.driver.shell.GetTagTypes(filter)
    valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]

    # get sliver attributes
    added_slice_attributes = []
    removed_slice_attributes = []
    ignored_slice_attribute_names = []
    existing_slice_attributes = self.driver.shell.GetSliceTags({'slice_id': slice['slice_id']})

    # get attributes that should be removed
    for slice_tag in existing_slice_attributes:
        # NOTE(review): ignored_slice_attribute_names starts empty and is only
        # appended inside this very branch, so the branch is never entered --
        # it was presumably meant to be seeded with admin-only tag names.
        if slice_tag['tagname'] in ignored_slice_attribute_names:
            # If a slice already has a admin only role it was probably given to them by an
            # admin, so we should ignore it.
            ignored_slice_attribute_names.append(slice_tag['tagname'])
        else:
            # If an existing slice attribute was not found in the request it should
            # be removed
            attribute_found=False
            for requested_attribute in requested_slice_attributes:
                if requested_attribute['name'] == slice_tag['tagname'] and \
                   requested_attribute['value'] == slice_tag['value']:
                    attribute_found=True
                    break
            if not attribute_found and not append:
                removed_slice_attributes.append(slice_tag)

    # get attributes that should be added:
    for requested_attribute in requested_slice_attributes:
        # if the requested attribute wasn't found we should add it
        if requested_attribute['name'] in valid_slice_attribute_names:
            attribute_found = False
            for existing_attribute in existing_slice_attributes:
                if requested_attribute['name'] == existing_attribute['tagname'] and \
                   requested_attribute['value'] == existing_attribute['value']:
                    attribute_found=True
                    break
            if not attribute_found:
                added_slice_attributes.append(requested_attribute)

    # remove stale attributes
    for attribute in removed_slice_attributes:
        try:
            self.driver.shell.DeleteSliceTag(attribute['slice_tag_id'])
        except Exception, e:
            logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
                % (slice['name'], attribute['value'], attribute.get('node_id'), str(e)))

    # add requested_attributes
    for attribute in added_slice_attributes:
        try:
            self.driver.shell.AddSliceTag(slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
        except Exception, e:
            logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
                % (slice['name'], attribute['value'], attribute.get('node_id'), str(e)))
PL leases: take into account only the leases in the aggregate network
from types import StringTypes
from collections import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_epoch
from sfa.util.sfalogging import logger
from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
from sfa.rspecs.rspec import RSpec
from sfa.planetlab.vlink import VLink
from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname
import time
MAXINT = 2L**31-1
class PlSlices:
rspec_to_slice_tag = {'max_rate':'net_max_rate'}
def __init__(self, driver):
self.driver = driver
def get_slivers(self, xrn, node=None):
hrn, type = urn_to_hrn(xrn)
slice_name = hrn_to_pl_slicename(hrn)
# XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
# of doing all of this?
#return self.driver.shell.GetSliceTicket(self.auth, slice_name)
# from PLCAPI.GetSlivers.get_slivers()
slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
slices = self.driver.shell.GetSlices(slice_name, slice_fields)
# Build up list of users and slice attributes
person_ids = set()
all_slice_tag_ids = set()
for slice in slices:
person_ids.update(slice['person_ids'])
all_slice_tag_ids.update(slice['slice_tag_ids'])
person_ids = list(person_ids)
all_slice_tag_ids = list(all_slice_tag_ids)
# Get user information
all_persons_list = self.driver.shell.GetPersons({'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
all_persons = {}
for person in all_persons_list:
all_persons[person['person_id']] = person
# Build up list of keys
key_ids = set()
for person in all_persons.values():
key_ids.update(person['key_ids'])
key_ids = list(key_ids)
# Get user account keys
all_keys_list = self.driver.shell.GetKeys(key_ids, ['key_id', 'key', 'key_type'])
all_keys = {}
for key in all_keys_list:
all_keys[key['key_id']] = key
# Get slice attributes
all_slice_tags_list = self.driver.shell.GetSliceTags(all_slice_tag_ids)
all_slice_tags = {}
for slice_tag in all_slice_tags_list:
all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
slivers = []
for slice in slices:
keys = []
for person_id in slice['person_ids']:
if person_id in all_persons:
person = all_persons[person_id]
if not person['enabled']:
continue
for key_id in person['key_ids']:
if key_id in all_keys:
key = all_keys[key_id]
keys += [{'key_type': key['key_type'],
'key': key['key']}]
attributes = []
# All (per-node and global) attributes for this slice
slice_tags = []
for slice_tag_id in slice['slice_tag_ids']:
if slice_tag_id in all_slice_tags:
slice_tags.append(all_slice_tags[slice_tag_id])
# Per-node sliver attributes take precedence over global
# slice attributes, so set them first.
# Then comes nodegroup slice attributes
# Followed by global slice attributes
sliver_attributes = []
if node is not None:
for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
sliver_attributes.append(sliver_attribute['tagname'])
attributes.append({'tagname': sliver_attribute['tagname'],
'value': sliver_attribute['value']})
# set nodegroup slice attributes
for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
# Do not set any nodegroup slice attributes for
# which there is at least one sliver attribute
# already set.
if slice_tag not in slice_tags:
attributes.append({'tagname': slice_tag['tagname'],
'value': slice_tag['value']})
for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
# Do not set any global slice attributes for
# which there is at least one sliver attribute
# already set.
if slice_tag['tagname'] not in sliver_attributes:
attributes.append({'tagname': slice_tag['tagname'],
'value': slice_tag['value']})
# XXX Sanity check; though technically this should be a system invariant
# checked with an assertion
if slice['expires'] > MAXINT: slice['expires']= MAXINT
slivers.append({
'hrn': hrn,
'name': slice['name'],
'slice_id': slice['slice_id'],
'instantiation': slice['instantiation'],
'expires': slice['expires'],
'keys': keys,
'attributes': attributes
})
return slivers
def get_peer(self, xrn):
hrn, type = urn_to_hrn(xrn)
# Becaues of myplc federation, we first need to determine if this
# slice belongs to out local plc or a myplc peer. We will assume it
# is a local site, unless we find out otherwise
peer = None
# get this slice's authority (site)
slice_authority = get_authority(hrn)
# get this site's authority (sfa root authority or sub authority)
site_authority = get_authority(slice_authority).lower()
# check if we are already peered with this site_authority, if so
peers = self.driver.shell.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
for peer_record in peers:
names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
if site_authority in names:
peer = peer_record
return peer
def get_sfa_peer(self, xrn):
hrn, type = urn_to_hrn(xrn)
# return the authority for this hrn or None if we are the authority
sfa_peer = None
slice_authority = get_authority(hrn)
site_authority = get_authority(slice_authority)
if site_authority != self.driver.hrn:
sfa_peer = site_authority
return sfa_peer
def verify_slice_leases(self, slice, rspec_requested_leases, peer):
leases = self.driver.shell.GetLeases({'name':slice['name'], 'clip':int(time.time())}, ['lease_id','name', 'hostname', 't_from', 't_until'])
grain = self.driver.shell.GetLeaseGranularity()
requested_leases = []
for lease in rspec_requested_leases:
requested_lease = {}
slice_name = hrn_to_pl_slicename(lease['slice_id'])
if slice_name != slice['name']:
continue
elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn:
continue
hostname = xrn_to_hostname(lease['component_id'])
# fill the requested node with nitos ids
requested_lease['name'] = slice['name']
requested_lease['hostname'] = hostname
requested_lease['t_from'] = int(lease['start_time'])
requested_lease['t_until'] = int(lease['duration']) * grain + int(lease['start_time'])
requested_leases.append(requested_lease)
# prepare actual slice leases by lease_id
leases_by_id = {}
for lease in leases:
leases_by_id[lease['lease_id']] = {'name': lease['name'], 'hostname': lease['hostname'], \
't_from': lease['t_from'], 't_until': lease['t_until']}
added_leases = []
kept_leases_id = []
deleted_leases_id = []
for lease_id in leases_by_id:
if leases_by_id[lease_id] not in requested_leases:
deleted_leases_id.append(lease_id)
else:
kept_leases_id.append(lease_id)
requested_leases.remove(leases_by_id[lease_id])
added_leases = requested_leases
try:
if peer:
self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
self.driver.shell.DeleteLeases(deleted_leases_id)
for lease in added_leases:
self.driver.shell.AddLeases(lease['hostname'], slice['name'], lease['t_from'], lease['t_until'])
except:
logger.log_exc('Failed to add/remove slice leases')
return leases
def verify_slice_nodes(self, slice, slivers, peer):
nodes = self.driver.shell.GetNodes(slice['node_ids'], ['node_id', 'hostname', 'interface_ids'])
current_slivers = [node['hostname'] for node in nodes]
requested_slivers = []
tags = []
for node in slivers:
hostname = None
if node.get('component_name'):
hostname = node.get('component_name').strip()
elif node.get('component_id'):
hostname = xrn_to_hostname(node.get('component_id').strip())
if node.get('client_id'):
tags.append({'slicename': slice['name'],
'tagname': 'client_id',
'value': node['client_id'],
'node': hostname})
if hostname:
requested_slivers.append(hostname)
# remove nodes not in rspec
deleted_nodes = list(set(current_slivers).difference(requested_slivers))
# add nodes from rspec
added_nodes = list(set(requested_slivers).difference(current_slivers))
try:
if peer:
self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
self.driver.shell.AddSliceToNodes(slice['name'], added_nodes)
self.driver.shell.DeleteSliceFromNodes(slice['name'], deleted_nodes)
except:
logger.log_exc('Failed to add/remove slice from nodes')
# add tags
for tag in tags:
try:
self.driver.shell.AddSliceTag(tag['slicename'], tag['tagname'], tag['value'], tag['node'])
except:
logger.log_exc('Failed to add slice tag')
return nodes
def free_egre_key(self):
used = set()
for tag in self.driver.shell.GetSliceTags({'tagname': 'egre_key'}):
used.add(int(tag['value']))
for i in range(1, 256):
if i not in used:
key = i
break
else:
raise KeyError("No more EGRE keys available")
return str(key)
def verify_slice_links(self, slice, requested_links, nodes):
# nodes is undefined here
if not requested_links:
return
# build dict of nodes
nodes_dict = {}
interface_ids = []
for node in nodes:
nodes_dict[node['node_id']] = node
interface_ids.extend(node['interface_ids'])
# build dict of interfaces
interfaces = self.driver.shell.GetInterfaces(interface_ids)
interfaces_dict = {}
for interface in interfaces:
interfaces_dict[interface['interface_id']] = interface
slice_tags = []
# set egre key
slice_tags.append({'name': 'egre_key', 'value': self.free_egre_key()})
# set netns
slice_tags.append({'name': 'netns', 'value': '1'})
# set cap_net_admin
# need to update the attribute string?
slice_tags.append({'name': 'capabilities', 'value': 'CAP_NET_ADMIN'})
for link in requested_links:
# get the ip address of the first node in the link
ifname1 = Xrn(link['interface1']['component_id']).get_leaf()
(node_raw, device) = ifname1.split(':')
node_id = int(node_raw.replace('node', ''))
node = nodes_dict[node_id]
if1 = interfaces_dict[node['interface_ids'][0]]
ipaddr = if1['ip']
topo_rspec = VLink.get_topo_rspec(link, ipaddr)
# set topo_rspec tag
slice_tags.append({'name': 'topo_rspec', 'value': str([topo_rspec]), 'node_id': node_id})
# set vini_topo tag
slice_tags.append({'name': 'vini_topo', 'value': 'manual', 'node_id': node_id})
#self.driver.shell.AddSliceTag(slice['name'], 'topo_rspec', str([topo_rspec]), node_id)
self.verify_slice_attributes(slice, slice_tags, {'append': True}, admin=True)
def handle_peer(self, site, slice, persons, peer):
    """Bind freshly created local records (site, slice, persons, keys) to *peer*.

    On a bind failure the partially created local object is deleted and the
    original exception re-raised, so the caller never ends up with an
    orphaned half-bound record. Key bind failures are best-effort: the local
    key is dropped and a warning logged, but processing continues.

    :returns: *slice*, unchanged.
    """
    if peer:
        # bind site; roll the local site back on failure
        try:
            if site:
                self.driver.shell.BindObjectToPeer('site', site['site_id'], peer['shortname'], slice['site_id'])
        except Exception:
            self.driver.shell.DeleteSite(site['site_id'])
            # bare raise preserves the original traceback
            raise
        # bind slice; roll the local slice back on failure
        try:
            if slice:
                self.driver.shell.BindObjectToPeer('slice', slice['slice_id'], peer['shortname'], slice['slice_id'])
        except Exception:
            self.driver.shell.DeleteSlice(slice['slice_id'])
            raise
        # bind each person and their keys; roll the local person back on failure
        for person in persons:
            try:
                self.driver.shell.BindObjectToPeer('person',
                    person['person_id'], peer['shortname'], person['peer_person_id'])
                for (key, remote_key_id) in zip(person['keys'], person['key_ids']):
                    try:
                        self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
                    except Exception:
                        # best effort: drop the local key, log, keep going
                        # (original called logger(...) as a function, which
                        # would itself raise; logger.warn matches the style
                        # used elsewhere in this module)
                        self.driver.shell.DeleteKey(key['key_id'])
                        logger.warn("failed to bind key: %s to peer: %s " % (key['key_id'], peer['shortname']))
            except Exception:
                self.driver.shell.DeletePerson(person['person_id'])
                raise
    return slice
def verify_site(self, slice_xrn, slice_record=None, peer=None, sfa_peer=None, options=None):
    """Ensure a PLC site exists for the authority of *slice_xrn* and return it.

    Creates the site when missing (optionally recording the remote peer site
    id); for an existing site bound to *peer*, unbinds it so it can be
    modified (handle_peer re-binds it later).

    :param slice_xrn: slice URN whose authority determines the site
    :param slice_record: optional dict; 'site_id' is used as the remote id
        when *peer* is given
    :param peer: peer record dict or None
    :param sfa_peer: unused here; kept for interface compatibility
    :param options: unused here; kept for interface compatibility
    :returns: the PLC site record dict
    """
    # Guard against the shared-mutable-default pitfall ({} defaults).
    if slice_record is None:
        slice_record = {}
    if options is None:
        options = {}
    (slice_hrn, _type) = urn_to_hrn(slice_xrn)
    # NOTE(review): site_hrn is currently unused (it fed a now-disabled
    # registry registration); kept because get_authority also validates
    # the hrn -- confirm before removing.
    site_hrn = get_authority(slice_hrn)
    # login base can't be longer than 20 characters
    slicename = hrn_to_pl_slicename(slice_hrn)
    authority_name = slicename.split('_')[0]
    login_base = authority_name[:20]
    sites = self.driver.shell.GetSites(login_base)
    if not sites:
        # create new site record
        site = {'name': 'geni.%s' % authority_name,
                'abbreviated_name': authority_name,
                'login_base': login_base,
                'max_slices': 100,
                'max_slivers': 1000,
                'enabled': True,
                'peer_site_id': None}
        if peer:
            site['peer_site_id'] = slice_record.get('site_id', None)
        site['site_id'] = self.driver.shell.AddSite(site)
        # exempt federated sites from monitor policies
        self.driver.shell.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
    else:
        site = sites[0]
        if peer:
            # unbind from peer so we can modify if necessary. Will bind back later
            self.driver.shell.UnBindObjectFromPeer('site', site['site_id'], peer['shortname'])
    return site
def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, options=None):
    """Ensure a PLC slice exists for *slice_hrn* and return its record.

    Creates the slice when missing (url/description default to the hrn).
    For an existing slice bound to *peer*, unbinds it (handle_peer re-binds
    later) and refreshes the expiration time from slice_record['expires']
    when one is supplied and differs from the stored value.

    :param sfa_peer: unused here; kept for interface compatibility
    :returns: the PLC slice record dict
    """
    # Guard against the shared-mutable-default pitfall.
    if options is None:
        options = {}
    slicename = hrn_to_pl_slicename(slice_hrn)
    slices = self.driver.shell.GetSlices([slicename])
    if not slices:
        slice = {'name': slicename,
                 'url': slice_record.get('url', slice_hrn),
                 'description': slice_record.get('description', slice_hrn)}
        # add the slice
        slice['slice_id'] = self.driver.shell.AddSlice(slice)
        slice['node_ids'] = []
        slice['person_ids'] = []
        if peer:
            # remember the remote id so handle_peer can bind it back later
            slice['peer_slice_id'] = slice_record.get('slice_id', None)
    else:
        slice = slices[0]
        if peer:
            slice['peer_slice_id'] = slice_record.get('slice_id', None)
            # unbind from peer so we can modify if necessary. Will bind back later
            self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
        # Update the existing record (e.g. expires field) with the latest info.
        if slice_record.get('expires'):
            requested_expires = int(datetime_to_epoch(utcparse(slice_record['expires'])))
            if requested_expires and slice['expires'] != requested_expires:
                self.driver.shell.UpdateSlice(slice['slice_id'], {'expires': requested_expires})
    return slice
#def get_existing_persons(self, users):
def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, options=None):
    """Reconcile the slice's membership with the requested *users*.

    Resolves each requested user to an existing PLC account -- by email, or
    by site + username (matching '<username>@...' emails of people at the
    user's site, falling back to a synthetic '<username>@geni.net') --
    creates accounts that do not exist yet, adds missing members to the
    slice and, unless options['append'] is true (the default), removes
    stale members. Keys of already-present members are refreshed via
    verify_keys.

    :returns: list of person records created by this call.
    """
    # Guard against the shared-mutable-default pitfall.
    if options is None:
        options = {}
    users_by_email = {}
    users_by_site = defaultdict(list)
    users_dict = {}
    # Normalize the requested users and bucket them by email or by site.
    for user in users:
        user['urn'] = user['urn'].lower()
        hrn, _type = urn_to_hrn(user['urn'])
        username = get_leaf(hrn)
        login_base = PlXrn(xrn=user['urn']).pl_login_base()
        user['username'] = username
        user['site'] = login_base
        if 'email' in user:
            user['email'] = user['email'].lower()
            users_by_email[user['email']] = user
            users_dict[user['email']] = user
        else:
            # no email supplied: resolve (or fabricate) one below
            users_by_site[user['site']].append(user)
    # start building a list of existing users
    existing_user_ids = []
    existing_user_ids_filter = []
    if users_by_email:
        existing_user_ids_filter.extend(users_by_email.keys())
    if users_by_site:
        for login_base in users_by_site:
            users = users_by_site[login_base]
            for user in users:
                existing_user_ids_filter.append(user['username'] + '@geni.net')
    if existing_user_ids_filter:
        # get existing users by email
        existing_users = self.driver.shell.GetPersons({'email': existing_user_ids_filter},
                                                      ['person_id', 'key_ids', 'email'])
        existing_user_ids.extend([user['email'] for user in existing_users])
    if users_by_site:
        # get a list of user sites (based on requested user urns)
        site_list = self.driver.shell.GetSites(users_by_site.keys(),
                                               ['site_id', 'login_base', 'person_ids'])
        # get all existing users at these sites
        sites = {}
        site_user_ids = []
        for site in site_list:
            sites[site['site_id']] = site
            site_user_ids.extend(site['person_ids'])
        existing_site_persons_list = self.driver.shell.GetPersons(site_user_ids,
                                                                  ['person_id', 'key_ids', 'email', 'site_ids'])
        # every requested site user is either an existing or a new account
        for login_base in users_by_site:
            requested_site_users = users_by_site[login_base]
            for requested_user in requested_site_users:
                user_found = False
                for existing_user in existing_site_persons_list:
                    for site_id in existing_user['site_ids']:
                        if site_id in sites:
                            site = sites[site_id]
                            # match on site plus '<username>@' email prefix
                            if login_base == site['login_base'] and \
                               existing_user['email'].startswith(requested_user['username'] + '@'):
                                existing_user_ids.append(existing_user['email'])
                                requested_user['email'] = existing_user['email']
                                users_dict[existing_user['email']] = requested_user
                                user_found = True
                                break
                    if user_found:
                        break
                if user_found == False:
                    # no match: fabricate a placeholder geni.net email
                    fake_email = requested_user['username'] + '@geni.net'
                    requested_user['email'] = fake_email
                    users_dict[fake_email] = requested_user
    # requested slice users
    requested_user_ids = users_dict.keys()
    # existing slice users
    existing_slice_users_filter = {'person_id': slice_record.get('person_ids', [])}
    existing_slice_users = self.driver.shell.GetPersons(existing_slice_users_filter,
                                                        ['person_id', 'key_ids', 'email'])
    existing_slice_user_ids = [user['email'] for user in existing_slice_users]
    # users to be added, removed or updated
    added_user_ids = set(requested_user_ids).difference(existing_user_ids)
    added_slice_user_ids = set(requested_user_ids).difference(existing_slice_user_ids)
    removed_user_ids = set(existing_slice_user_ids).difference(requested_user_ids)
    updated_user_ids = set(existing_slice_user_ids).intersection(requested_user_ids)
    # Remove stale users only when not appending (append is the default).
    append = options.get('append', True)
    if append == False:
        for removed_user_id in removed_user_ids:
            self.driver.shell.DeletePersonFromSlice(removed_user_id, slice_record['name'])
    # refresh keys of users that are already on the slice
    updated_users_list = [user for user in users_dict.values() if user['email'] in
                          updated_user_ids]
    self.verify_keys(existing_slice_users, updated_users_list, peer, options)
    added_persons = []
    # add new users
    for added_user_id in added_user_ids:
        added_user = users_dict[added_user_id]
        hrn, _type = urn_to_hrn(added_user['urn'])
        person = {
            'first_name': added_user.get('first_name', hrn),
            'last_name': added_user.get('last_name', hrn),
            'email': added_user_id,
            'peer_person_id': None,
            'keys': [],
            'key_ids': added_user.get('key_ids', []),
        }
        person['person_id'] = self.driver.shell.AddPerson(person)
        if peer:
            # remote id so handle_peer can bind the person back to the peer
            person['peer_person_id'] = added_user['person_id']
        added_persons.append(person)
        # enable the account
        self.driver.shell.UpdatePerson(person['person_id'], {'enabled': True})
        # add person to site
        self.driver.shell.AddPersonToSite(added_user_id, added_user['site'])
        for key_string in added_user.get('keys', []):
            key = {'key': key_string, 'key_type': 'ssh'}
            key['key_id'] = self.driver.shell.AddPersonKey(person['person_id'], key)
            person['keys'].append(key)
    for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
        # add person to the slice
        self.driver.shell.AddPersonToSlice(added_slice_user_id, slice_record['name'])
    # if this is a peer record it should already be bound to a peer;
    # no need to worry about it getting bound later
    return added_persons
def verify_keys(self, persons, users, peer, options=None):
    """Sync PLC key records with the keys requested for *users*.

    Adds any requested key string not already stored for *persons*; when
    options['append'] is false, deletes stored keys that were not requested.
    Peer-bound persons are temporarily unbound while their keys change.

    :param persons: existing PLC person records ('person_id', 'key_ids', 'email')
    :param users: requested user dicts ('email', optional 'keys'/'key_ids')
    :param peer: peer record dict or None
    """
    # Guard against the shared-mutable-default pitfall.
    if options is None:
        options = {}
    # Map each existing key string to its PLC key_id.
    key_ids = []
    for person in persons:
        key_ids.extend(person['key_ids'])
    keylist = self.driver.shell.GetKeys(key_ids, ['key_id', 'key'])
    keydict = {}
    for key in keylist:
        keydict[key['key']] = key['key_id']
    existing_keys = keydict.keys()
    persondict = {}
    for person in persons:
        persondict[person['email']] = person
    # add new keys
    requested_keys = []
    for user in users:
        user_keys = user.get('keys', [])
        for key_string in user_keys:
            requested_keys.append(key_string)
            if key_string not in existing_keys:
                key = {'key': key_string, 'key_type': 'ssh'}
                try:
                    if peer:
                        # unbind so the person record can be modified
                        person = persondict[user['email']]
                        self.driver.shell.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
                    key['key_id'] = self.driver.shell.AddPersonKey(user['email'], key)
                    if peer:
                        # bind the new key to its remote counterpart
                        key_index = user_keys.index(key['key'])
                        remote_key_id = user['key_ids'][key_index]
                        self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
                finally:
                    if peer:
                        # always re-bind the person, even on failure
                        self.driver.shell.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
    # remove old keys (only if we are not appending)
    append = options.get('append', True)
    if append == False:
        removed_keys = set(existing_keys).difference(requested_keys)
        # BUGFIX: the original compared key *ids* against this set of key
        # *strings* and passed key strings to DeleteKey, so stale keys were
        # never removed. Look the id up from the key string instead.
        for key_string in removed_keys:
            key_id = keydict[key_string]
            try:
                if peer:
                    self.driver.shell.UnBindObjectFromPeer('key', key_id, peer['shortname'])
                self.driver.shell.DeleteKey(key_id)
            except Exception:
                # best effort: a failed delete must not abort the sync
                pass
def verify_slice_attributes(self, slice, requested_slice_attributes, options=None, admin=False):
    """Sync the slice's tags with *requested_slice_attributes*.

    Adds requested tags (name/value, optional node_id) that are missing and,
    unless options['append'] is true (the default), deletes existing tags
    that were not requested. Only tag types the caller may manage are added
    ('user' role unless *admin* is set).

    :param slice: slice record dict with 'slice_id' and 'name'
    :param requested_slice_attributes: list of {'name', 'value'[, 'node_id']}
    """
    # Guard against the shared-mutable-default pitfall.
    if options is None:
        options = {}
    append = options.get('append', True)
    # get the list of attributes users are able to manage
    # (renamed from 'filter': don't shadow the builtin)
    tag_filter = {'category': '*slice*'}
    if not admin:
        tag_filter['|roles'] = ['user']
    slice_attributes = self.driver.shell.GetTagTypes(tag_filter)
    valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]
    added_slice_attributes = []
    removed_slice_attributes = []
    # NOTE(review): this list starts empty and only grows inside the branch
    # it guards, so the branch never fires; admin-only tags were probably
    # meant to be skipped here -- behavior kept as-is, confirm intent.
    ignored_slice_attribute_names = []
    existing_slice_attributes = self.driver.shell.GetSliceTags({'slice_id': slice['slice_id']})
    # collect existing tags that should be removed
    for slice_tag in existing_slice_attributes:
        if slice_tag['tagname'] in ignored_slice_attribute_names:
            # If a slice already has an admin only role it was probably given
            # to them by an admin, so we should ignore it.
            ignored_slice_attribute_names.append(slice_tag['tagname'])
        else:
            # an existing tag not present in the request is removed
            # (only when not appending)
            attribute_found = False
            for requested_attribute in requested_slice_attributes:
                if requested_attribute['name'] == slice_tag['tagname'] and \
                   requested_attribute['value'] == slice_tag['value']:
                    attribute_found = True
                    break
            if not attribute_found and not append:
                removed_slice_attributes.append(slice_tag)
    # collect requested tags that should be added
    for requested_attribute in requested_slice_attributes:
        if requested_attribute['name'] in valid_slice_attribute_names:
            attribute_found = False
            for existing_attribute in existing_slice_attributes:
                if requested_attribute['name'] == existing_attribute['tagname'] and \
                   requested_attribute['value'] == existing_attribute['value']:
                    attribute_found = True
                    break
            if not attribute_found:
                added_slice_attributes.append(requested_attribute)
    # remove stale attributes
    for attribute in removed_slice_attributes:
        try:
            self.driver.shell.DeleteSliceTag(attribute['slice_tag_id'])
        except Exception as e:
            # BUGFIX: the original logged the slice name where the tag name
            # belongs, making the warning misleading.
            logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'
                        % (attribute['tagname'], attribute['value'], attribute.get('node_id'), str(e)))
    # add requested attributes
    for attribute in added_slice_attributes:
        try:
            self.driver.shell.AddSliceTag(slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
        except Exception as e:
            logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'
                        % (attribute['name'], attribute['value'], attribute.get('node_id'), str(e)))
|
# from winbase.h -- standard-handle identifiers accepted by GetStdHandle
STDOUT = -11
STDERR = -12
try:
    from ctypes import windll
except ImportError:
    # Not on Windows (or no windll): degrade to no-op stubs so the module
    # can still be imported on other platforms.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
else:
    from ctypes import (
        byref, Structure, c_char, c_short, c_uint32, c_ushort
    )
    # Console handles, fetched once at import time.
    handles = {
        STDOUT: windll.kernel32.GetStdHandle(STDOUT),
        STDERR: windll.kernel32.GetStdHandle(STDERR),
    }
    # ctypes aliases mirroring the Win32 typedef names used below.
    SHORT = c_short
    WORD = c_ushort
    DWORD = c_uint32
    TCHAR = c_char
    class COORD(Structure):
        """struct COORD in wincon.h: a console character-cell coordinate."""
        _fields_ = [
            ('X', SHORT),
            ('Y', SHORT),
        ]
    class SMALL_RECT(Structure):
        """struct SMALL_RECT in wincon.h: a console window rectangle."""
        _fields_ = [
            ("Left", SHORT),
            ("Top", SHORT),
            ("Right", SHORT),
            ("Bottom", SHORT),
        ]
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct CONSOLE_SCREEN_BUFFER_INFO in wincon.h."""
        # Field order must match the Win32 struct layout exactly.
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", WORD),
            ("srWindow", SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
    def GetConsoleScreenBufferInfo(stream_id):
        """Return the CONSOLE_SCREEN_BUFFER_INFO for the given handle id."""
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = windll.kernel32.GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        # This fails when imported via setup.py when installing using 'pip'
        # presumably the fix is that running setup.py should not trigger all
        # this activity.
        # assert success
        return csbi
    def SetConsoleTextAttribute(stream_id, attrs):
        """Set the character attributes used for subsequently written text."""
        handle = handles[stream_id]
        success = windll.kernel32.SetConsoleTextAttribute(handle, attrs)
        assert success
    def SetConsoleCursorPosition(stream_id, position):
        """Move the console cursor to *position*, an (X, Y) pair."""
        handle = handles[stream_id]
        position = COORD(*position)
        success = windll.kernel32.SetConsoleCursorPosition(handle, position)
        assert success
    def FillConsoleOutputCharacter(stream_id, char, length, start):
        """Write *char* *length* times starting at *start*; return cells written."""
        handle = handles[stream_id]
        char = TCHAR(char)
        length = DWORD(length)
        start = COORD(*start)
        num_written = DWORD(0)
        # AttributeError: function 'FillConsoleOutputCharacter' not found
        # could it just be that my types are wrong?
        success = windll.kernel32.FillConsoleOutputCharacter(
            handle, char, length, start, byref(num_written))
        assert success
        return num_written.value
if __name__=='__main__':
    # Manual smoke test (Windows only): dump the current console state.
    # Parenthesized single-argument print() works under Python 2 and 3
    # (the original bare print statements were Python-2-only).
    x = GetConsoleScreenBufferInfo(STDOUT)
    print(x.dwSize)
    print(x.dwCursorPosition)
    print(x.wAttributes)
    print(x.srWindow)
    print(x.dwMaximumWindowSize)
# python3 compatibility: use print() function calls in win32.py
# from winbase.h -- standard-handle identifiers accepted by GetStdHandle
STDOUT = -11
STDERR = -12
try:
    from ctypes import windll
except ImportError:
    # Non-Windows platform (no windll): fall back to no-op stubs so this
    # module stays importable everywhere.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
else:
    from ctypes import (
        byref, Structure, c_char, c_short, c_uint32, c_ushort
    )
    # Cache the two console handles once at import time.
    handles = {
        STDOUT: windll.kernel32.GetStdHandle(STDOUT),
        STDERR: windll.kernel32.GetStdHandle(STDERR),
    }
    # ctypes aliases matching the Win32 typedef names.
    SHORT = c_short
    WORD = c_ushort
    DWORD = c_uint32
    TCHAR = c_char
    class COORD(Structure):
        """struct COORD in wincon.h: a console character-cell coordinate."""
        _fields_ = [
            ('X', SHORT),
            ('Y', SHORT),
        ]
    class SMALL_RECT(Structure):
        """struct SMALL_RECT in wincon.h: a console window rectangle."""
        _fields_ = [
            ("Left", SHORT),
            ("Top", SHORT),
            ("Right", SHORT),
            ("Bottom", SHORT),
        ]
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct CONSOLE_SCREEN_BUFFER_INFO in wincon.h."""
        # Field order must mirror the Win32 struct layout exactly.
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", WORD),
            ("srWindow", SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
    def GetConsoleScreenBufferInfo(stream_id):
        """Return the CONSOLE_SCREEN_BUFFER_INFO for the given handle id."""
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = windll.kernel32.GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        # This fails when imported via setup.py when installing using 'pip'
        # presumably the fix is that running setup.py should not trigger all
        # this activity.
        # assert success
        return csbi
    def SetConsoleTextAttribute(stream_id, attrs):
        """Set the character attributes used for subsequently written text."""
        handle = handles[stream_id]
        success = windll.kernel32.SetConsoleTextAttribute(handle, attrs)
        assert success
    def SetConsoleCursorPosition(stream_id, position):
        """Move the console cursor to *position*, an (X, Y) pair."""
        handle = handles[stream_id]
        position = COORD(*position)
        success = windll.kernel32.SetConsoleCursorPosition(handle, position)
        assert success
    def FillConsoleOutputCharacter(stream_id, char, length, start):
        """Write *char* *length* times starting at *start*; return cells written."""
        handle = handles[stream_id]
        char = TCHAR(char)
        length = DWORD(length)
        start = COORD(*start)
        num_written = DWORD(0)
        # AttributeError: function 'FillConsoleOutputCharacter' not found
        # could it just be that my types are wrong?
        success = windll.kernel32.FillConsoleOutputCharacter(
            handle, char, length, start, byref(num_written))
        assert success
        return num_written.value
if __name__=='__main__':
    # Manual smoke test (Windows only): print every field of the current
    # console screen-buffer state.
    info = GetConsoleScreenBufferInfo(STDOUT)
    for field_name in ('dwSize', 'dwCursorPosition', 'wAttributes',
                       'srWindow', 'dwMaximumWindowSize'):
        print(getattr(info, field_name))
|
# -*- coding: utf-8 -*-
#
# TurboRedis documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 27 00:45:45 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTE: sys/os come from the sphinx-quickstart template; they are only
# needed if sys.path is amended below.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TurboRedis'
copyright = u'2014, Espen Notodden'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# (overridden to 'alabaster' at the bottom of this file)
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TurboRedisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'TurboRedis.tex', u'TurboRedis Documentation',
     u'Espen Notodden', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'turboredis', u'TurboRedis Documentation',
     [u'Espen Notodden'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'TurboRedis', u'TurboRedis Documentation',
     u'Espen Notodden', 'TurboRedis', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Alabaster theme setup (overrides the defaults configured above) -------
import alabaster
html_theme_path = [alabaster.get_path()]
# Register alabaster as an extension without clobbering any extensions
# configured earlier in this file (the original reassigned the whole list).
if 'alabaster' not in extensions:
    extensions.append('alabaster')
html_theme = 'alabaster'
html_sidebars = {
    '**': [
        'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
    ]
}
# set GitHub options for the theme
# -*- coding: utf-8 -*-
#
# TurboRedis documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 27 00:45:45 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTE: sys/os come from the sphinx-quickstart template; they are only
# needed if sys.path is amended below.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TurboRedis'
copyright = u'2014, Espen Notodden'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# (overridden to 'alabaster' at the bottom of this file)
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TurboRedisdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'TurboRedis.tex', u'TurboRedis Documentation',
     u'Espen Notodden', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'turboredis', u'TurboRedis Documentation',
     [u'Espen Notodden'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'TurboRedis', u'TurboRedis Documentation',
     u'Espen Notodden', 'TurboRedis', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
html_theme_options = {
"github_button": False,
"github_banner": True,
"github_user": "enotodden",
"github_repo": "turboredis"
}
import alabaster
html_theme_path = [alabaster.get_path()]
extensions = ['alabaster']
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
]
}
# ---
from django.conf.urls import patterns, include, url
from django.http import HttpResponse
from django.contrib import admin

# URL routes for the wedding site.
#
# BUGFIX: the robots.txt lambda below referenced HttpResponse without importing
# it, so every request to /robots.txt raised NameError at request time; the
# `from django.http import HttpResponse` line above fixes that.
# NOTE: route order matters -- the coded rsvp route must precede the bare one
# so an invitation code in the URL is captured.
urlpatterns = patterns('',
    # Serve a crawler-blocking robots.txt straight from the URLconf.
    url(r'^robots\.txt$', lambda r: HttpResponse("User-agent: *\nDisallow: /", content_type="text/plain")),
    url(r'^$', 'wedding.views.home', name='home'),
    url(r'^ceremony/', 'wedding.views.ceremony', name='ceremony'),
    url(r'^wa_reception/', 'wedding.views.reception', name='reception'),
    url(r'^wv_reception/', 'wedding.views.wv_reception', name='wv_reception'),
    url(r'^rsvp/(?P<code>[0-9a-z]+)$', 'wedding.views.rsvp', name='rsvp'),
    url(r'^rsvp/', 'wedding.views.rsvp', name='rsvp'),
    url(r'^admin/', include(admin.site.urls)),
)
# fixed bug in robots.txt
from django.conf.urls import patterns, include, url
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib import admin


def _robots_txt(request):
    """Return a plain-text robots.txt that disallows all crawlers."""
    return HttpResponse("User-agent: *\nDisallow: /", content_type="text/plain")

# Route table for the wedding site. The coded rsvp route must stay ahead of
# the bare rsvp route so an invitation code in the URL is captured.
urlpatterns = patterns(
    '',
    url(r'^robots\.txt$', _robots_txt),
    url(r'^$', 'wedding.views.home', name='home'),
    url(r'^ceremony/', 'wedding.views.ceremony', name='ceremony'),
    url(r'^wa_reception/', 'wedding.views.reception', name='reception'),
    url(r'^wv_reception/', 'wedding.views.wv_reception', name='wv_reception'),
    url(r'^rsvp/(?P<code>[0-9a-z]+)$', 'wedding.views.rsvp', name='rsvp'),
    url(r'^rsvp/', 'wedding.views.rsvp', name='rsvp'),
    url(r'^admin/', include(admin.site.urls)),
)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
import numpy as np
import nibabel as nib
from fcnn_lung2d import BatcherCTLung2D
from fcnn_lesion3d import BatcherCTLesion3D
import json
import skimage.io as skio
import app.core.preprocessing as preproc
from app.core.preprocessing import resizeNii, resize3D
#########################################
def segmentLungs25D(pathInpNii, dirWithModel, pathOutNii=None, outSize=None, batchSize=8, isDebug=False, threshold=None):
    """Segment lungs on a CT volume with the 2.5D FCNN model.

    :param pathInpNii: path to an input Nifti file, or an already-loaded
        nibabel image object (str/unicode selects the file path branch).
    :param dirWithModel: directory holding both the model JSON and mean data.
    :param pathOutNii: if given, the mask is saved there and None is returned;
        otherwise the mask is returned as a nibabel image.
    :param outSize: optional target shape to resize the output mask to.
    :param batchSize: inference batch size.
    :param isDebug: print the model summary and verbose inference output.
    :param threshold: if given, binarize the probability mask at this value.
    :raises Exception: when input file, model dir or output dir is missing.
    """
    if isinstance(pathInpNii,str) or isinstance(pathInpNii,unicode):
        isInpFromFile = True
        if not os.path.isfile(pathInpNii):
            raise Exception('Cant find input file [%s]' % pathInpNii)
    else:
        # caller passed a nibabel image object directly, not a path
        isInpFromFile = False
    if not os.path.isdir(dirWithModel):
        raise Exception('Cant find directory with model [%s]' % dirWithModel)
    if pathOutNii is not None:
        # fail fast: the output directory must already exist
        outDir = os.path.dirname(os.path.abspath(pathOutNii))
        if not os.path.isdir(outDir):
            raise Exception('Cant find output directory [%s], create directory for output file before this call' % outDir)
    batcherInfer = BatcherCTLung2D()
    batcherInfer.loadModelForInference(pathModelJson=dirWithModel, pathMeanData=dirWithModel)
    if isDebug:
        batcherInfer.model.summary()
    lstPathNifti = [ pathInpNii ]
    ret = batcherInfer.inference(lstPathNifti, batchSize=batchSize, isDebug=isDebug)
    # single-volume input -> single mask in the result list
    outMsk = ret[0]
    if isInpFromFile:
        tmpNii = nib.load(pathInpNii)
    else:
        tmpNii = pathInpNii
    #
    # wrap the mask in the input volume's affine/header so it stays aligned
    outMskNii = nib.Nifti1Image(outMsk.copy().astype(np.float16), tmpNii.affine, header=tmpNii.header)
    # resize if need:
    if outSize is not None:
        outMskNii = resizeNii(outMskNii, newSize=outSize)
    # threshold if need:
    if threshold is not None:
        outMskNii = nib.Nifti1Image( (outMskNii.get_data()>threshold).astype(np.float16), outMskNii.affine, header=outMskNii.header)
    # save if output path is present
    if pathOutNii is not None:
        nib.save(outMskNii, pathOutNii)
        # pathOutNii = '%s-segm.nii.gz' % pathInpNii
    else:
        return outMskNii
#########################################
def segmentLesions3D(pathInpNii, dirWithModel, pathOutNii=None, outSize=None, isDebug=False, threshold=None):
    """Segment lesions on a CT volume with the 3D FCNN model.

    Same contract as segmentLungs25D: *pathInpNii* is a Nifti path or a
    nibabel image; returns the mask image unless *pathOutNii* is given,
    in which case the mask is saved and None is returned.
    """
    if isinstance(pathInpNii, str) or isinstance(pathInpNii, unicode):
        isInpFromFile = True
        if not os.path.isfile(pathInpNii):
            raise Exception('Cant find input file [%s]' % pathInpNii)
    else:
        # caller passed a nibabel image object directly, not a path
        isInpFromFile = False
    if not os.path.isdir(dirWithModel):
        raise Exception('Cant find directory with model [%s]' % dirWithModel)
    if pathOutNii is not None:
        # fail fast: the output directory must already exist
        outDir = os.path.dirname(os.path.abspath(pathOutNii))
        if not os.path.isdir(outDir):
            raise Exception(
                'Cant find output directory [%s], create directory for output file before this call' % outDir)
    batcherInfer = BatcherCTLesion3D()
    batcherInfer.loadModelForInference(pathModelJson=dirWithModel, pathMeanData=dirWithModel)
    if isDebug:
        batcherInfer.model.summary()
    ret = batcherInfer.inference([pathInpNii], batchSize=1)
    # pick the class-1 (lesion) probability map; axis depends on backend layout
    if batcherInfer.isTheanoShape:
        outMsk = ret[0][1, :, :, :]
    else:
        outMsk = ret[0][:, :, :, 1]
    if isInpFromFile:
        tmpNii = nib.load(pathInpNii)
    else:
        tmpNii = pathInpNii
    #
    # wrap the mask in the input volume's affine/header so it stays aligned
    outMskNii = nib.Nifti1Image(outMsk.copy().astype(np.float16), tmpNii.affine, header=tmpNii.header)
    if outSize is not None:
        outMskNii = resizeNii(outMskNii, newSize=outSize)
    if threshold is not None:
        outMskNii = nib.Nifti1Image((outMskNii.get_data() > threshold).astype(np.float16),
                                    outMskNii.affine,
                                    header=outMskNii.header)
    if pathOutNii is not None:
        nib.save(outMskNii, pathOutNii)
        # pathOutNii = '%s-segm.nii.gz' % pathInpNii
    else:
        return outMskNii
#########################################
def api_segmentLungAndLesion(dirModelLung, dirModelLesion, series,
                             ptrLogger=None,
                             shape4Lung = (256, 256, 64), shape4Lesi = (128, 128, 64)):
    """Segment lungs and lesions for one *series* and save both masks to disk.

    Returns True on success; returns False (after logging) when the series is
    not ready, is already segmented, or any stage fails. Never raises.
    """
    # (1) msg-helpers: route messages to the provided logger, else stdout
    def msgInfo(msg):
        if ptrLogger is not None:
            ptrLogger.info(msg)
        else:
            print (msg)
    def msgErr(msg):
        if ptrLogger is not None:
            ptrLogger.error(msg)
        else:
            print (msg)
    # (2.1) check data
    if not series.isInitialized():
        msgErr('Series is not initialized, skip .. [{0}]'.format(series))
        return False
    # if not series.isDownloaded():
    #     msgErr('Series data is not downloaded, skip .. [{0}]'.format(series))
    #     return False
    if not series.isConverted():
        msgErr('Series DICOM data is not converted to Nifti format, skip .. [{0}]'.format(series))
        return False
    # (2.2) check existing files
    pathNii = series.pathConvertedNifti(isRelative=False)
    pathSegmLungs = series.pathPostprocLungs(isRelative=False)
    pathSegmLesions = series.pathPostprocLesions(isRelative=False)
    if os.path.isfile(pathSegmLungs) and os.path.isfile(pathSegmLesions):
        msgInfo('Series data is already segmented, skip task ... [{0}]'.format(series))
        return False
    else:
        # (2.3.1) load and resize: each model gets its own input resolution
        try:
            dataNii = nib.load(pathNii)
            shapeOrig = dataNii.shape
            niiResiz4Lung = resizeNii(dataNii, shape4Lung)
            niiResiz4Lesi = resizeNii(dataNii, shape4Lesi)
        except Exception as err:
            msgErr('Cant load and resize input nifti file [{0}] : {1}, for series [{2}]'.format(pathNii, err, series))
            return False
        # (2.3.2) segment lungs (binarized at 0.5, resized back to original shape)
        try:
            lungMask = segmentLungs25D(niiResiz4Lung,
                                       dirWithModel=dirModelLung,
                                       pathOutNii=None,
                                       outSize=shapeOrig,
                                       # outSize=shape4Lung,
                                       threshold=0.5)
        except Exception as err:
            msgErr('Cant segment lungs for file [{0}] : {1}, for series [{2}]'.format(pathNii, err, series))
            return False
        # (2.3.3) segment lesions (kept as probabilities: threshold=None)
        try:
            lesionMask = segmentLesions3D(niiResiz4Lesi,
                                          dirWithModel=dirModelLesion,
                                          pathOutNii=None,
                                          outSize=shapeOrig,
                                          # outSize=shape4Lung,
                                          threshold=None)
        except Exception as err:
            msgErr('Cant segment lesions for file [{0}] : {1}, for series [{2}]'.format(pathNii, err, series))
            return False
        # (2.3.4) save results
        try:
            nib.save(lungMask, pathSegmLungs)
            nib.save(lesionMask, pathSegmLesions)
        except Exception as err:
            msgErr('Cant save segmentation results to file [{0}] : {1}, for series [{2}]'.format(pathSegmLesions, err, series))
            return False
    return True
def api_generateAllReports(series,
                           dirModelLung, dirModelLesion,
                           ptrLogger=None,
                           shape4Lung = (256, 256, 64), shape4Lesi = (128, 128, 64)):
    """Full pipeline for one *series*: segmentation, lesion score, lung info,
    preview image and a JSON report written next to the series data.

    Returns True on success; returns False (after logging) on any failure.
    """
    # (1) msg-helpers: route messages to the provided logger, else stdout
    def msgInfo(msg):
        if ptrLogger is not None:
            ptrLogger.info(msg)
        else:
            print (msg)
    def msgErr(msg):
        if ptrLogger is not None:
            ptrLogger.error(msg)
        else:
            print (msg)
    # (0) prepare path-variables
    pathNii = series.pathConvertedNifti(isRelative=False)
    pathSegmLungs = series.pathPostprocLungs(isRelative=False)
    pathSegmLesions = series.pathPostprocLesions(isRelative=False)
    pathPreview = series.pathPostprocPreview(isRelative=False)
    pathReport = series.pathPostprocReport(isRelative=False)
    # (1) Lung/Lesions segmentation (may be a no-op if masks already exist)
    retSegm = api_segmentLungAndLesion(dirModelLung=dirModelLung,
                                       dirModelLesion=dirModelLesion,
                                       series=series,
                                       ptrLogger=ptrLogger,
                                       shape4Lung=shape4Lung,
                                       shape4Lesi=shape4Lesi)
    msgInfo('Segmentation Lung/Lesion isOk = {0}'.format(retSegm))
    # proceed only if both mask files exist on disk, regardless of retSegm
    if (not os.path.isfile(pathSegmLungs)) or (not os.path.isfile(pathSegmLesions)):
        msgErr('Cant segment Lung/Lesion, skip... [{0}]'.format(series))
        return False
    # (2) calc lesion score
    try:
        niiLung = nib.load(pathSegmLungs)
        niiLesion = nib.load(pathSegmLesions)
    except Exception as err:
        msgErr('Cant load Lung/Lesion Nifti data: [{0}], for {1}'.format(err, pathSegmLesions))
        return False
    try:
        retLesionScore = preproc.prepareLesionDistribInfo(niiLung, niiLesion)
    except Exception as err:
        msgErr('Cant evaluate Lesion-score: [{0}], for {1}'.format(err, pathSegmLesions))
        return False
    # (3) prepare short report about lungs
    try:
        niiLungDiv, _ = preproc.makeLungedMaskNii(niiLung)
        retLungInfo = preproc.prepareLungSizeInfoNii(niiLungDiv)
    except Exception as err:
        msgErr('Cant get Lung information : [{0}], for {1}'.format(err, series))
        return False
    # (4) generate preview & save preview image
    try:
        dataImg = preproc.normalizeCTImage(nib.load(pathNii).get_data())
        dataMsk = niiLung.get_data()
        dataLes = niiLesion.get_data()
        imgPreview = preproc.makePreview4Lesion(dataImg, dataMsk, dataLes)
        # NOTE(review): content-type should match the format implied by
        # pathPreview's extension (skio.imsave picks format from it) -- confirm.
        imgPreviewJson = {
            "description": "CT Lesion preview",
            "content-type": "image/png",
            "xsize": imgPreview.shape[1],
            "ysize": imgPreview.shape[0],
            "url": os.path.basename(pathPreview)
        }
        skio.imsave(pathPreview, imgPreview)
    except Exception as err:
        msgErr('Cant generate preview image : [{0}], for {1}'.format(err, series))
        return False
    # (5) generate & save JSON report
    try:
        jsonReport = preproc.getJsonReport(series=series,
                                           reportLesionScore=retLesionScore,
                                           reportLungs=retLungInfo,
                                           lstImgJson=[imgPreviewJson])
        with open(pathReport, 'w') as f:
            f.write(json.dumps(jsonReport, indent=4))
    except Exception as err:
        msgErr('Cant generate final JSON report : [{0}], for {1}'.format(err, series))
        return False
    # FIXME: append PDF generation in future here
    # (6) generate PDF preview
    return True
#########################################
# Module is meant to be imported; direct execution only prints a marker.
if __name__ == '__main__':
    print ('---')
# Fix. I think this field is not necessary.
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import os
import numpy as np
import nibabel as nib
from fcnn_lung2d import BatcherCTLung2D
from fcnn_lesion3d import BatcherCTLesion3D
import json
import skimage.io as skio
import app.core.preprocessing as preproc
from app.core.preprocessing import resizeNii, resize3D
#########################################
def segmentLungs25D(pathInpNii, dirWithModel, pathOutNii=None, outSize=None, batchSize=8, isDebug=False, threshold=None):
    """Segment lungs on a CT volume with the 2.5D FCNN model.

    :param pathInpNii: path to an input Nifti file, or an already-loaded
        nibabel image object (str/unicode selects the file path branch).
    :param dirWithModel: directory holding both the model JSON and mean data.
    :param pathOutNii: if given, the mask is saved there and None is returned;
        otherwise the mask is returned as a nibabel image.
    :param outSize: optional target shape to resize the output mask to.
    :param batchSize: inference batch size.
    :param isDebug: print the model summary and verbose inference output.
    :param threshold: if given, binarize the probability mask at this value.
    :raises Exception: when input file, model dir or output dir is missing.
    """
    if isinstance(pathInpNii,str) or isinstance(pathInpNii,unicode):
        isInpFromFile = True
        if not os.path.isfile(pathInpNii):
            raise Exception('Cant find input file [%s]' % pathInpNii)
    else:
        # caller passed a nibabel image object directly, not a path
        isInpFromFile = False
    if not os.path.isdir(dirWithModel):
        raise Exception('Cant find directory with model [%s]' % dirWithModel)
    if pathOutNii is not None:
        # fail fast: the output directory must already exist
        outDir = os.path.dirname(os.path.abspath(pathOutNii))
        if not os.path.isdir(outDir):
            raise Exception('Cant find output directory [%s], create directory for output file before this call' % outDir)
    batcherInfer = BatcherCTLung2D()
    batcherInfer.loadModelForInference(pathModelJson=dirWithModel, pathMeanData=dirWithModel)
    if isDebug:
        batcherInfer.model.summary()
    lstPathNifti = [ pathInpNii ]
    ret = batcherInfer.inference(lstPathNifti, batchSize=batchSize, isDebug=isDebug)
    # single-volume input -> single mask in the result list
    outMsk = ret[0]
    if isInpFromFile:
        tmpNii = nib.load(pathInpNii)
    else:
        tmpNii = pathInpNii
    #
    # wrap the mask in the input volume's affine/header so it stays aligned
    outMskNii = nib.Nifti1Image(outMsk.copy().astype(np.float16), tmpNii.affine, header=tmpNii.header)
    # resize if need:
    if outSize is not None:
        outMskNii = resizeNii(outMskNii, newSize=outSize)
    # threshold if need:
    if threshold is not None:
        outMskNii = nib.Nifti1Image( (outMskNii.get_data()>threshold).astype(np.float16), outMskNii.affine, header=outMskNii.header)
    # save if output path is present
    if pathOutNii is not None:
        nib.save(outMskNii, pathOutNii)
        # pathOutNii = '%s-segm.nii.gz' % pathInpNii
    else:
        return outMskNii
#########################################
def segmentLesions3D(pathInpNii, dirWithModel, pathOutNii=None, outSize=None, isDebug=False, threshold=None):
    """Segment lesions on a CT volume with the 3D FCNN model.

    Same contract as segmentLungs25D: *pathInpNii* is a Nifti path or a
    nibabel image; returns the mask image unless *pathOutNii* is given,
    in which case the mask is saved and None is returned.
    """
    if isinstance(pathInpNii, str) or isinstance(pathInpNii, unicode):
        isInpFromFile = True
        if not os.path.isfile(pathInpNii):
            raise Exception('Cant find input file [%s]' % pathInpNii)
    else:
        # caller passed a nibabel image object directly, not a path
        isInpFromFile = False
    if not os.path.isdir(dirWithModel):
        raise Exception('Cant find directory with model [%s]' % dirWithModel)
    if pathOutNii is not None:
        # fail fast: the output directory must already exist
        outDir = os.path.dirname(os.path.abspath(pathOutNii))
        if not os.path.isdir(outDir):
            raise Exception(
                'Cant find output directory [%s], create directory for output file before this call' % outDir)
    batcherInfer = BatcherCTLesion3D()
    batcherInfer.loadModelForInference(pathModelJson=dirWithModel, pathMeanData=dirWithModel)
    if isDebug:
        batcherInfer.model.summary()
    ret = batcherInfer.inference([pathInpNii], batchSize=1)
    # pick the class-1 (lesion) probability map; axis depends on backend layout
    if batcherInfer.isTheanoShape:
        outMsk = ret[0][1, :, :, :]
    else:
        outMsk = ret[0][:, :, :, 1]
    if isInpFromFile:
        tmpNii = nib.load(pathInpNii)
    else:
        tmpNii = pathInpNii
    #
    # wrap the mask in the input volume's affine/header so it stays aligned
    outMskNii = nib.Nifti1Image(outMsk.copy().astype(np.float16), tmpNii.affine, header=tmpNii.header)
    if outSize is not None:
        outMskNii = resizeNii(outMskNii, newSize=outSize)
    if threshold is not None:
        outMskNii = nib.Nifti1Image((outMskNii.get_data() > threshold).astype(np.float16),
                                    outMskNii.affine,
                                    header=outMskNii.header)
    if pathOutNii is not None:
        nib.save(outMskNii, pathOutNii)
        # pathOutNii = '%s-segm.nii.gz' % pathInpNii
    else:
        return outMskNii
#########################################
def api_segmentLungAndLesion(dirModelLung, dirModelLesion, series,
                             ptrLogger=None,
                             shape4Lung = (256, 256, 64), shape4Lesi = (128, 128, 64)):
    """Segment lungs and lesions for one *series* and save both masks to disk.

    Returns True on success; returns False (after logging) when the series is
    not ready, is already segmented, or any stage fails. Never raises.
    """
    # (1) msg-helpers: route messages to the provided logger, else stdout
    def msgInfo(msg):
        if ptrLogger is not None:
            ptrLogger.info(msg)
        else:
            print (msg)
    def msgErr(msg):
        if ptrLogger is not None:
            ptrLogger.error(msg)
        else:
            print (msg)
    # (2.1) check data
    if not series.isInitialized():
        msgErr('Series is not initialized, skip .. [{0}]'.format(series))
        return False
    # if not series.isDownloaded():
    #     msgErr('Series data is not downloaded, skip .. [{0}]'.format(series))
    #     return False
    if not series.isConverted():
        msgErr('Series DICOM data is not converted to Nifti format, skip .. [{0}]'.format(series))
        return False
    # (2.2) check existing files
    pathNii = series.pathConvertedNifti(isRelative=False)
    pathSegmLungs = series.pathPostprocLungs(isRelative=False)
    pathSegmLesions = series.pathPostprocLesions(isRelative=False)
    if os.path.isfile(pathSegmLungs) and os.path.isfile(pathSegmLesions):
        msgInfo('Series data is already segmented, skip task ... [{0}]'.format(series))
        return False
    else:
        # (2.3.1) load and resize: each model gets its own input resolution
        try:
            dataNii = nib.load(pathNii)
            shapeOrig = dataNii.shape
            niiResiz4Lung = resizeNii(dataNii, shape4Lung)
            niiResiz4Lesi = resizeNii(dataNii, shape4Lesi)
        except Exception as err:
            msgErr('Cant load and resize input nifti file [{0}] : {1}, for series [{2}]'.format(pathNii, err, series))
            return False
        # (2.3.2) segment lungs (binarized at 0.5, resized back to original shape)
        try:
            lungMask = segmentLungs25D(niiResiz4Lung,
                                       dirWithModel=dirModelLung,
                                       pathOutNii=None,
                                       outSize=shapeOrig,
                                       # outSize=shape4Lung,
                                       threshold=0.5)
        except Exception as err:
            msgErr('Cant segment lungs for file [{0}] : {1}, for series [{2}]'.format(pathNii, err, series))
            return False
        # (2.3.3) segment lesions (kept as probabilities: threshold=None)
        try:
            lesionMask = segmentLesions3D(niiResiz4Lesi,
                                          dirWithModel=dirModelLesion,
                                          pathOutNii=None,
                                          outSize=shapeOrig,
                                          # outSize=shape4Lung,
                                          threshold=None)
        except Exception as err:
            msgErr('Cant segment lesions for file [{0}] : {1}, for series [{2}]'.format(pathNii, err, series))
            return False
        # (2.3.4) save results
        try:
            nib.save(lungMask, pathSegmLungs)
            nib.save(lesionMask, pathSegmLesions)
        except Exception as err:
            msgErr('Cant save segmentation results to file [{0}] : {1}, for series [{2}]'.format(pathSegmLesions, err, series))
            return False
    return True
def api_generateAllReports(series,
                           dirModelLung, dirModelLesion,
                           ptrLogger=None,
                           shape4Lung = (256, 256, 64), shape4Lesi = (128, 128, 64)):
    """Full pipeline for one *series*: segmentation, lesion score, lung info,
    preview image and a JSON report written next to the series data.

    Returns True on success; returns False (after logging) on any failure.
    """
    # (1) msg-helpers: route messages to the provided logger, else stdout
    def msgInfo(msg):
        if ptrLogger is not None:
            ptrLogger.info(msg)
        else:
            print (msg)
    def msgErr(msg):
        if ptrLogger is not None:
            ptrLogger.error(msg)
        else:
            print (msg)
    # (0) prepare path-variables
    pathNii = series.pathConvertedNifti(isRelative=False)
    pathSegmLungs = series.pathPostprocLungs(isRelative=False)
    pathSegmLesions = series.pathPostprocLesions(isRelative=False)
    pathPreview = series.pathPostprocPreview(isRelative=False)
    pathReport = series.pathPostprocReport(isRelative=False)
    # (1) Lung/Lesions segmentation (may be a no-op if masks already exist)
    retSegm = api_segmentLungAndLesion(dirModelLung=dirModelLung,
                                       dirModelLesion=dirModelLesion,
                                       series=series,
                                       ptrLogger=ptrLogger,
                                       shape4Lung=shape4Lung,
                                       shape4Lesi=shape4Lesi)
    msgInfo('Segmentation Lung/Lesion isOk = {0}'.format(retSegm))
    # proceed only if both mask files exist on disk, regardless of retSegm
    if (not os.path.isfile(pathSegmLungs)) or (not os.path.isfile(pathSegmLesions)):
        msgErr('Cant segment Lung/Lesion, skip... [{0}]'.format(series))
        return False
    # (2) calc lesion score
    try:
        niiLung = nib.load(pathSegmLungs)
        niiLesion = nib.load(pathSegmLesions)
    except Exception as err:
        msgErr('Cant load Lung/Lesion Nifti data: [{0}], for {1}'.format(err, pathSegmLesions))
        return False
    try:
        retLesionScore = preproc.prepareLesionDistribInfo(niiLung, niiLesion)
    except Exception as err:
        msgErr('Cant evaluate Lesion-score: [{0}], for {1}'.format(err, pathSegmLesions))
        return False
    # (3) prepare short report about lungs
    try:
        niiLungDiv, _ = preproc.makeLungedMaskNii(niiLung)
        retLungInfo = preproc.prepareLungSizeInfoNii(niiLungDiv)
    except Exception as err:
        msgErr('Cant get Lung information : [{0}], for {1}'.format(err, series))
        return False
    # (4) generate preview & save preview image
    try:
        dataImg = preproc.normalizeCTImage(nib.load(pathNii).get_data())
        dataMsk = niiLung.get_data()
        dataLes = niiLesion.get_data()
        imgPreview = preproc.makePreview4Lesion(dataImg, dataMsk, dataLes)
        # NOTE(review): content-type claims jpeg, but skio.imsave picks the
        # actual format from pathPreview's extension -- confirm they agree.
        imgPreviewJson = {
            "description": "CT Lesion preview",
            "content-type": "image/jpeg",
            "xsize": imgPreview.shape[1],
            "ysize": imgPreview.shape[0],
            "url": os.path.basename(pathPreview)
        }
        skio.imsave(pathPreview, imgPreview)
    except Exception as err:
        msgErr('Cant generate preview image : [{0}], for {1}'.format(err, series))
        return False
    # (5) generate & save JSON report
    try:
        jsonReport = preproc.getJsonReport(series=series,
                                           reportLesionScore=retLesionScore,
                                           reportLungs=retLungInfo,
                                           lstImgJson=[imgPreviewJson])
        with open(pathReport, 'w') as f:
            f.write(json.dumps(jsonReport, indent=4))
    except Exception as err:
        msgErr('Cant generate final JSON report : [{0}], for {1}'.format(err, series))
        return False
    # FIXME: append PDF generation in future here
    # (6) generate PDF preview
    return True
#########################################
if __name__ == '__main__':
    print ('---')
import time
from types import StringTypes
from collections import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_epoch
from sfa.util.sfalogging import logger
from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
from sfa.rspecs.rspec import RSpec
from sfa.planetlab.vlink import VLink
from sfa.planetlab.topology import Topology
from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname
from sfa.storage.model import SliverAllocation
from sfa.storage.alchemy import dbsession
# Largest signed 32-bit value; used below (get_slivers) to clamp slice
# 'expires' timestamps.
MAXINT = 2L**31-1
class PlSlices:
    """Map SFA slice/sliver operations onto a PlanetLab PLCAPI driver."""
    # translation of rspec attribute names to PLCAPI slice-tag names
    rspec_to_slice_tag = {'max_rate':'net_max_rate'}
    def __init__(self, driver):
        """Keep the driver, which exposes the PLCAPI proxy as driver.shell."""
        self.driver = driver
def get_slivers(self, xrn, node=None):
hrn, type = urn_to_hrn(xrn)
slice_name = hrn_to_pl_slicename(hrn)
# XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
# of doing all of this?
#return self.driver.shell.GetSliceTicket(self.auth, slice_name)
# from PLCAPI.GetSlivers.get_slivers()
slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
slices = self.driver.shell.GetSlices(slice_name, slice_fields)
# Build up list of users and slice attributes
person_ids = set()
all_slice_tag_ids = set()
for slice in slices:
person_ids.update(slice['person_ids'])
all_slice_tag_ids.update(slice['slice_tag_ids'])
person_ids = list(person_ids)
all_slice_tag_ids = list(all_slice_tag_ids)
# Get user information
all_persons_list = self.driver.shell.GetPersons({'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
all_persons = {}
for person in all_persons_list:
all_persons[person['person_id']] = person
# Build up list of keys
key_ids = set()
for person in all_persons.values():
key_ids.update(person['key_ids'])
key_ids = list(key_ids)
# Get user account keys
all_keys_list = self.driver.shell.GetKeys(key_ids, ['key_id', 'key', 'key_type'])
all_keys = {}
for key in all_keys_list:
all_keys[key['key_id']] = key
# Get slice attributes
all_slice_tags_list = self.driver.shell.GetSliceTags(all_slice_tag_ids)
all_slice_tags = {}
for slice_tag in all_slice_tags_list:
all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
slivers = []
for slice in slices:
keys = []
for person_id in slice['person_ids']:
if person_id in all_persons:
person = all_persons[person_id]
if not person['enabled']:
continue
for key_id in person['key_ids']:
if key_id in all_keys:
key = all_keys[key_id]
keys += [{'key_type': key['key_type'],
'key': key['key']}]
attributes = []
# All (per-node and global) attributes for this slice
slice_tags = []
for slice_tag_id in slice['slice_tag_ids']:
if slice_tag_id in all_slice_tags:
slice_tags.append(all_slice_tags[slice_tag_id])
# Per-node sliver attributes take precedence over global
# slice attributes, so set them first.
# Then comes nodegroup slice attributes
# Followed by global slice attributes
sliver_attributes = []
if node is not None:
for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
sliver_attributes.append(sliver_attribute['tagname'])
attributes.append({'tagname': sliver_attribute['tagname'],
'value': sliver_attribute['value']})
# set nodegroup slice attributes
for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
# Do not set any nodegroup slice attributes for
# which there is at least one sliver attribute
# already set.
if slice_tag not in slice_tags:
attributes.append({'tagname': slice_tag['tagname'],
'value': slice_tag['value']})
for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
# Do not set any global slice attributes for
# which there is at least one sliver attribute
# already set.
if slice_tag['tagname'] not in sliver_attributes:
attributes.append({'tagname': slice_tag['tagname'],
'value': slice_tag['value']})
# XXX Sanity check; though technically this should be a system invariant
# checked with an assertion
if slice['expires'] > MAXINT: slice['expires']= MAXINT
slivers.append({
'hrn': hrn,
'name': slice['name'],
'slice_id': slice['slice_id'],
'instantiation': slice['instantiation'],
'expires': slice['expires'],
'keys': keys,
'attributes': attributes
})
return slivers
def get_peer(self, xrn):
hrn, type = urn_to_hrn(xrn)
# Becaues of myplc federation, we first need to determine if this
# slice belongs to out local plc or a myplc peer. We will assume it
# is a local site, unless we find out otherwise
peer = None
# get this slice's authority (site)
slice_authority = get_authority(hrn)
# get this site's authority (sfa root authority or sub authority)
site_authority = get_authority(slice_authority).lower()
# check if we are already peered with this site_authority, if so
peers = self.driver.shell.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
for peer_record in peers:
names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
if site_authority in names:
peer = peer_record
return peer
def get_sfa_peer(self, xrn):
hrn, type = urn_to_hrn(xrn)
# return the authority for this hrn or None if we are the authority
sfa_peer = None
slice_authority = get_authority(hrn)
site_authority = get_authority(slice_authority)
if site_authority != self.driver.hrn:
sfa_peer = site_authority
return sfa_peer
    def verify_slice_leases(self, slice, rspec_requested_leases, peer):
        """Reconcile the slice's current leases with the rspec-requested ones.

        Requested leases are normalized into {name, hostname, t_from, t_until}
        dicts; existing leases not requested are deleted, missing ones are
        added (lease matching is whole-dict equality). Errors while applying
        changes are logged, not raised. Returns the leases fetched BEFORE
        the add/remove pass.
        """
        leases = self.driver.shell.GetLeases({'name':slice['name'], 'clip':int(time.time())}, ['lease_id','name', 'hostname', 't_from', 't_until'])
        grain = self.driver.shell.GetLeaseGranularity()
        requested_leases = []
        for lease in rspec_requested_leases:
            requested_lease = {}
            slice_name = hrn_to_pl_slicename(lease['slice_id'])
            # skip leases for other slices or for nodes under another authority
            if slice_name != slice['name']:
                continue
            elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn:
                continue
            hostname = xrn_to_hostname(lease['component_id'])
            # fill the requested node with nitos ids
            requested_lease['name'] = slice['name']
            requested_lease['hostname'] = hostname
            requested_lease['t_from'] = int(lease['start_time'])
            # duration is expressed in grains of GetLeaseGranularity seconds
            requested_lease['t_until'] = int(lease['duration']) * grain + int(lease['start_time'])
            requested_leases.append(requested_lease)
        # prepare actual slice leases by lease_id
        leases_by_id = {}
        for lease in leases:
            leases_by_id[lease['lease_id']] = {'name': lease['name'], 'hostname': lease['hostname'], \
                                               't_from': lease['t_from'], 't_until': lease['t_until']}
        added_leases = []
        kept_leases_id = []
        deleted_leases_id = []
        for lease_id in leases_by_id:
            if leases_by_id[lease_id] not in requested_leases:
                deleted_leases_id.append(lease_id)
            else:
                kept_leases_id.append(lease_id)
                requested_leases.remove(leases_by_id[lease_id])
        # whatever was not matched against an existing lease must be created
        added_leases = requested_leases
        try:
            # peered slices must be unbound before they can be modified locally
            if peer:
                self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
            self.driver.shell.DeleteLeases(deleted_leases_id)
            for lease in added_leases:
                self.driver.shell.AddLeases(lease['hostname'], slice['name'], lease['t_from'], lease['t_until'])
        except:
            logger.log_exc('Failed to add/remove slice leases')
        return leases
    def verify_slice_nodes(self, slice_urn, slice, rspec_nodes, peer):
        """Make the slice's node set match *rspec_nodes* and record sliver
        allocations.

        Hostnames are taken from 'component_name' or derived from
        'component_id'. Nodes currently in the slice but absent from the rspec
        are removed; new ones are added. Apply errors are logged, not raised.
        Returns the node records of the resulting slice.
        """
        slivers = {}
        for node in rspec_nodes:
            hostname = node.get('component_name')
            client_id = node.get('client_id')
            component_id = node.get('component_id').strip()
            if hostname:
                hostname = hostname.strip()
            elif component_id:
                hostname = xrn_to_hostname(component_id)
            if hostname:
                slivers[hostname] = {'client_id': client_id, 'component_id': component_id}
        nodes = self.driver.shell.GetNodes(slice['node_ids'], ['node_id', 'hostname', 'interface_ids'])
        current_slivers = [node['hostname'] for node in nodes]
        # remove nodes not in rspec
        deleted_nodes = list(set(current_slivers).difference(slivers.keys()))
        # add nodes from rspec
        added_nodes = list(set(slivers.keys()).difference(current_slivers))
        try:
            # peered slices must be unbound before they can be modified locally
            if peer:
                self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
            self.driver.shell.AddSliceToNodes(slice['name'], added_nodes)
            self.driver.shell.DeleteSliceFromNodes(slice['name'], deleted_nodes)
        except:
            logger.log_exc('Failed to add/remove slice from nodes')
        # re-read the slice to get the post-update node set
        slices = self.driver.shell.GetSlices(slice['name'], ['node_ids'])
        resulting_nodes = self.driver.shell.GetNodes(slices[0]['node_ids'])
        # update sliver allocations
        for node in resulting_nodes:
            client_id = slivers[node['hostname']]['client_id']
            component_id = slivers[node['hostname']]['component_id']
            sliver_hrn = '%s.%s-%s' % (self.driver.hrn, slice['slice_id'], node['node_id'])
            sliver_id = Xrn(sliver_hrn, type='sliver').urn
            record = SliverAllocation(sliver_id=sliver_id, client_id=client_id,
                                      component_id=component_id,
                                      slice_urn = slice_urn,
                                      allocation_state='geni_allocated')
            record.sync()
        return resulting_nodes
def free_egre_key(self):
used = set()
for tag in self.driver.shell.GetSliceTags({'tagname': 'egre_key'}):
used.add(int(tag['value']))
for i in range(1, 256):
if i not in used:
key = i
break
else:
raise KeyError("No more EGRE keys available")
return str(key)
    def verify_slice_links(self, slice, requested_links, nodes):
        """Configure virtual links for the slice from the rspec's link list.

        Builds per-node 'topo_rspec'/'vini_topo' tags plus the global
        egre_key/netns/capabilities tags, then applies them via
        verify_slice_attributes in append mode. No-op when no links are
        requested or no topology is available.
        """
        if not requested_links:
            return
        # exit if links are not supported here
        topology = Topology()
        if not topology:
            return
        # build dict of nodes
        nodes_dict = {}
        interface_ids = []
        for node in nodes:
            nodes_dict[node['node_id']] = node
            interface_ids.extend(node['interface_ids'])
        # build dict of interfaces
        interfaces = self.driver.shell.GetInterfaces(interface_ids)
        interfaces_dict = {}
        for interface in interfaces:
            interfaces_dict[interface['interface_id']] = interface
        slice_tags = []
        # set egre key
        slice_tags.append({'name': 'egre_key', 'value': self.free_egre_key()})
        # set netns
        slice_tags.append({'name': 'netns', 'value': '1'})
        # set cap_net_admin
        # need to update the attribute string?
        slice_tags.append({'name': 'capabilities', 'value': 'CAP_NET_ADMIN'})
        for link in requested_links:
            # get the ip address of the first node in the link
            ifname1 = Xrn(link['interface1']['component_id']).get_leaf()
            # interface leaf looks like "nodeNNN[:device]"
            ifname_parts = ifname1.split(':')
            node_raw = ifname_parts[0]
            device = None
            if len(ifname_parts) > 1:
                device = ifname_parts[1]
            node_id = int(node_raw.replace('node', ''))
            node = nodes_dict[node_id]
            if1 = interfaces_dict[node['interface_ids'][0]]
            ipaddr = if1['ip']
            topo_rspec = VLink.get_topo_rspec(link, ipaddr)
            # set topo_rspec tag
            slice_tags.append({'name': 'topo_rspec', 'value': str([topo_rspec]), 'node_id': node_id})
            # set vini_topo tag
            slice_tags.append({'name': 'vini_topo', 'value': 'manual', 'node_id': node_id})
            #self.driver.shell.AddSliceTag(slice['name'], 'topo_rspec', str([topo_rspec]), node_id)
        self.verify_slice_attributes(slice, slice_tags, {'append': True}, admin=True)
def handle_peer(self, site, slice, persons, peer):
if peer:
# bind site
try:
if site:
self.driver.shell.BindObjectToPeer('site', site['site_id'], peer['shortname'], slice['site_id'])
except Exception,e:
self.driver.shell.DeleteSite(site['site_id'])
raise e
# bind slice
try:
if slice:
self.driver.shell.BindObjectToPeer('slice', slice['slice_id'], peer['shortname'], slice['slice_id'])
except Exception,e:
self.driver.shell.DeleteSlice(slice['slice_id'])
raise e
# bind persons
for person in persons:
try:
self.driver.shell.BindObjectToPeer('person',
person['person_id'], peer['shortname'], person['peer_person_id'])
for (key, remote_key_id) in zip(person['keys'], person['key_ids']):
try:
self.driver.shell.BindObjectToPeer( 'key', key['key_id'], peer['shortname'], remote_key_id)
except:
self.driver.shell.DeleteKey(key['key_id'])
logger("failed to bind key: %s to peer: %s " % (key['key_id'], peer['shortname']))
except Exception,e:
self.driver.shell.DeletePerson(person['person_id'])
raise e
return slice
    def verify_site(self, slice_xrn, slice_record={}, peer=None, sfa_peer=None, options={}):
        """Make sure a PLC site exists for the authority of slice_xrn.

        Creates a site named 'geni.<authority>' when none exists; for an
        existing site owned by a federated peer, unbinds it so that it can
        be modified (handle_peer binds it back later).

        :param slice_xrn: slice URN/HRN whose authority names the site
        :param slice_record: remote record; its 'site_id' becomes peer_site_id
        :param peer: peer record, or None for a purely local slice
        :param sfa_peer: unused here (kept for interface compatibility; see
            the commented-out registry code below)
        :returns: the PLC site record
        """
        (slice_hrn, type) = urn_to_hrn(slice_xrn)
        site_hrn = get_authority(slice_hrn)
        # login base can't be longer than 20 characters
        slicename = hrn_to_pl_slicename(slice_hrn)
        authority_name = slicename.split('_')[0]
        login_base = authority_name[:20]
        sites = self.driver.shell.GetSites(login_base)
        if not sites:
            # create new site record
            site = {'name': 'geni.%s' % authority_name,
                 'abbreviated_name': authority_name,
                 'login_base': login_base,
                 'max_slices': 100,
                 'max_slivers': 1000,
                 'enabled': True,
                 'peer_site_id': None}
            if peer:
                site['peer_site_id'] = slice_record.get('site_id', None)
            site['site_id'] = self.driver.shell.AddSite(site)
            # exempt federated sites from monitor policies
            self.driver.shell.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
            # # is this still necessary?
            # # add record to the local registry
            # if sfa_peer and slice_record:
            #     peer_dict = {'type': 'authority', 'hrn': site_hrn, \
            #                  'peer_authority': sfa_peer, 'pointer': site['site_id']}
            #     self.registry.register_peer_object(self.credential, peer_dict)
        else:
            site = sites[0]
            if peer:
                # unbind from peer so we can modify if necessary. Will bind back later
                self.driver.shell.UnBindObjectFromPeer('site', site['site_id'], peer['shortname'])
        return site
    def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, expiration, options={}):
        """Make sure a PLC slice exists for slice_hrn with the requested
        expiration time, creating or updating it as needed.

        :param slice_hrn: slice human-readable name
        :param slice_record: remote record; its 'slice_id' becomes peer_slice_id
        :param peer: peer record, or None for a purely local slice
        :param sfa_peer: unused here (kept for interface compatibility)
        :param expiration: requested expiration (parsed by utcparse)
        :returns: the PLC slice record
        """
        slicename = hrn_to_pl_slicename(slice_hrn)
        parts = slicename.split("_")
        login_base = parts[0]
        slices = self.driver.shell.GetSlices([slicename])
        # requested expiration as a unix timestamp
        expires = int(datetime_to_epoch(utcparse(expiration)))
        if not slices:
            slice = {'name': slicename,
                     'url': 'No Url',
                     'description': 'No Description'}
            # add the slice
            slice['slice_id'] = self.driver.shell.AddSlice(slice)
            slice['node_ids'] = []
            slice['person_ids'] = []
            if peer and slice_record:
                slice['peer_slice_id'] = slice_record.get('slice_id', None)
            # set the expiration
            self.driver.shell.UpdateSlice(slice['slice_id'], {'expires': expires})
        else:
            slice = slices[0]
            if peer and slice_record:
                slice['peer_slice_id'] = slice_record.get('slice_id', None)
                # unbind from peer so we can modify if necessary. Will bind back later
                self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
            #Update expiration if necessary
            if slice['expires'] != expires:
                self.driver.shell.UpdateSlice( slice['slice_id'], {'expires' : expires})
        return slice
#def get_existing_persons(self, users):
    def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, options={}):
        """Reconcile the people attached to a slice with the requested users.

        Requested users are matched to existing PLC persons either by email
        or, when no email is supplied, by a '<username>@...' address at the
        user's site (falling back to a fake '<username>@geni.net' address).
        Missing persons are created, enabled and added to their site; users
        no longer requested are removed from the slice unless
        options['append'] is True (the default).

        :param slice_hrn: slice human-readable name (unused here)
        :param slice_record: PLC slice record; 'person_ids' and 'name' are read
        :param users: requested user dicts with at least a 'urn' key
        :param peer: peer record, or None for purely local users
        :param sfa_peer: unused here (see commented-out registry code below)
        :returns: list of newly created person records
        """
        users_by_email = {}
        users_by_site = defaultdict(list)
        users_dict = {}
        # normalize the requested users and index them by email / by site
        for user in users:
            user['urn'] = user['urn'].lower()
            hrn, type = urn_to_hrn(user['urn'])
            username = get_leaf(hrn)
            login_base = PlXrn(xrn=user['urn']).pl_login_base()
            user['username'] = username
            user['site'] = login_base
            if 'email' in user:
                user['email'] = user['email'].lower()
                users_by_email[user['email']] = user
                users_dict[user['email']] = user
            else:
                users_by_site[user['site']].append(user)
        # start building a list of existing users
        existing_user_ids = []
        existing_user_ids_filter = []
        if users_by_email:
            existing_user_ids_filter.extend(users_by_email.keys())
        if users_by_site:
            # users without an email get a synthesized @geni.net address
            for login_base in users_by_site:
                users = users_by_site[login_base]
                for user in users:
                    existing_user_ids_filter.append(user['username']+'@geni.net')
        if existing_user_ids_filter:
            # get existing users by email
            existing_users = self.driver.shell.GetPersons({'email': existing_user_ids_filter},
                                                          ['person_id', 'key_ids', 'email'])
            existing_user_ids.extend([user['email'] for user in existing_users])
        if users_by_site:
            # get a list of user sites (based on requested user urns)
            site_list = self.driver.shell.GetSites(users_by_site.keys(), \
                ['site_id', 'login_base', 'person_ids'])
            # get all existing users at these sites
            sites = {}
            site_user_ids = []
            for site in site_list:
                sites[site['site_id']] = site
                site_user_ids.extend(site['person_ids'])
            existing_site_persons_list = self.driver.shell.GetPersons(site_user_ids,
                                                                      ['person_id', 'key_ids', 'email', 'site_ids'])
            # all requested users are either existing users or new (added) users
            for login_base in users_by_site:
                requested_site_users = users_by_site[login_base]
                for requested_user in requested_site_users:
                    user_found = False
                    # match by site membership + email local-part prefix
                    for existing_user in existing_site_persons_list:
                        for site_id in existing_user['site_ids']:
                            if site_id in sites:
                                site = sites[site_id]
                                if login_base == site['login_base'] and \
                                   existing_user['email'].startswith(requested_user['username']+'@'):
                                    existing_user_ids.append(existing_user['email'])
                                    requested_user['email'] = existing_user['email']
                                    users_dict[existing_user['email']] = requested_user
                                    user_found = True
                                    break
                        if user_found:
                            break
                    if user_found == False:
                        # no match: synthesize a placeholder email address
                        fake_email = requested_user['username'] + '@geni.net'
                        requested_user['email'] = fake_email
                        users_dict[fake_email] = requested_user
        # requested slice users
        requested_user_ids = users_dict.keys()
        # existing slice users
        existing_slice_users_filter = {'person_id': slice_record.get('person_ids', [])}
        existing_slice_users = self.driver.shell.GetPersons(existing_slice_users_filter,
                                                            ['person_id', 'key_ids', 'email'])
        existing_slice_user_ids = [user['email'] for user in existing_slice_users]
        # users to be added, removed or updated
        added_user_ids = set(requested_user_ids).difference(existing_user_ids)
        added_slice_user_ids = set(requested_user_ids).difference(existing_slice_user_ids)
        removed_user_ids = set(existing_slice_user_ids).difference(requested_user_ids)
        updated_user_ids = set(existing_slice_user_ids).intersection(requested_user_ids)
        # Remove stale users (only if we are not appending).
        # Append by default.
        append = options.get('append', True)
        if append == False:
            for removed_user_id in removed_user_ids:
                self.driver.shell.DeletePersonFromSlice(removed_user_id, slice_record['name'])
        # update_existing users
        updated_users_list = [user for user in users_dict.values() if user['email'] in \
          updated_user_ids]
        self.verify_keys(existing_slice_users, updated_users_list, peer, options)
        added_persons = []
        # add new users
        for added_user_id in added_user_ids:
            added_user = users_dict[added_user_id]
            hrn, type = urn_to_hrn(added_user['urn'])
            person = {
                'first_name': added_user.get('first_name', hrn),
                'last_name': added_user.get('last_name', hrn),
                'email': added_user_id,
                'peer_person_id': None,
                'keys': [],
                #'key_ids': added_user.get('key_ids', []),
            }
            person['person_id'] = self.driver.shell.AddPerson(person)
            if peer:
                # NOTE(review): assumes the requested user dict carries a
                # 'person_id' from the remote peer -- confirm with callers
                person['peer_person_id'] = added_user['person_id']
            added_persons.append(person)
            # enable the account
            self.driver.shell.UpdatePerson(person['person_id'], {'enabled': True})
            # add person to site
            self.driver.shell.AddPersonToSite(added_user_id, added_user['site'])
            for key_string in added_user.get('keys', []):
                key = {'key':key_string, 'key_type':'ssh'}
                key['key_id'] = self.driver.shell.AddPersonKey(person['person_id'], key)
                person['keys'].append(key)
            # add the registry record
            # if sfa_peer:
            #     peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \
            #                  'pointer': person['person_id']}
            #     self.registry.register_peer_object(self.credential, peer_dict)
        for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
            # add person to the slice
            self.driver.shell.AddPersonToSlice(added_slice_user_id, slice_record['name'])
            # if this is a peer record then it should already be bound to a peer.
            # no need to worry about it getting bound later
        return added_persons
def verify_keys(self, persons, users, peer, options={}):
# existing keys
key_ids = []
for person in persons:
key_ids.extend(person['key_ids'])
keylist = self.driver.shell.GetKeys(key_ids, ['key_id', 'key'])
keydict = {}
for key in keylist:
keydict[key['key']] = key['key_id']
existing_keys = keydict.keys()
persondict = {}
for person in persons:
persondict[person['email']] = person
# add new keys
requested_keys = []
updated_persons = []
for user in users:
user_keys = user.get('keys', [])
updated_persons.append(user)
for key_string in user_keys:
requested_keys.append(key_string)
if key_string not in existing_keys:
key = {'key': key_string, 'key_type': 'ssh'}
try:
if peer:
person = persondict[user['email']]
self.driver.shell.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
key['key_id'] = self.driver.shell.AddPersonKey(user['email'], key)
if peer:
key_index = user_keys.index(key['key'])
remote_key_id = user['key_ids'][key_index]
self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
finally:
if peer:
self.driver.shell.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
# remove old keys (only if we are not appending)
append = options.get('append', True)
if append == False:
removed_keys = set(existing_keys).difference(requested_keys)
for existing_key_id in keydict:
if keydict[existing_key_id] in removed_keys:
try:
if peer:
self.driver.shell.UnBindObjectFromPeer('key', existing_key_id, peer['shortname'])
self.driver.shell.DeleteKey(existing_key_id)
except:
pass
def verify_slice_attributes(self, slice, requested_slice_attributes, options={}, admin=False):
append = options.get('append', True)
# get list of attributes users ar able to manage
filter = {'category': '*slice*'}
if not admin:
filter['|roles'] = ['user']
slice_attributes = self.driver.shell.GetTagTypes(filter)
valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]
# get sliver attributes
added_slice_attributes = []
removed_slice_attributes = []
ignored_slice_attribute_names = []
existing_slice_attributes = self.driver.shell.GetSliceTags({'slice_id': slice['slice_id']})
# get attributes that should be removed
for slice_tag in existing_slice_attributes:
if slice_tag['tagname'] in ignored_slice_attribute_names:
# If a slice already has a admin only role it was probably given to them by an
# admin, so we should ignore it.
ignored_slice_attribute_names.append(slice_tag['tagname'])
else:
# If an existing slice attribute was not found in the request it should
# be removed
attribute_found=False
for requested_attribute in requested_slice_attributes:
if requested_attribute['name'] == slice_tag['tagname'] and \
requested_attribute['value'] == slice_tag['value']:
attribute_found=True
break
if not attribute_found and not append:
removed_slice_attributes.append(slice_tag)
# get attributes that should be added:
for requested_attribute in requested_slice_attributes:
# if the requested attribute wasn't found we should add it
if requested_attribute['name'] in valid_slice_attribute_names:
attribute_found = False
for existing_attribute in existing_slice_attributes:
if requested_attribute['name'] == existing_attribute['tagname'] and \
requested_attribute['value'] == existing_attribute['value']:
attribute_found=True
break
if not attribute_found:
added_slice_attributes.append(requested_attribute)
# remove stale attributes
for attribute in removed_slice_attributes:
try:
self.driver.shell.DeleteSliceTag(attribute['slice_tag_id'])
except Exception, e:
logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
% (slice['name'], attribute['value'], attribute.get('node_id'), str(e)))
# add requested_attributes
for attribute in added_slice_attributes:
try:
self.driver.shell.AddSliceTag(slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
except Exception, e:
logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
% (slice['name'], attribute['value'], attribute.get('node_id'), str(e)))
# fix for potential ValueError -- stray commit-message text; commented out so the module stays syntactically valid
import time
from types import StringTypes
from collections import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_epoch
from sfa.util.sfalogging import logger
from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
from sfa.rspecs.rspec import RSpec
from sfa.planetlab.vlink import VLink
from sfa.planetlab.topology import Topology
from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname
from sfa.storage.model import SliverAllocation
from sfa.storage.alchemy import dbsession
# largest 32-bit signed integer; used to clamp slice expiration timestamps
MAXINT = 2L**31-1
class PlSlices:
    """Helpers that synchronize SFA slice state (nodes, users, keys, tags,
    leases, links) with a PlanetLab testbed through the PLCAPI shell."""
    # mapping of rspec tag names to their PLC slice-tag equivalents
    rspec_to_slice_tag = {'max_rate':'net_max_rate'}
    def __init__(self, driver):
        # driver wraps the PLCAPI shell used for all testbed calls
        self.driver = driver
def get_slivers(self, xrn, node=None):
hrn, type = urn_to_hrn(xrn)
slice_name = hrn_to_pl_slicename(hrn)
# XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
# of doing all of this?
#return self.driver.shell.GetSliceTicket(self.auth, slice_name)
# from PLCAPI.GetSlivers.get_slivers()
slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
slices = self.driver.shell.GetSlices(slice_name, slice_fields)
# Build up list of users and slice attributes
person_ids = set()
all_slice_tag_ids = set()
for slice in slices:
person_ids.update(slice['person_ids'])
all_slice_tag_ids.update(slice['slice_tag_ids'])
person_ids = list(person_ids)
all_slice_tag_ids = list(all_slice_tag_ids)
# Get user information
all_persons_list = self.driver.shell.GetPersons({'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
all_persons = {}
for person in all_persons_list:
all_persons[person['person_id']] = person
# Build up list of keys
key_ids = set()
for person in all_persons.values():
key_ids.update(person['key_ids'])
key_ids = list(key_ids)
# Get user account keys
all_keys_list = self.driver.shell.GetKeys(key_ids, ['key_id', 'key', 'key_type'])
all_keys = {}
for key in all_keys_list:
all_keys[key['key_id']] = key
# Get slice attributes
all_slice_tags_list = self.driver.shell.GetSliceTags(all_slice_tag_ids)
all_slice_tags = {}
for slice_tag in all_slice_tags_list:
all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
slivers = []
for slice in slices:
keys = []
for person_id in slice['person_ids']:
if person_id in all_persons:
person = all_persons[person_id]
if not person['enabled']:
continue
for key_id in person['key_ids']:
if key_id in all_keys:
key = all_keys[key_id]
keys += [{'key_type': key['key_type'],
'key': key['key']}]
attributes = []
# All (per-node and global) attributes for this slice
slice_tags = []
for slice_tag_id in slice['slice_tag_ids']:
if slice_tag_id in all_slice_tags:
slice_tags.append(all_slice_tags[slice_tag_id])
# Per-node sliver attributes take precedence over global
# slice attributes, so set them first.
# Then comes nodegroup slice attributes
# Followed by global slice attributes
sliver_attributes = []
if node is not None:
for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
sliver_attributes.append(sliver_attribute['tagname'])
attributes.append({'tagname': sliver_attribute['tagname'],
'value': sliver_attribute['value']})
# set nodegroup slice attributes
for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
# Do not set any nodegroup slice attributes for
# which there is at least one sliver attribute
# already set.
if slice_tag not in slice_tags:
attributes.append({'tagname': slice_tag['tagname'],
'value': slice_tag['value']})
for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
# Do not set any global slice attributes for
# which there is at least one sliver attribute
# already set.
if slice_tag['tagname'] not in sliver_attributes:
attributes.append({'tagname': slice_tag['tagname'],
'value': slice_tag['value']})
# XXX Sanity check; though technically this should be a system invariant
# checked with an assertion
if slice['expires'] > MAXINT: slice['expires']= MAXINT
slivers.append({
'hrn': hrn,
'name': slice['name'],
'slice_id': slice['slice_id'],
'instantiation': slice['instantiation'],
'expires': slice['expires'],
'keys': keys,
'attributes': attributes
})
return slivers
def get_peer(self, xrn):
hrn, type = urn_to_hrn(xrn)
# Becaues of myplc federation, we first need to determine if this
# slice belongs to out local plc or a myplc peer. We will assume it
# is a local site, unless we find out otherwise
peer = None
# get this slice's authority (site)
slice_authority = get_authority(hrn)
# get this site's authority (sfa root authority or sub authority)
site_authority = get_authority(slice_authority).lower()
# check if we are already peered with this site_authority, if so
peers = self.driver.shell.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
for peer_record in peers:
names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
if site_authority in names:
peer = peer_record
return peer
def get_sfa_peer(self, xrn):
hrn, type = urn_to_hrn(xrn)
# return the authority for this hrn or None if we are the authority
sfa_peer = None
slice_authority = get_authority(hrn)
site_authority = get_authority(slice_authority)
if site_authority != self.driver.hrn:
sfa_peer = site_authority
return sfa_peer
    def verify_slice_leases(self, slice, rspec_requested_leases, peer):
        """Reconcile the slice's node reservations (leases) with the leases
        requested in the rspec: keep matching leases, delete the rest, and
        add the newly requested ones.

        :param slice: PLC slice record ('name' and 'slice_id' are read)
        :param rspec_requested_leases: lease dicts from the rspec with
            'slice_id', 'component_id', 'start_time' and 'duration'
        :param peer: peer record, or None; federated slices are unbound
            from their peer before leases are modified
        :returns: the slice's current leases as fetched before reconciliation
        """
        # only consider leases that have not yet ended ('clip' = now)
        leases = self.driver.shell.GetLeases({'name':slice['name'], 'clip':int(time.time())}, ['lease_id','name', 'hostname', 't_from', 't_until'])
        grain = self.driver.shell.GetLeaseGranularity()
        requested_leases = []
        for lease in rspec_requested_leases:
            requested_lease = {}
            slice_name = hrn_to_pl_slicename(lease['slice_id'])
            # skip leases for other slices or other authorities
            if slice_name != slice['name']:
                continue
            elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn:
                continue
            hostname = xrn_to_hostname(lease['component_id'])
            # fill the requested node with nitos ids
            requested_lease['name'] = slice['name']
            requested_lease['hostname'] = hostname
            requested_lease['t_from'] = int(lease['start_time'])
            # duration is expressed in grains of the testbed's granularity
            requested_lease['t_until'] = int(lease['duration']) * grain + int(lease['start_time'])
            requested_leases.append(requested_lease)
        # prepare actual slice leases by lease_id
        leases_by_id = {}
        for lease in leases:
            leases_by_id[lease['lease_id']] = {'name': lease['name'], 'hostname': lease['hostname'], \
                't_from': lease['t_from'], 't_until': lease['t_until']}
        added_leases = []
        kept_leases_id = []
        deleted_leases_id = []
        # an existing lease is kept when an identical lease was requested;
        # whatever remains in requested_leases afterwards must be added
        for lease_id in leases_by_id:
            if leases_by_id[lease_id] not in requested_leases:
                deleted_leases_id.append(lease_id)
            else:
                kept_leases_id.append(lease_id)
                requested_leases.remove(leases_by_id[lease_id])
        added_leases = requested_leases
        try:
            if peer:
                self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
            self.driver.shell.DeleteLeases(deleted_leases_id)
            for lease in added_leases:
                self.driver.shell.AddLeases(lease['hostname'], slice['name'], lease['t_from'], lease['t_until'])
        except:
            # best effort: log the full traceback and carry on
            logger.log_exc('Failed to add/remove slice leases')
        return leases
    def verify_slice_nodes(self, slice_urn, slice, rspec_nodes, peer):
        """Reconcile the set of nodes the slice runs on with the nodes named
        in the rspec, then record a SliverAllocation for each resulting node.

        :param slice_urn: slice URN used for the sliver allocation records
        :param slice: PLC slice record ('name', 'slice_id', 'node_ids')
        :param rspec_nodes: node dicts carrying 'component_name' or
            'component_id', plus an optional 'client_id'
        :param peer: peer record, or None; federated slices are unbound
            from their peer before nodes are modified
        :returns: the node records the slice ends up on
        """
        slivers = {}
        # index requested slivers by hostname
        for node in rspec_nodes:
            hostname = node.get('component_name')
            client_id = node.get('client_id')
            component_id = node.get('component_id').strip()
            if hostname:
                hostname = hostname.strip()
            elif component_id:
                hostname = xrn_to_hostname(component_id)
            if hostname:
                slivers[hostname] = {'client_id': client_id, 'component_id': component_id}
        nodes = self.driver.shell.GetNodes(slice['node_ids'], ['node_id', 'hostname', 'interface_ids'])
        current_slivers = [node['hostname'] for node in nodes]
        # remove nodes not in rspec
        deleted_nodes = list(set(current_slivers).difference(slivers.keys()))
        # add nodes from rspec
        added_nodes = list(set(slivers.keys()).difference(current_slivers))
        try:
            if peer:
                self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
            self.driver.shell.AddSliceToNodes(slice['name'], added_nodes)
            self.driver.shell.DeleteSliceFromNodes(slice['name'], deleted_nodes)
        except:
            # best effort: log the full traceback and carry on
            logger.log_exc('Failed to add/remove slice from nodes')
        # re-read the slice to learn which nodes it actually ended up on
        slices = self.driver.shell.GetSlices(slice['name'], ['node_ids'])
        resulting_nodes = self.driver.shell.GetNodes(slices[0]['node_ids'])
        # update sliver allocations
        for node in resulting_nodes:
            # NOTE(review): assumes every resulting hostname was requested in
            # the rspec; a node left over from a failed deletion would raise
            # KeyError here -- confirm that is acceptable
            client_id = slivers[node['hostname']]['client_id']
            component_id = slivers[node['hostname']]['component_id']
            sliver_hrn = '%s.%s-%s' % (self.driver.hrn, slice['slice_id'], node['node_id'])
            sliver_id = Xrn(sliver_hrn, type='sliver').urn
            record = SliverAllocation(sliver_id=sliver_id, client_id=client_id,
                                      component_id=component_id,
                                      slice_urn = slice_urn,
                                      allocation_state='geni_allocated')
            record.sync()
        return resulting_nodes
def free_egre_key(self):
used = set()
for tag in self.driver.shell.GetSliceTags({'tagname': 'egre_key'}):
used.add(int(tag['value']))
for i in range(1, 256):
if i not in used:
key = i
break
else:
raise KeyError("No more EGRE keys available")
return str(key)
    def verify_slice_links(self, slice, requested_links, nodes):
        """Set up the slice tags needed to realize the requested virtual
        links (egre_key, netns, capabilities, plus per-node topo_rspec and
        vini_topo tags), then apply them via verify_slice_attributes.

        :param slice: PLC slice record
        :param requested_links: link dicts from the rspec; each carries an
            'interface1' with a 'component_id'
        :param nodes: node records ('node_id', 'interface_ids') the slice is on
        """
        if not requested_links:
            return
        # exit if links are not supported here
        topology = Topology()
        if not topology:
            return
        # build dict of nodes
        nodes_dict = {}
        interface_ids = []
        for node in nodes:
            nodes_dict[node['node_id']] = node
            interface_ids.extend(node['interface_ids'])
        # build dict of interfaces
        interfaces = self.driver.shell.GetInterfaces(interface_ids)
        interfaces_dict = {}
        for interface in interfaces:
            interfaces_dict[interface['interface_id']] = interface
        slice_tags = []
        # set egre key
        slice_tags.append({'name': 'egre_key', 'value': self.free_egre_key()})
        # set netns
        slice_tags.append({'name': 'netns', 'value': '1'})
        # set cap_net_admin
        # need to update the attribute string?
        slice_tags.append({'name': 'capabilities', 'value': 'CAP_NET_ADMIN'})
        for link in requested_links:
            # get the ip address of the first node in the link
            ifname1 = Xrn(link['interface1']['component_id']).get_leaf()
            if ifname1:
                # interface leaf names look like 'node<ID>[:<device>]'
                ifname_parts = ifname1.split(':')
                node_raw = ifname_parts[0]
                # NOTE(review): 'device' is computed but never used below --
                # confirm whether it was meant to feed get_topo_rspec
                device = None
                if len(ifname_parts) > 1:
                    device = ifname_parts[1]
                node_id = int(node_raw.replace('node', ''))
                node = nodes_dict[node_id]
                # use the node's first interface for the link endpoint ip
                if1 = interfaces_dict[node['interface_ids'][0]]
                ipaddr = if1['ip']
                topo_rspec = VLink.get_topo_rspec(link, ipaddr)
                # set topo_rspec tag
                slice_tags.append({'name': 'topo_rspec', 'value': str([topo_rspec]), 'node_id': node_id})
                # set vini_topo tag
                slice_tags.append({'name': 'vini_topo', 'value': 'manual', 'node_id': node_id})
                #self.driver.shell.AddSliceTag(slice['name'], 'topo_rspec', str([topo_rspec]), node_id)
        # apply the tags, admin=True so link-related tag types are allowed
        self.verify_slice_attributes(slice, slice_tags, {'append': True}, admin=True)
def handle_peer(self, site, slice, persons, peer):
if peer:
# bind site
try:
if site:
self.driver.shell.BindObjectToPeer('site', site['site_id'], peer['shortname'], slice['site_id'])
except Exception,e:
self.driver.shell.DeleteSite(site['site_id'])
raise e
# bind slice
try:
if slice:
self.driver.shell.BindObjectToPeer('slice', slice['slice_id'], peer['shortname'], slice['slice_id'])
except Exception,e:
self.driver.shell.DeleteSlice(slice['slice_id'])
raise e
# bind persons
for person in persons:
try:
self.driver.shell.BindObjectToPeer('person',
person['person_id'], peer['shortname'], person['peer_person_id'])
for (key, remote_key_id) in zip(person['keys'], person['key_ids']):
try:
self.driver.shell.BindObjectToPeer( 'key', key['key_id'], peer['shortname'], remote_key_id)
except:
self.driver.shell.DeleteKey(key['key_id'])
logger("failed to bind key: %s to peer: %s " % (key['key_id'], peer['shortname']))
except Exception,e:
self.driver.shell.DeletePerson(person['person_id'])
raise e
return slice
    def verify_site(self, slice_xrn, slice_record={}, peer=None, sfa_peer=None, options={}):
        """Make sure a PLC site exists for the authority of slice_xrn.

        Creates a site named 'geni.<authority>' when none exists; for an
        existing site owned by a federated peer, unbinds it so that it can
        be modified (handle_peer binds it back later).

        :param slice_xrn: slice URN/HRN whose authority names the site
        :param slice_record: remote record; its 'site_id' becomes peer_site_id
        :param peer: peer record, or None for a purely local slice
        :param sfa_peer: unused here (kept for interface compatibility; see
            the commented-out registry code below)
        :returns: the PLC site record
        """
        (slice_hrn, type) = urn_to_hrn(slice_xrn)
        site_hrn = get_authority(slice_hrn)
        # login base can't be longer than 20 characters
        slicename = hrn_to_pl_slicename(slice_hrn)
        authority_name = slicename.split('_')[0]
        login_base = authority_name[:20]
        sites = self.driver.shell.GetSites(login_base)
        if not sites:
            # create new site record
            site = {'name': 'geni.%s' % authority_name,
                 'abbreviated_name': authority_name,
                 'login_base': login_base,
                 'max_slices': 100,
                 'max_slivers': 1000,
                 'enabled': True,
                 'peer_site_id': None}
            if peer:
                site['peer_site_id'] = slice_record.get('site_id', None)
            site['site_id'] = self.driver.shell.AddSite(site)
            # exempt federated sites from monitor policies
            self.driver.shell.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
            # # is this still necessary?
            # # add record to the local registry
            # if sfa_peer and slice_record:
            #     peer_dict = {'type': 'authority', 'hrn': site_hrn, \
            #                  'peer_authority': sfa_peer, 'pointer': site['site_id']}
            #     self.registry.register_peer_object(self.credential, peer_dict)
        else:
            site = sites[0]
            if peer:
                # unbind from peer so we can modify if necessary. Will bind back later
                self.driver.shell.UnBindObjectFromPeer('site', site['site_id'], peer['shortname'])
        return site
def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, expiration, options={}):
slicename = hrn_to_pl_slicename(slice_hrn)
parts = slicename.split("_")
login_base = parts[0]
slices = self.driver.shell.GetSlices([slicename])
expires = int(datetime_to_epoch(utcparse(expiration)))
if not slices:
slice = {'name': slicename,
'url': 'No Url',
'description': 'No Description'}
# add the slice
slice['slice_id'] = self.driver.shell.AddSlice(slice)
slice['node_ids'] = []
slice['person_ids'] = []
if peer and slice_record:
slice['peer_slice_id'] = slice_record.get('slice_id', None)
# set the expiration
self.driver.shell.UpdateSlice(slice['slice_id'], {'expires': expires})
else:
slice = slices[0]
if peer and slice_record:
slice['peer_slice_id'] = slice_record.get('slice_id', None)
# unbind from peer so we can modify if necessary. Will bind back later
self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
#Update expiration if necessary
if slice['expires'] != expires:
self.driver.shell.UpdateSlice( slice['slice_id'], {'expires' : expires})
return slice
#def get_existing_persons(self, users):
    def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, options={}):
        """Reconcile the people attached to a slice with the requested users.

        Requested users are matched to existing PLC persons either by email
        or, when no email is supplied, by a '<username>@...' address at the
        user's site (falling back to a fake '<username>@geni.net' address).
        Missing persons are created, enabled and added to their site; users
        no longer requested are removed from the slice unless
        options['append'] is True (the default).

        :param slice_hrn: slice human-readable name (unused here)
        :param slice_record: PLC slice record; 'person_ids' and 'name' are read
        :param users: requested user dicts with at least a 'urn' key
        :param peer: peer record, or None for purely local users
        :param sfa_peer: unused here (see commented-out registry code below)
        :returns: list of newly created person records
        """
        users_by_email = {}
        users_by_site = defaultdict(list)
        users_dict = {}
        # normalize the requested users and index them by email / by site
        for user in users:
            user['urn'] = user['urn'].lower()
            hrn, type = urn_to_hrn(user['urn'])
            username = get_leaf(hrn)
            login_base = PlXrn(xrn=user['urn']).pl_login_base()
            user['username'] = username
            user['site'] = login_base
            if 'email' in user:
                user['email'] = user['email'].lower()
                users_by_email[user['email']] = user
                users_dict[user['email']] = user
            else:
                users_by_site[user['site']].append(user)
        # start building a list of existing users
        existing_user_ids = []
        existing_user_ids_filter = []
        if users_by_email:
            existing_user_ids_filter.extend(users_by_email.keys())
        if users_by_site:
            # users without an email get a synthesized @geni.net address
            for login_base in users_by_site:
                users = users_by_site[login_base]
                for user in users:
                    existing_user_ids_filter.append(user['username']+'@geni.net')
        if existing_user_ids_filter:
            # get existing users by email
            existing_users = self.driver.shell.GetPersons({'email': existing_user_ids_filter},
                                                          ['person_id', 'key_ids', 'email'])
            existing_user_ids.extend([user['email'] for user in existing_users])
        if users_by_site:
            # get a list of user sites (based on requested user urns)
            site_list = self.driver.shell.GetSites(users_by_site.keys(), \
                ['site_id', 'login_base', 'person_ids'])
            # get all existing users at these sites
            sites = {}
            site_user_ids = []
            for site in site_list:
                sites[site['site_id']] = site
                site_user_ids.extend(site['person_ids'])
            existing_site_persons_list = self.driver.shell.GetPersons(site_user_ids,
                                                                      ['person_id', 'key_ids', 'email', 'site_ids'])
            # all requested users are either existing users or new (added) users
            for login_base in users_by_site:
                requested_site_users = users_by_site[login_base]
                for requested_user in requested_site_users:
                    user_found = False
                    # match by site membership + email local-part prefix
                    for existing_user in existing_site_persons_list:
                        for site_id in existing_user['site_ids']:
                            if site_id in sites:
                                site = sites[site_id]
                                if login_base == site['login_base'] and \
                                   existing_user['email'].startswith(requested_user['username']+'@'):
                                    existing_user_ids.append(existing_user['email'])
                                    requested_user['email'] = existing_user['email']
                                    users_dict[existing_user['email']] = requested_user
                                    user_found = True
                                    break
                        if user_found:
                            break
                    if user_found == False:
                        # no match: synthesize a placeholder email address
                        fake_email = requested_user['username'] + '@geni.net'
                        requested_user['email'] = fake_email
                        users_dict[fake_email] = requested_user
        # requested slice users
        requested_user_ids = users_dict.keys()
        # existing slice users
        existing_slice_users_filter = {'person_id': slice_record.get('person_ids', [])}
        existing_slice_users = self.driver.shell.GetPersons(existing_slice_users_filter,
                                                            ['person_id', 'key_ids', 'email'])
        existing_slice_user_ids = [user['email'] for user in existing_slice_users]
        # users to be added, removed or updated
        added_user_ids = set(requested_user_ids).difference(existing_user_ids)
        added_slice_user_ids = set(requested_user_ids).difference(existing_slice_user_ids)
        removed_user_ids = set(existing_slice_user_ids).difference(requested_user_ids)
        updated_user_ids = set(existing_slice_user_ids).intersection(requested_user_ids)
        # Remove stale users (only if we are not appending).
        # Append by default.
        append = options.get('append', True)
        if append == False:
            for removed_user_id in removed_user_ids:
                self.driver.shell.DeletePersonFromSlice(removed_user_id, slice_record['name'])
        # update_existing users
        updated_users_list = [user for user in users_dict.values() if user['email'] in \
          updated_user_ids]
        self.verify_keys(existing_slice_users, updated_users_list, peer, options)
        added_persons = []
        # add new users
        for added_user_id in added_user_ids:
            added_user = users_dict[added_user_id]
            hrn, type = urn_to_hrn(added_user['urn'])
            person = {
                'first_name': added_user.get('first_name', hrn),
                'last_name': added_user.get('last_name', hrn),
                'email': added_user_id,
                'peer_person_id': None,
                'keys': [],
                #'key_ids': added_user.get('key_ids', []),
            }
            person['person_id'] = self.driver.shell.AddPerson(person)
            if peer:
                # NOTE(review): assumes the requested user dict carries a
                # 'person_id' from the remote peer -- confirm with callers
                person['peer_person_id'] = added_user['person_id']
            added_persons.append(person)
            # enable the account
            self.driver.shell.UpdatePerson(person['person_id'], {'enabled': True})
            # add person to site
            self.driver.shell.AddPersonToSite(added_user_id, added_user['site'])
            for key_string in added_user.get('keys', []):
                key = {'key':key_string, 'key_type':'ssh'}
                key['key_id'] = self.driver.shell.AddPersonKey(person['person_id'], key)
                person['keys'].append(key)
            # add the registry record
            # if sfa_peer:
            #     peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \
            #                  'pointer': person['person_id']}
            #     self.registry.register_peer_object(self.credential, peer_dict)
        for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
            # add person to the slice
            self.driver.shell.AddPersonToSlice(added_slice_user_id, slice_record['name'])
            # if this is a peer record then it should already be bound to a peer.
            # no need to worry about it getting bound later
        return added_persons
def verify_keys(self, persons, users, peer, options={}):
# existing keys
key_ids = []
for person in persons:
key_ids.extend(person['key_ids'])
keylist = self.driver.shell.GetKeys(key_ids, ['key_id', 'key'])
keydict = {}
for key in keylist:
keydict[key['key']] = key['key_id']
existing_keys = keydict.keys()
persondict = {}
for person in persons:
persondict[person['email']] = person
# add new keys
requested_keys = []
updated_persons = []
for user in users:
user_keys = user.get('keys', [])
updated_persons.append(user)
for key_string in user_keys:
requested_keys.append(key_string)
if key_string not in existing_keys:
key = {'key': key_string, 'key_type': 'ssh'}
try:
if peer:
person = persondict[user['email']]
self.driver.shell.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
key['key_id'] = self.driver.shell.AddPersonKey(user['email'], key)
if peer:
key_index = user_keys.index(key['key'])
remote_key_id = user['key_ids'][key_index]
self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
finally:
if peer:
self.driver.shell.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
# remove old keys (only if we are not appending)
append = options.get('append', True)
if append == False:
removed_keys = set(existing_keys).difference(requested_keys)
for existing_key_id in keydict:
if keydict[existing_key_id] in removed_keys:
try:
if peer:
self.driver.shell.UnBindObjectFromPeer('key', existing_key_id, peer['shortname'])
self.driver.shell.DeleteKey(existing_key_id)
except:
pass
def verify_slice_attributes(self, slice, requested_slice_attributes, options=None, admin=False):
    """
    Reconcile the tags (attributes) of *slice* with the requested set.

    Existing tags not present in the request are removed (unless
    options['append'] is True), and requested tags whose tag type the
    caller is allowed to manage are added.

    :param slice: PLC slice record (dict with 'slice_id' and 'name').
    :param requested_slice_attributes: list of dicts with 'name', 'value'
        and optionally 'node_id'.
    :param options: optional dict; only 'append' (default True) is read.
    :param admin: when False, only tag types manageable by the 'user' role
        are considered valid.
    """
    if options is None:
        options = {}
    append = options.get('append', True)
    # Get the list of attributes users are able to manage.
    tag_filter = {'category': '*slice*'}
    if not admin:
        tag_filter['|roles'] = ['user']
    slice_attributes = self.driver.shell.GetTagTypes(tag_filter)
    valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]
    # Get current sliver attributes.
    added_slice_attributes = []
    removed_slice_attributes = []
    ignored_slice_attribute_names = []
    existing_slice_attributes = self.driver.shell.GetSliceTags({'slice_id': slice['slice_id']})
    # Determine which existing tags should be removed.
    for slice_tag in existing_slice_attributes:
        if slice_tag['tagname'] in ignored_slice_attribute_names:
            # NOTE(review): ignored_slice_attribute_names starts empty and is
            # only appended to inside this branch, so the branch can never be
            # taken — the apparent intent (ignoring admin-granted tags) is not
            # actually implemented. Left as-is to preserve behavior; confirm
            # the intended semantics before "fixing".
            ignored_slice_attribute_names.append(slice_tag['tagname'])
        else:
            # If an existing slice attribute was not found in the request it
            # should be removed (unless we are appending).
            attribute_found = False
            for requested_attribute in requested_slice_attributes:
                if requested_attribute['name'] == slice_tag['tagname'] and \
                   requested_attribute['value'] == slice_tag['value']:
                    attribute_found = True
                    break
            if not attribute_found and not append:
                removed_slice_attributes.append(slice_tag)
    # Determine which requested attributes should be added.
    for requested_attribute in requested_slice_attributes:
        # Only tag types the caller may manage are considered.
        if requested_attribute['name'] in valid_slice_attribute_names:
            attribute_found = False
            for existing_attribute in existing_slice_attributes:
                if requested_attribute['name'] == existing_attribute['tagname'] and \
                   requested_attribute['value'] == existing_attribute['value']:
                    attribute_found = True
                    break
            if not attribute_found:
                added_slice_attributes.append(requested_attribute)
    # Remove stale attributes.
    for attribute in removed_slice_attributes:
        try:
            self.driver.shell.DeleteSliceTag(attribute['slice_tag_id'])
        except Exception as e:  # was "except Exception, e" (Python-2-only syntax)
            # BUGFIX: log the attribute's own name (tagname); the slice name
            # was previously passed where the attribute name was intended.
            logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'
                        % (attribute['tagname'], attribute['value'], attribute.get('node_id'), str(e)))
    # Add requested attributes.
    for attribute in added_slice_attributes:
        try:
            self.driver.shell.AddSliceTag(slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
        except Exception as e:
            logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'
                        % (attribute['name'], attribute['value'], attribute.get('node_id'), str(e)))
|
# -*- coding: utf-8 -*-
#
# Centreon Broker documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 11 14:40:51 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# CENTREON_RTD is exported by Centreon's documentation build platform; when it
# is the string 'True' the custom Centreon HTML theme is selected below.
on_centreon_rtd = os.environ.get('CENTREON_RTD', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx']
# Cross-reference targets in the Centreon Engine manual resolve via intersphinx.
intersphinx_mapping = {
'centreonengine' : ('http://documentation.centreon.com/docs/centreon-engine/en/latest', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Centreon Broker'
copyright = u'2012, Merethis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.4'
# The full version, including alpha/beta/rc tags.
release = '2.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# Use the Centreon theme on Centreon's doc platform, Sphinx's default elsewhere.
if on_centreon_rtd:
    # The theme to use for HTML and HTML Help pages. See the documentation for
    # a list of builtin themes.
    html_theme = 'centreon'
    # Theme options are theme-specific and customize the look and feel of a theme
    # further. For a list of options available for each theme, see the
    # documentation.
    #html_theme_options = {}
    # Add any paths that contain custom themes here, relative to this directory.
    html_theme_path = ["/srv/themes"]
else:
    html_theme = 'default'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CentreonBrokerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CentreonBroker.tex', u'Centreon Broker Documentation',
u'Merethis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'centreonbroker', u'Centreon Broker Documentation',
[u'Merethis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CentreonBroker', u'Centreon Broker Documentation',
u'Merethis', 'CentreonBroker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
Update the centreon-broker version in the documentation.
# -*- coding: utf-8 -*-
#
# Centreon Broker documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 11 14:40:51 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# CENTREON_RTD is exported by Centreon's documentation build platform; when it
# is the string 'True' the custom Centreon HTML theme is selected below.
on_centreon_rtd = os.environ.get('CENTREON_RTD', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx']
# Cross-reference targets in the Centreon Engine manual resolve via intersphinx.
intersphinx_mapping = {
'centreonengine' : ('http://documentation.centreon.com/docs/centreon-engine/en/latest', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Centreon Broker'
copyright = u'2012, Merethis'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.5'
# The full version, including alpha/beta/rc tags.
release = '2.5.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# Use the Centreon theme on Centreon's doc platform, Sphinx's default elsewhere.
if on_centreon_rtd:
    # The theme to use for HTML and HTML Help pages. See the documentation for
    # a list of builtin themes.
    html_theme = 'centreon'
    # Theme options are theme-specific and customize the look and feel of a theme
    # further. For a list of options available for each theme, see the
    # documentation.
    #html_theme_options = {}
    # Add any paths that contain custom themes here, relative to this directory.
    html_theme_path = ["/srv/themes"]
else:
    html_theme = 'default'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CentreonBrokerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CentreonBroker.tex', u'Centreon Broker Documentation',
u'Merethis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'centreonbroker', u'Centreon Broker Documentation',
[u'Merethis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CentreonBroker', u'Centreon Broker Documentation',
u'Merethis', 'CentreonBroker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
# Copyright (c) 2013 Galah Group LLC
# Copyright (c) 2013 Other contributors as noted in the CONTRIBUTERS file
#
# This file is part of galah-interact-python.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions and classes that are essential when using this library. Is imported
by ``interact/__init__.py`` such that ``interact.core.x`` and ``interact.x``
are equivalent.
"""
import _utils
import os.path
import sys
class TestResult:
    """
    Represents the result of one unit of testing. The goal is to generate a
    number of these and then pass them all out of the test harness with a final
    score.

    :ivar brief: A brief description of the test that was run. This will always
        be displayed to the user.
    :ivar score: The score the student received from this test.
    :ivar max_score: The maximum score the student could have received from this
        test.
    :ivar messages: A list of :class:`TestResult.Message` objects that will be
        joined together appropriately and displayed to the user. Use
        :meth:`add_message` to add to this list.
    :ivar default_message: A string that will be displayed if there are no
        messages. Useful to easily create a "good job" message that is shown
        when no problems were detected.
    :ivar bulleted_messages: A boolean. If ``True``, all of the messages will be
        printed out in bullet point form (a message per bullet). If
        ``False``, they will simply be printed out one-per-line. You can
        set this to ``False`` if only one message will ever be displayed to
        the user, otherwise bullet points usually look better.
    """

    class Message:
        """
        A message to the user. This is the primary mode of giving feedback to
        users. The message text is a ``str.format`` template; positional and
        keyword arguments are applied when the message is rendered.
        """

        def __init__(self, text, *args, **kwargs):
            # We have some special keyword arguments that we want to snatch if
            # present. They deliberately remain in kwargs as well; str.format
            # simply ignores keyword arguments the template does not use.
            self.dscore = kwargs.get("dscore", None)
            self.type = kwargs.get("type", None)

            self.args = args
            self.kwargs = kwargs
            self.text = text

        def __str__(self):
            # Render the template lazily, at display time.
            return self.text.format(*self.args, **self.kwargs)

        def __repr__(self):
            return _utils.default_repr(self)

    def __init__(
            self, brief = None, score = None, max_score = None, messages = None,
            default_message = None, bulleted_messages = True):
        # Use None as the default to avoid sharing one mutable list between
        # instances.
        if messages is None:
            messages = []

        self.brief = brief
        self.score = score
        self.max_score = max_score
        self.messages = messages
        self.default_message = default_message
        self.bulleted_messages = bulleted_messages

    def add_message(self, *args, **kwargs):
        """
        Adds a message to the TestResult. If a single
        :class:`TestResult.Message` object is given it is used directly;
        otherwise a new Message object is constructed and its constructor is
        passed all the arguments.
        """

        if len(args) == 1 and isinstance(args[0], TestResult.Message):
            self.messages.append(args[0])
        else:
            self.messages.append(TestResult.Message(*args, **kwargs))

    def calculate_score(self, starting_score = None, max_score = None,
            min_score = None):
        """
        Automatically calculates the score by adding up the ``dscore`` of
        each message and setting the score of the :class:`TestResult`
        appropriately.

        :param starting_score: This score is added to the sum of every message's
            ``dscore``. If ``None``, ``max_score`` is used.
        :param max_score: The ``max_score`` field of the object is set to this
            value. If ``None``, the current ``max_score`` is used, i.e. no
            change is made.
        :param min_score: If the calculated score is less than this value, this
            value is used instead.
        :returns: ``self``. This allows you to return the result of this
            function from test functions.

        >>> a = TestResult(max_score = 4)
        >>> a.add_message("Foo", dscore = -1)
        >>> a.add_message("Bar!", dscore = -5)
        >>> print a.calculate_score().score
        -2
        >>> print a.score
        -2
        >>> print a.calculate_score(min_score = 0).score
        0
        >>> print a.calculate_score(starting_score = 8, max_score = 6).score
        2
        >>> print a.max_score
        6
        """

        if max_score is None:
            max_score = self.max_score
        if starting_score is None:
            starting_score = max_score

        self.score = starting_score
        self.max_score = max_score
        for i in self.messages:
            # Messages without a dscore do not affect the score.
            if i.dscore is not None:
                self.score += i.dscore

        # BUGFIX: only clamp when min_score was given. The old unguarded
        # "self.score < min_score" was harmless on Python 2 (int < None is
        # False) but raises TypeError on Python 3; the guard preserves the
        # Python 2 behavior on both.
        if min_score is not None and self.score < min_score:
            self.score = min_score

        return self

    def set_passing(self, passing):
        """
        :param passing: Whether the test is passing or not.
        :returns: ``self``. This allows you to return the result of this
            function directly, leading to more concise test functions.

        This function sets ``score`` to either 1 (if ``passing`` is ``True``) or
        0 (if ``passing`` is ``False``). It also sets the ``max_score`` to
        ``1``.

        .. seealso::
            :meth:`is_passing` and :meth:`is_failing`.
        """

        self.score = 1 if passing else 0
        self.max_score = 1

        return self

    def is_passing(self):
        """
        :returns: ``True`` if the score is not ``0`` (note this function *will*
            return ``True`` if the score is negative).

        This function is most useful when dealing with a ``TestResult`` that you
        want to consider either passing or failing, and never anything in
        between.

        .. seealso::
            :meth:`set_passing` and :meth:`is_failing`.
        """

        return self.score != 0

    def is_failing(self):
        """
        :returns: The inverse of what :meth:`is_passing` returns.

        This function is most useful when dealing with a ``TestResult`` that you
        want to consider either passing or failing, and never anything in
        between.

        .. seealso::
            :meth:`set_passing` and :meth:`is_passing`.
        """

        return not self.is_passing()

    def to_galah_dict(self, name):
        """
        :returns: A dict in the shape Galah expects for a single test result;
            missing scores are reported as 0.
        """

        return {
            "name": name,
            "score": 0 if self.score is None else self.score,
            "max_score": 0 if self.max_score is None else self.max_score,
            "message": self.to_str(show_score = False)
        }

    def to_str(self, show_score = True):
        """
        Renders the result as human-readable text: an optional score line,
        the brief description, then the messages (bulleted when
        ``bulleted_messages`` is set) or the default message.
        """

        result = []

        quick_status = ""
        if show_score and self.score is not None:
            quick_status += "Score: %d" % self.score
            if self.max_score is not None and self.max_score != 0:
                quick_status += " out of %d" % self.max_score
        result += [quick_status, ""]

        if self.brief:
            result += [str(self.brief), ""]

        if self.messages:
            for i in self.messages:
                if self.bulleted_messages:
                    result.append(" * " + str(i))
                else:
                    result.append(str(i))
            result.append("")
        elif self.default_message:
            result += [self.default_message, ""]

        # Each section appended a trailing "" separator; drop the last one.
        return "\n".join(result[:-1])

    def __str__(self):
        return self.to_str()

    def __repr__(self):
        return _utils.default_repr(self)
class UniverseSet(set):
    """
    A ``set`` subclass whose membership test always succeeds: every ``in``
    query answers ``True`` (and so every ``not in`` answers ``False``).

    >>> a = UniverseSet()
    >>> "hamster" in a
    True
    >>> "apple sauce" in a
    True
    >>> 3234 in a
    True
    >>> "taco" not in a
    False
    """

    def __init__(self, iterable = None):
        if iterable is None:
            iterable = []
        super(UniverseSet, self).__init__(iterable)

    def __contains__(self, item):
        # Claim membership unconditionally.
        return True
def json_module():
    """
    Locate and return an already-imported, usable JSON module.

    The standard-library ``json`` module is preferred; ``simplejson`` is the
    fallback. When neither can be imported, a friendly ``ImportError`` is
    raised.
    """
    try:
        import json
        return json
    except ImportError:
        pass

    try:
        import simplejson
        return simplejson
    except ImportError:
        raise ImportError(
            "Could not load a suitable JSON module. You can try upgrading "
            "your version of Python or installing the simplejson library."
        )
class Harness:
"""
An omniscient object responsible for driving the behavior of any Test
Harness created using Galah Interact. Create a single one of these when
you create your Test Harness and call the ``start()`` method.
A typical Test Harness will roughly follow the below format.
.. code-block:: python
import interact
harness = interact.Harness()
harness.start()
# Your code here
harness.run_tests()
harness.finish(max_score = some_number)
:ivar sheep_data: The "configuration" values received from outside the
harness (either from Galah or command line arguments). See
:doc:`cli`.
:ivar execution_mode: The mode of execution the harness is running in. Is
set by :meth:`Harness.start` and is ``None`` before it is set. For
information on the different modes, check out :doc:`cli`.
"""
class Test:
    """
    Bookkeeping record for one registered test: its display name, the tests
    it depends on, the callable implementing it, and (once run) its result.
    """

    def __init__(self, name, depends, func, result = None):
        self.name = name
        # Normalize a missing dependency list to an empty one.
        self.depends = depends if depends is not None else []
        self.func = func
        self.result = result
def __init__(self):
    # Everything starts empty/unset; start() fills in sheep_data and
    # execution_mode, and the test() decorator populates tests.
    self.execution_mode = None
    self.tests = {}
    self.sheep_data = {}
def _parse_arguments(self, args = sys.argv[1:]):
    """
    _parse_arguments(args = sys.argv[1:])

    Parses command line arguments.

    :param args: A list of command line arguments to parse. Should not
        include the usual zeroeth argument specifying the executable's
        name.
    :returns: A two tuple, ``(options, args)``. For the meaning of each item
        in the tuple, see the documentation for
        `optparse.OptionParser.parse_args() <http://docs.python.org/2/library/optparse.html#parsing-arguments>`.
    """
    from optparse import OptionParser

    parser = OptionParser(
        description =
            "A test harness created with the Galah Interact library."
    )
    # -m/--mode: restricted to the two known execution modes.
    parser.add_option(
        "-m", "--mode", dest = "mode", action = "store",
        default = "galah", choices = ("galah", "test"),
        help = "Specify the mode of execution the Test Harness is in. "
               "Default mode is %default."
    )
    # -s/--set-value: may be repeated; each use supplies a KEY VALUE pair.
    parser.add_option(
        "-s", "--set-value", dest = "values", action = "append",
        nargs = 2, metavar = "KEY VALUE",
        help = "Sets one of the 'configuration' values."
    )

    return parser.parse_args(args)
@staticmethod
def _guess_values():
    """
    Guesses values for the key value pairs in sheep_data. For more
    information on this check out :doc:`cli`.
    """
    # Used in "test" mode, where no real sheep data arrives on stdin.
    return {
        # Assume the harness is being run from the submission directory.
        "testables_directory": os.getcwd(),
        # Directory containing the top-level harness script.
        "harness_directory": os.path.dirname(_utils.get_root_script_path()),
        "raw_submission": None,
        "raw_assignment": None,
        "raw_harness": None,
        # A UniverseSet makes every queried action appear enabled.
        "actions": UniverseSet()
    }
def start(self, arguments = sys.argv[1:]):
    """
    start(self, arguments = sys.argv[1:])

    Takes in input from the proper source, initializes the harness with
    values it needs to function correctly.

    :param arguments: A list of command line arguments that will be read to
        determine the harness's behavior. See below for more information
        on this.
    :returns: ``None``

    .. seealso::
        :doc:`cli`
    """
    options, args = self._parse_arguments(arguments)

    self.execution_mode = options.mode
    if options.mode == "galah":
        # In galah mode the configuration arrives as JSON on stdin.
        json = json_module()
        self.sheep_data = json.load(sys.stdin)
    elif options.mode == "test":
        # In test mode we guess sane defaults and let -s overrides win.
        self.sheep_data = Harness._guess_values()

        if options.values is not None:
            # These fields are structured, so their values are parsed as JSON.
            JSON_FIELDS = (
                "raw_submission", "raw_assignment", "raw_harness", "actions"
            )

            for k, v in options.values:
                value = v
                if k in JSON_FIELDS:
                    json = json_module()
                    value = json.loads(v)

                self.sheep_data[k] = value
    else:
        # BUGFIX: the message and the tuple were previously passed as two
        # separate arguments to ValueError, so the %s was never
        # interpolated; format the message instead.
        raise ValueError(
            "Execution mode is set to a value that is not recognized: %s"
            % (options.mode, )
        )
def finish(self, score = None, max_score = None):
    """
    Marks the end of the test harness and emits the results. In "test"
    mode the results are printed in a human readable fashion; in "galah"
    mode JSON appropriate for Galah to read is written to stdout.

    :param score: The final score; when ``None`` it is derived by summing
        the scores of all tests that have run.
    :param max_score: The final maximum score; derived the same way when
        ``None``.
    """
    # Derive whichever totals were not supplied from the recorded results.
    if score is None or max_score is None:
        new_score = 0
        new_max_score = 0
        for i in self.tests.values():
            if i is not None:
                # Treat missing/zero scores as contributing nothing.
                if i.result.score:
                    new_score += i.result.score
                if i.result.max_score:
                    new_max_score += i.result.max_score
        if score is None:
            score = new_score
        if max_score is None:
            max_score = new_max_score

    if self.execution_mode == "test":
        # Human-readable report (Python 2 print statements).
        for i in self.tests.values():
            if i.result:
                print i.result
                print "-------"
        print "Final result: %d out of %d" % (score, max_score)
    elif self.execution_mode == "galah":
        # Galah consumes a single JSON document on stdout. The stdlib json
        # module is imported directly here (not via json_module()).
        import json
        results = {
            "score": score,
            "max_score": max_score,
            "tests": []
        }
        for i in self.tests.values():
            if i is not None:
                results["tests"].append(i.result.to_galah_dict(i.name))
        json.dump(results, sys.stdout)
    else:
        # A bad execution mode should be detected in the start() function.
        # This is our fault if a user encounters this (and didn't set
        # execution_mode themselves).
        raise AssertionError("Unknown execution mode.")
def test(self, name, depends = None):
    """
    A decorator factory: registers the decorated function with the harness
    under ``name``, along with an optional list of dependency functions.
    """
    def register(func):
        # Wrap the function so each registration has a distinct callable to
        # key self.tests on.
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        self.tests[wrapper] = Harness.Test(name, depends, wrapper)
        return wrapper

    return register
class CyclicDependency(RuntimeError):
    """
    Raised when the registered tests' dependency graph contains a cycle and
    therefore cannot be ordered for execution.
    """

    def __init__(self, *args, **kwargs):
        super(CyclicDependency, self).__init__(*args, **kwargs)
class FailedDependencies(TestResult):
    """
    A special :class:`TestResult` used by :meth:`Harness.run_tests` whenever
    a test couldn't be run do to one of its dependencies failing.

    .. code-block:: python

        >>> a = interact.Harness.FailedDependencies()
        >>> a.add_failure("Program compiles")
        >>> a.add_failure("Program is sane")
        >>> print a
        Score: 0 out of 10

        This test will only be run if all of the other tests it depends on pass first. Fix those tests *before* worrying about this one.

         * Dependency *Program compiles* failed.
         * Dependency *Program is sane* failed.
    """

    def __init__(self, max_score = 10):
        # BUGFIX: the max_score parameter was previously ignored — a literal
        # 10 was always passed through. Honor the argument (default
        # unchanged, so existing callers see identical behavior).
        TestResult.__init__(
            self,
            brief = "This test will only be run if all of the other tests it "
                    "depends on pass first. Fix those tests *before* worrying "
                    "about this one.",
            score = 0,
            max_score = max_score
        )

    def add_failure(self, test_name):
        """
        Adds a new failure message to the test result signifying a
        particular dependency has failed.
        """
        self.add_message(
            "Dependency *{dependency_name}* failed.",
            dependency_name = test_name
        )
def run_tests(self):
    """
    Runs all of the tests the user has registered.

    :raises: :class:`Harness.CyclicDependency` if a cyclic dependency
        exists among the test functions.

    Any tests that can't be run due to failed dependencies will have
    instances of :class:`Harness.FailedDependencies` as their result.
    """
    # Depth-first-search based topological sort, per
    # http://en.wikipedia.org/wiki/Topological_sorting
    in_progress = set()
    finished = set()

    def visit(node):
        if node in in_progress:
            raise Harness.CyclicDependency(
                "One or more cyclic dependencies exist among your test "
                "functions."
            )

        if node not in finished:
            # Mark the node while we recurse into its dependencies; hitting
            # such a mark again means the "DAG" has a cycle. Hitting a
            # finished node is fine — that just means two nodes share a
            # dependency.
            in_progress.add(node)

            failed = [
                dep for dep in (self.tests[i] for i in node.depends)
                if visit(dep).result.is_failing()
            ]

            in_progress.remove(node)
            finished.add(node)

            if failed:
                node.result = Harness.FailedDependencies()
                for dep in failed:
                    node.result.add_failure(dep.name)
            else:
                node.result = node.func()

        return node

    for test in self.tests.values():
        visit(test)
def student_file(self, filename):
    """
    Given a path to a student's file relative to the root of the student's
    submission, returns an absolute path to that file.
    """
    base = self.sheep_data.get("testables_directory", "../submission/")
    # Resolve relative components so we always hand back an absolute path.
    return _utils.resolve_path(os.path.join(base, filename))
def student_files(self, *args):
    """
    Very similar to student_file. Given many files as arguments, will return
    a list of absolute paths to those files.
    """
    paths = []
    for name in args:
        paths.append(self.student_file(name))
    return paths
Made Harness.tests an ordered dictionary.
* The results are now printed out in a sane fashion: the order in which
the tests were declared.
* Added documentation on the tests dictionary as it was missing
previously.
# Copyright (c) 2013 Galah Group LLC
# Copyright (c) 2013 Other contributers as noted in the CONTRIBUTERS file
#
# This file is part of galah-interact-python.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Functions and classes that are essential when using this library. Is imported
by ``interact/__init__.py`` such that ``interact.core.x`` and ``interact.x``
are equivalent.
"""
import _utils
import os.path
import sys
import collections
class TestResult:
    """
    Represents the result of one unit of testing. The goal is to generate a
    number of these and then pass them all out of the test harness with a
    final score.

    :ivar brief: A brief description of the test that was run. This will
        always be displayed to the user.
    :ivar score: The score the student received from this test.
    :ivar max_score: The maximum score the student could have received from
        this test.
    :ivar messages: A list of :class:`TestResult.Message` objects that will
        be joined together appropriately and displayed to the user. Use
        :meth:`add_message` to add to this list.
    :ivar default_message: A string that will be displayed if there are no
        messages. Useful to easily create a "good job" message that is shown
        when no problems were detected.
    :ivar bulleted_messages: A boolean. If ``True``, all of the messages will
        be printed out in bullet point form (a message per bullet). If
        ``False``, they will simply be printed out one-per-line. You can set
        this to ``False`` if only one message will ever be displayed to the
        user, otherwise bullet points usually look better.
    """

    class Message:
        """
        A message to the user. This is the primary mode of giving feedback
        to users.
        """

        def __init__(self, text, *args, **kwargs):
            # We have some special keyword arguments that we want to snatch
            # if present.
            self.dscore = kwargs.get("dscore", None)
            self.type = kwargs.get("type", None)

            self.args = args
            self.kwargs = kwargs
            self.text = text

        def __str__(self):
            # Formatting is deferred until display time.
            return self.text.format(*self.args, **self.kwargs)

        def __repr__(self):
            return _utils.default_repr(self)

    def __init__(
            self, brief = None, score = None, max_score = None,
            messages = None, default_message = None, bulleted_messages = True):
        # A fresh list is made here to avoid the shared-mutable-default
        # pitfall.
        if messages is None:
            messages = []

        self.brief = brief
        self.score = score
        self.max_score = max_score
        self.messages = messages
        self.default_message = default_message
        self.bulleted_messages = bulleted_messages

    def add_message(self, *args, **kwargs):
        """
        Adds a message to the TestResult. If a single
        :class:`TestResult.Message` object is given, it is used directly;
        otherwise a new Message object is constructed and its constructor is
        passed all the arguments.
        """
        if len(args) == 1 and isinstance(args[0], TestResult.Message):
            self.messages.append(args[0])
        else:
            self.messages.append(TestResult.Message(*args, **kwargs))

    def calculate_score(self, starting_score = None, max_score = None,
            min_score = None):
        """
        Automatically calculates the score by adding up the ``dscore`` of
        each message and setting the score of the :class:`TestResult`
        appropriately.

        :param starting_score: This score is added to the sum of every
            message's ``dscore``. If ``None``, ``max_score`` is used.
        :param max_score: The ``max_score`` field of the object is set to
            this value. If ``None``, the current ``max_score`` is used, i.e.
            no change is made.
        :param min_score: If the calculated score is less than this value,
            this value is used instead.
        :returns: ``self``. This allows you to return the result of this
            function from test functions.

        >>> a = TestResult(max_score = 4)
        >>> a.add_message("Foo", dscore = -1)
        >>> a.add_message("Bar!", dscore = -5)
        >>> print a.calculate_score().score
        -2
        >>> print a.score
        -2
        >>> print a.calculate_score(min_score = 0).score
        0
        >>> print a.calculate_score(starting_score = 8, max_score = 6).score
        2
        >>> print a.max_score
        6
        """
        if max_score is None:
            max_score = self.max_score
        if starting_score is None:
            starting_score = max_score

        self.score = starting_score
        self.max_score = max_score
        for i in self.messages:
            if i.dscore is not None:
                self.score += i.dscore

        # Bug fix: guard against min_score being None (its default). On
        # Python 3 "number < None" raises TypeError; on Python 2 the
        # comparison was always False, so callers see no behavior change.
        if min_score is not None and self.score < min_score:
            self.score = min_score

        return self

    def set_passing(self, passing):
        """
        :param passing: Whether the test is passing or not.
        :returns: ``self``. This allows you to return the result of this
            function directly, leading to more concise test functions.

        This function sets ``score`` to either 1 (if ``passing`` is ``True``)
        or 0 (if ``passing`` is ``False``). It also sets the ``max_score`` to
        ``1``.

        .. seealso::
            :meth:`is_passing` and :meth:`is_failing`.
        """
        self.score = 1 if passing else 0
        self.max_score = 1

        return self

    def is_passing(self):
        """
        :returns: ``True`` if the score is not ``0`` (note this function
            *will* return ``True`` if the score is negative).

        This function is most useful when dealing with a ``TestResult`` that
        you want to consider either passing or failing, and never anything
        in between.

        .. seealso::
            :meth:`set_passing` and :meth:`is_failing`.
        """
        return self.score != 0

    def is_failing(self):
        """
        :returns: The inverse of what :meth:`is_passing` returns.

        This function is most useful when dealing with a ``TestResult`` that
        you want to consider either passing or failing, and never anything
        in between.

        .. seealso::
            :meth:`set_passing` and :meth:`is_passing`.
        """
        return not self.is_passing()

    def to_galah_dict(self, name):
        # Galah expects numeric scores, so None collapses to 0 here.
        return {
            "name": name,
            "score": 0 if self.score is None else self.score,
            "max_score": 0 if self.max_score is None else self.max_score,
            "message": self.to_str(show_score = False)
        }

    def to_str(self, show_score = True):
        # Each "section" appends a trailing empty string so sections are
        # separated by blank lines; the final one is stripped at the end.
        result = []

        quick_status = ""
        if show_score and self.score is not None:
            quick_status += "Score: %d" % self.score
            if self.max_score is not None and self.max_score != 0:
                quick_status += " out of %d" % self.max_score
            result += [quick_status, ""]

        if self.brief:
            result += [str(self.brief), ""]

        if self.messages:
            for i in self.messages:
                if self.bulleted_messages:
                    result.append(" * " + str(i))
                else:
                    result.append(str(i))
            result.append("")
        elif self.default_message:
            result += [self.default_message, ""]

        return "\n".join(result[:-1])

    def __str__(self):
        return self.to_str()

    def __repr__(self):
        return _utils.default_repr(self)
class UniverseSet(set):
    """
    A special ``set`` such that every ``in`` query returns ``True``.

    >>> a = UniverseSet()
    >>> "hamster" in a
    True
    >>> "apple sauce" in a
    True
    >>> 3234 in a
    True
    >>> "taco" not in a
    False
    """

    def __init__(self, iterable = None):
        set.__init__(self, [] if iterable is None else iterable)

    def __contains__(self, item):
        # Everything is a member of the universe.
        return True
def json_module():
    """
    A handy function that will try to find a suitable JSON module to import
    and return that module (already loaded).

    Basically, it tries to load the ``json`` module, and if that doesn't
    exist it tries to load the ``simplejson`` module. If that doesn't exist,
    a friendly ``ImportError`` is raised.
    """
    try:
        import json
        return json
    except ImportError:
        pass

    try:
        import simplejson
        return simplejson
    except ImportError:
        raise ImportError(
            "Could not load a suitable JSON module. You can try upgrading "
            "your version of Python or installing the simplejson library."
        )
class Harness:
    """
    An omniscient object responsible for driving the behavior of any Test
    Harness created using Galah Interact. Create a single one of these when
    you create your Test Harness and call the ``start()`` method.

    A typical Test Harness will roughly follow the below format.

    .. code-block:: python

        import interact
        harness = interact.Harness()
        harness.start()

        # Your code here

        harness.run_tests()
        harness.finish(max_score = some_number)

    :ivar sheep_data: The "configuration" values received from outside the
        harness (either from Galah or command line arguments). See
        :doc:`cli`.
    :ivar execution_mode: The mode of execution the harness is running in.
        Is set by :meth:`Harness.start` and is ``None`` before it is set.
        For information on the different modes, check out :doc:`cli`.
    :ivar tests: An ``OrderedDict`` mapping test functions to
        :class:`Harness.Test` objects.
    """

    class Test:
        """
        Meta information on a single test.
        """

        def __init__(self, name, depends, func, result = None):
            self.name = name
            # Normalize "no dependencies" into an empty list so callers can
            # always iterate over it.
            self.depends = [] if depends is None else depends
            self.func = func
            self.result = result

    def __init__(self):
        self.sheep_data = {}
        # Ordered so results are reported in declaration order.
        self.tests = collections.OrderedDict()
        self.execution_mode = None

    def _parse_arguments(self, args = None):
        """
        _parse_arguments(args = sys.argv[1:])

        Parses command line arguments.

        :param args: A list of command line arguments to parse. Should not
            include the usual zeroeth argument specifying the executable's
            name. Defaults to ``sys.argv[1:]``.
        :returns: A two tuple, ``(options, args)``. For the meaning of each
            item in the tuple, see the documentation for
            `optparse.OptionParser.parse_args() <http://docs.python.org/2/library/optparse.html#parsing-arguments>`.
        """
        # Evaluate the default lazily; the old "args = sys.argv[1:]" default
        # was captured once at import time.
        if args is None:
            args = sys.argv[1:]

        from optparse import OptionParser, make_option

        option_list = [
            make_option(
                "-m", "--mode", dest = "mode", action = "store",
                default = "galah", choices = ("galah", "test"),
                help = "Specify the mode of execution the Test Harness is in. "
                    "Default mode is %default."
            ),
            make_option(
                "-s", "--set-value", dest = "values", action = "append",
                nargs = 2, metavar = "KEY VALUE",
                help = "Sets one of the 'configuration' values."
            )
        ]

        parser = OptionParser(
            description =
                "A test harness created with the Galah Interact library.",
            option_list = option_list
        )

        options, args = parser.parse_args(args)

        return (options, args)

    @staticmethod
    def _guess_values():
        """
        Guesses values for the key value pairs in sheep_data. For more
        information on this check out :doc:`cli`.
        """
        return {
            "testables_directory": os.getcwd(),
            "harness_directory": os.path.dirname(_utils.get_root_script_path()),
            "raw_submission": None,
            "raw_assignment": None,
            "raw_harness": None,
            "actions": UniverseSet()
        }

    def start(self, arguments = None):
        """
        start(self, arguments = sys.argv[1:])

        Takes in input from the proper source, initializes the harness with
        values it needs to function correctly.

        :param arguments: A list of command line arguments that will be read
            to determine the harness's behavior. Defaults to
            ``sys.argv[1:]``.
        :returns: ``None``

        .. seealso::
            :doc:`cli`
        """
        if arguments is None:
            arguments = sys.argv[1:]

        options, args = self._parse_arguments(arguments)

        self.execution_mode = options.mode

        if options.mode == "galah":
            json = json_module()
            self.sheep_data = json.load(sys.stdin)
        elif options.mode == "test":
            self.sheep_data = Harness._guess_values()

            if options.values is not None:
                # These keys hold structured data and must be decoded from
                # JSON rather than stored as raw strings.
                JSON_FIELDS = (
                    "raw_submission", "raw_assignment", "raw_harness",
                    "actions"
                )
                for k, v in options.values:
                    value = v
                    if k in JSON_FIELDS:
                        json = json_module()
                        value = json.loads(v)
                    self.sheep_data[k] = value
        else:
            # Bug fix: the mode was previously passed as a second argument
            # to ValueError and never interpolated into the message.
            raise ValueError(
                "Execution mode is set to a value that is not recognized: %s"
                % (options.mode,)
            )

    def finish(self, score = None, max_score = None):
        """
        Marks the end of the test harness. When start was not initialized
        via command line arguments, this command will print out the test
        results in a human readable fashion. Otherwise it will print out
        JSON appropriate for Galah to read.
        """
        if score is None or max_score is None:
            new_score = 0
            new_max_score = 0
            # Bug fix: the guard used to be "if i is not None", but the
            # values of the tests dict are always Test objects; the thing
            # that may legitimately be None is i.result (a test that never
            # ran).
            for i in self.tests.values():
                if i.result is not None:
                    if i.result.score:
                        new_score += i.result.score
                    if i.result.max_score:
                        new_max_score += i.result.max_score

            if score is None:
                score = new_score
            if max_score is None:
                max_score = new_max_score

        if self.execution_mode == "test":
            for i in self.tests.values():
                if i.result:
                    print(i.result)
                    print("-------")
            print("Final result: %d out of %d" % (score, max_score))
        elif self.execution_mode == "galah":
            import json
            results = {
                "score": score,
                "max_score": max_score,
                "tests": []
            }
            for i in self.tests.values():
                if i.result is not None:
                    results["tests"].append(i.result.to_galah_dict(i.name))
            json.dump(results, sys.stdout)
        else:
            # A bad execution mode should be detected in the start()
            # function. This is our fault if a user encounters this (and
            # didn't set execution_mode themselves).
            raise AssertionError("Unknown execution mode.")

    def test(self, name, depends = None):
        """
        A decorator that takes in a test name and some dependencies and
        makes the harness aware of it all.
        """
        def test_decorator(func):
            def inner(*args, **kwargs):
                return func(*args, **kwargs)

            self.tests[inner] = Harness.Test(name, depends, inner)
            return inner

        return test_decorator

    class CyclicDependency(RuntimeError):
        """Raised when the declared test dependencies form a cycle."""

        def __init__(self, *args, **kwargs):
            RuntimeError.__init__(self, *args, **kwargs)

    class FailedDependencies(TestResult):
        """
        A special :class:`TestResult` used by :meth:`Harness.run_tests`
        whenever a test couldn't be run due to one of its dependencies
        failing.

        .. code-block:: python

            >>> a = interact.Harness.FailedDependencies()
            >>> a.add_failure("Program compiles")
            >>> a.add_failure("Program is sane")
            >>> print a
            Score: 0 out of 10

            This test will only be run if all of the other tests it depends on pass first. Fix those tests *before* worrying about this one.

             * Dependency *Program compiles* failed.
             * Dependency *Program is sane* failed.
        """

        def __init__(self, max_score = 10):
            TestResult.__init__(
                self,
                brief = "This test will only be run if all of the other tests it "
                    "depends on pass first. Fix those tests *before* worrying "
                    "about this one.",
                score = 0,
                # Bug fix: the max_score argument used to be ignored in
                # favor of a hard-coded 10.
                max_score = max_score
            )

        def add_failure(self, test_name):
            """
            Adds a new failure message to the test result signifying a
            particular dependency has failed.
            """
            self.add_message(
                "Dependency *{dependency_name}* failed.",
                dependency_name = test_name
            )

    def run_tests(self):
        """
        Runs all of the tests the user has registered.

        :raises: :class:`Harness.CyclicDependency` if a cyclic dependency
            exists among the test functions.

        Any tests that can't be run due to failed dependencies will have
        instances of :class:`Harness.FailedDependencies` as their result.
        """
        # Do a topological sort with a simple depth-first-search algorithm.
        # Thank you wikipedia for the pseudocode and inspiration:
        # http://en.wikipedia.org/wiki/Topological_sorting
        temporary_marks = set()
        permanent_marks = set()

        def visit(node):
            if node in temporary_marks:
                raise Harness.CyclicDependency(
                    "One or more cyclic dependencies exist among your test "
                    "functions."
                )

            if node not in permanent_marks:
                # Temporarily mark the node. This is necessary because if we
                # end up hitting a node that has a temporary mark as we
                # recurse through the "DAG", it means we have a cycle.
                # Hitting a permanently marked node when we're recursing is
                # fine because all that means is two nodes share the same
                # dependency.
                temporary_marks.add(node)

                dependencies_failed = []
                for dependency in (self.tests[i] for i in node.depends):
                    if visit(dependency).result.is_failing():
                        dependencies_failed.append(dependency)

                temporary_marks.remove(node)
                permanent_marks.add(node)

                if not dependencies_failed:
                    node.result = node.func()
                else:
                    node.result = Harness.FailedDependencies()
                    for i in dependencies_failed:
                        node.result.add_failure(i.name)

            return node

        for test in self.tests.values():
            visit(test)

    def student_file(self, filename):
        """
        Given a path to a student's file relative to the root of the
        student's submission, returns an absolute path to that file.
        """
        testables_directory = \
            self.sheep_data.get("testables_directory", "../submission/")
        path = os.path.join(testables_directory, filename)

        # Ensure that we return an absolute path.
        return _utils.resolve_path(path)

    def student_files(self, *args):
        """
        Very similar to student_file. Given many files as arguments, will
        return a list of absolute paths to those files.
        """
        return [self.student_file(i) for i in args]
|
__author__ = 'bmasquelier'
#
# Python script for Arduidom communication
#
import serial
import thread
from socket import *
import time
import subprocess
# ANSI terminal colour escape codes used for log output.
VIOLET = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
# Shared state used by the serial, TCP and timer threads.
data = ""
pin1 = ""
oldpin1 = "x"
# Last reported value of each pin: D00-D13, a "." separator, then A0-A5.
pinvalue = ["x", "x", "x", "x", "x", "x", "x", "x", "x", "x", "x", "x", "x", "x", ".", "xxx", "xxx", "xxx", "xxx", "xxx"]
# Mode of each pin as set in COMServer ("o"=OUTPUT, "i"=INPUT, "a"=analog
# input, "z"=disabled, "r"=radio RX, "t"=radio TX, "p"=PWM).
pinmode = ["z", "z", "z", "z", "z", "z", "z", "z", "z", "z", "z", "z", "z", "z", ".", "z", "z", "z", "z", "z", "z"]
# Previous pin values, kept to detect changes between polls.
oldpinvalue = ["y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y"]
count = 1
# Most recent line received from the Arduino (cleared before each command).
arduino_rx = "aaa"
currentline = 99
# Index into pinmode/pinvalue of the pin configured as the radio receiver.
radiorxpin = 0
def print_time(threadName, delay):
global pinvalue
global pinmode
global count
global arduino_rx
while True:
time.sleep(delay)
count += 1
if (count > 5):
count = 0
print("Send $$RF to Arduino")
arduino_rx = ""
ArduinoPort.write("$$RF")
while (arduino_rx == ""):
time.sleep(0.01)
print("arduino_rx=" + arduino_rx)
pinvalue = arduino_rx.rsplit(',')
print VIOLET + "%s: %s" % ( threadName, time.ctime(time.time()) )
print " D00 D01 D02 D03 D04 D05 D06 D07 D08 D09 D10 D11 D12 D13 x A0 A1 A2 A3 A4 A5"
print "pinvalue=" + str(pinvalue)
print " pinmode=" + str(pinmode)
print "arduino_rx=" + arduino_rx
def handler(clientsocket, clientaddr):
global pinvalue
global pinmode
global arduino_rx
print GREEN + "Count=" + str(count)
print GREEN + "Accepted connection from: " + str(clientaddr)
while 1:
data = clientsocket.recv(1024)
data = data.replace('\n', '')
data = data.replace('\r', '')
if not data:
break
else:
print ("Data From TCP : " + data)
if data[0:2] == 'HI':
print("HI Received !")
print("Send " + "$$" + data + " to Arduino")
arduino_rx = ""
ArduinoPort.write("$$" + data)
print("Wait Arduino Response...")
while (arduino_rx == ""):
time.sleep(0.1)
print ("Arduino says " + arduino_rx)
arduino_rx = ""
msg = data + "_OK"
print("Send " + msg + " to jeeDom")
clientsocket.send(msg)
if data[0:2] == 'CP':
print("CP Received !")
print("Send " + "$$" + data + " to Arduino")
arduino_rx = ""
ArduinoPort.write("$$" + data)
print("Wait Arduino Response...")
while (arduino_rx == ""):
time.sleep(0.1)
print ("Arduino says " + arduino_rx)
arduino_rx = ""
msg = data + "_OK"
print("Send " + msg + " to jeeDom")
clientsocket.send(msg)
if data[0:2] == 'SP':
print("SP Received !")
print("Send " + "$$" + data + " to Arduino")
ArduinoPort.write("$$" + data)
msg = data + "_OK"
print("Send " + msg + " to jeeDom")
clientsocket.send(msg)
if data[0:2] == 'SR':
print("SR Received !")
print("Send " + "$$" + data + " to Arduino")
ArduinoPort.write("$$" + data)
msg = data + "_OK"
print("Send " + msg + " to jeeDom")
clientsocket.send(msg)
if data[0:2] == 'GP':
pintoread = ((10 * int(data[2])) + int(data[3]))
print("GP " + str(pintoread) + "Received !")
print("Send $$RF to Arduino")
arduino_rx = ""
ArduinoPort.write("$$RF")
while (arduino_rx == ""):
time.sleep(0.01)
print("arduino_rx=" + arduino_rx)
pinvalue = arduino_rx.rsplit(',')
msg = "GP" + str(pintoread).zfill(2) + "=" + pinvalue[pintoread] + "_OK"
print("Send " + msg + " to jeeDom")
clientsocket.send(msg)
if data[0:2] == 'RF': # *** Refresh Datas
print("RF Received !")
print("Send $$RF to Arduino")
arduino_rx = ""
ArduinoPort.write("$$RF")
while (arduino_rx == ""):
time.sleep(0.01)
print("arduino_rx=" + arduino_rx)
pinvalue = arduino_rx.rsplit(',')
break
print("Close Socket")
clientsocket.close()
def tcpServerThread(threadName, delay):
global pinvalue
global pinmode
global arduino_rx
print GREEN + "Thread " + threadName + " Started."
host = "127.0.0.1"
port = 58174
addr = (host, port)
serversocket = socket(AF_INET, SOCK_STREAM)
serversocket.bind(addr)
serversocket.listen(0)
if not serversocket:
exit()
while 1:
print GREEN + "Server is waiting for connection..."
clientsocket, clientaddr = serversocket.accept()
thread.start_new_thread(handler, (clientsocket, clientaddr))
print "Server Stop to listening !"
serversocket.close()
def COMServer(threadName, delay):
global pinvalue
global pinmode
global arduino_rx
global currentline
global radiorxpin
print YELLOW + "Thread " + threadName + " Started."
while True: # This is the main loop of program...................................................................
line = ''
while True:
line = ArduinoPort.readline()
if line != '':
break
if line != '':
line = line.replace('\n', '')
line = line.replace('\r', '')
#if (line != arduino_rx): #Anti repetition
arduino_rx = line
print BLUE + "Arduino > [", line, "]"
if line.find("Pin") > -1: # Definition des pin modes via CP ou RESET
if line.find(" is ") > -1:
offset = 0
if line.find("APin") > -1:
offset = 1
print("Pin Mode configuration detected.")
currentline = int(line[4+offset])
if line[5+offset] != ' ':
currentline = 10 + int(line[5+offset])
if offset == 1:
currentline += 15
print("Current line = " + str(currentline))
if line.find("OUTPUT") > -1:
pinmode[currentline] = "o"
if line.find("INPUT") > -1:
pinmode[currentline] = "i"
if line.find("A-INPUT") > -1:
pinmode[currentline] = "a"
if line.find("DISABLED") > -1:
pinmode[currentline] = "z"
if line.find("Radio RX") > -1:
pinmode[currentline] = "r"
radiorxpin = currentline
if line.find("Radio TX") > -1:
pinmode[currentline] = "t"
if line.find("PWM") > -1:
pinmode[currentline] = "p"
if line.find("PIN") > -1:
#print "CRKKKK:" + str(line.find("SET"))
if line.find("SET") < 0:
pinnumber = int(line[3])
pinvalue[pinnumber] = int(line[5])
print(BLUE + "Pin number " + str(pinnumber))
print("Value " + str(pinvalue[pinnumber]))
cmd = 'nice -n 19 /usr/bin/php /usr/share/nginx/www/jeedom/plugins/arduidom/core/php/jeeArduidom.php '
cmd += pinnumber
cmd += "="
cmd += pinvalue[pinnumber]
print(RED + cmd)
if line.find("RFD:") > -1:
print "Radio Code Received:"
if line.find("SET") < 0:
pinnumber = radiorxpin
print(BLUE + "Pin number " + str(pinnumber))
print("Value " + str(pinvalue[pinnumber]))
cmd = 'nice -n 19 /usr/bin/php /usr/share/nginx/www/jeedom/plugins/arduidom/core/php/jeeArduidom.php '
cmd += str(pinnumber)
cmd += "="
cmd += '"' + line + '"'
print(RED + cmd)
subprocess.Popen(cmd, shell=True)
print RED
print
print
print
print
print
print
print "######################################"
print "# ArduiDom - Arduino Link for jeeDom #"
print "# by Bobox 59 #"
print "######################################"
print
print "Opening Arduino USB Port..."
ArduinoPort = serial.Serial('/dev/ttyUSB0', 115200, timeout=0.1)
# ArduinoPort = serial.Serial('/dev/cu.usbserial-A500SYZT', 115200, timeout=0.1)
print YELLOW + "Launch USB Thread..."
try:
thread.start_new_thread( COMServer, ("TH-COMServer", 1))
except:
print "Error with Thread TH-COMServer"
time.sleep(1)
print RED + "En attente de l'arduino (HELLO)"
ArduinoPort.write("$$HI")
while (arduino_rx !="HELLO"):
time.sleep(0.1)
arduino_rx = ""
print ("Launch TCP Thread...")
try:
thread.start_new_thread( tcpServerThread, ("TH-TcpServer", 1))
except:
print "Error with Thread TH-TcpServer"
try:
thread.start_new_thread( print_time, ("TH-time", 2))
except:
print "Error with Thread TH-Time"
while 1:
pass
ajout routine detection changements
detection des changements de valeurs avec envoi au jeedom
__author__ = 'bmasquelier'
#
# Python script for Arduidom communication
#
import serial
import thread
from socket import *
import time
import subprocess
# ANSI terminal colour escape codes used for log output.
VIOLET = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
# Shared state used by the serial, TCP and timer threads.
data = ""
pin1 = ""
oldpin1 = "x"
# Last reported value of each pin: D00-D13, a "." separator, then A0-A5.
pinvalue = ["x", "x", "x", "x", "x", "x", "x", "x", "x", "x", "x", "x", "x", "x", ".", "xxx", "xxx", "xxx", "xxx", "xxx"]
# Mode of each pin as set in COMServer ("o"=OUTPUT, "i"=INPUT, "a"=analog
# input, "z"=disabled, "r"=radio RX, "t"=radio TX, "p"=PWM).
pinmode = ["z", "z", "z", "z", "z", "z", "z", "z", "z", "z", "z", "z", "z", "z", ".", "z", "z", "z", "z", "z", "z"]
# Previous pin values, kept to detect changes between polls.
oldpinvalue = ["y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y", "y"]
count = 1
# Most recent line received from the Arduino (cleared before each command).
arduino_rx = "aaa"
currentline = 99
# Index into pinmode/pinvalue of the pin configured as the radio receiver.
radiorxpin = 0
def print_time(threadName, delay):
global pinvalue
global pinmode
global count
global arduino_rx
while True:
time.sleep(delay)
count += 1
if (count > 2):
count = 1
print("Send $$RF to Arduino")
arduino_rx = ""
ArduinoPort.write("$$RF")
while (arduino_rx == ""):
time.sleep(0.01)
print("arduino_rx=" + arduino_rx)
pinvalue = arduino_rx.rsplit(',')
print VIOLET + "%s: %s" % ( threadName, time.ctime(time.time()) )
print " D00 D01 D02 D03 D04 D05 D06 D07 D08 D09 D10 D11 D12 D13 x A0 A1 A2 A3 A4 A5"
print "pinvalue=" + str(pinvalue)
if (oldpinvalue != pinvalue):
for pinnumber in range(0, 15):
cmd = 'nice -n 19 /usr/bin/php /usr/share/nginx/www/jeedom/plugins/arduidom/core/php/jeeArduidom.php '
cmd += pinnumber
cmd += "="
cmd += pinvalue[pinnumber]
print(RED + cmd)
oldpinvalue = pinvalue
print " pinmode=" + str(pinmode)
print "arduino_rx=" + arduino_rx
def handler(clientsocket, clientaddr):
global pinvalue
global pinmode
global arduino_rx
print GREEN + "Count=" + str(count)
print GREEN + "Accepted connection from: " + str(clientaddr)
while 1:
data = clientsocket.recv(1024)
data = data.replace('\n', '')
data = data.replace('\r', '')
if not data:
break
else:
print ("Data From TCP : " + data)
if data[0:2] == 'HI':
print("HI Received !")
print("Send " + "$$" + data + " to Arduino")
arduino_rx = ""
ArduinoPort.write("$$" + data)
print("Wait Arduino Response...")
while (arduino_rx == ""):
time.sleep(0.1)
print ("Arduino says " + arduino_rx)
arduino_rx = ""
msg = data + "_OK"
print("Send " + msg + " to jeeDom")
clientsocket.send(msg)
if data[0:2] == 'CP':
print("CP Received !")
print("Send " + "$$" + data + " to Arduino")
arduino_rx = ""
ArduinoPort.write("$$" + data)
print("Wait Arduino Response...")
while (arduino_rx == ""):
time.sleep(0.1)
print ("Arduino says " + arduino_rx)
arduino_rx = ""
msg = data + "_OK"
print("Send " + msg + " to jeeDom")
clientsocket.send(msg)
if data[0:2] == 'SP':
print("SP Received !")
print("Send " + "$$" + data + " to Arduino")
ArduinoPort.write("$$" + data)
msg = data + "_OK"
print("Send " + msg + " to jeeDom")
clientsocket.send(msg)
if data[0:2] == 'SR':
print("SR Received !")
print("Send " + "$$" + data + " to Arduino")
ArduinoPort.write("$$" + data)
msg = data + "_OK"
print("Send " + msg + " to jeeDom")
clientsocket.send(msg)
if data[0:2] == 'GP':
pintoread = ((10 * int(data[2])) + int(data[3]))
print("GP " + str(pintoread) + "Received !")
print("Send $$RF to Arduino")
arduino_rx = ""
ArduinoPort.write("$$RF")
while (arduino_rx == ""):
time.sleep(0.01)
print("arduino_rx=" + arduino_rx)
pinvalue = arduino_rx.rsplit(',')
msg = "GP" + str(pintoread).zfill(2) + "=" + pinvalue[pintoread] + "_OK"
print("Send " + msg + " to jeeDom")
clientsocket.send(msg)
if data[0:2] == 'RF': # *** Refresh Datas
print("RF Received !")
print("Send $$RF to Arduino")
arduino_rx = ""
ArduinoPort.write("$$RF")
while (arduino_rx == ""):
time.sleep(0.01)
print("arduino_rx=" + arduino_rx)
pinvalue = arduino_rx.rsplit(',')
break
print("Close Socket")
clientsocket.close()
def tcpServerThread(threadName, delay):
global pinvalue
global pinmode
global arduino_rx
print GREEN + "Thread " + threadName + " Started."
host = "127.0.0.1"
port = 58174
addr = (host, port)
serversocket = socket(AF_INET, SOCK_STREAM)
serversocket.bind(addr)
serversocket.listen(0)
if not serversocket:
exit()
while 1:
print GREEN + "Server is waiting for connection..."
clientsocket, clientaddr = serversocket.accept()
thread.start_new_thread(handler, (clientsocket, clientaddr))
print "Server Stop to listening !"
serversocket.close()
def COMServer(threadName, delay):
global pinvalue
global pinmode
global arduino_rx
global currentline
global radiorxpin
print YELLOW + "Thread " + threadName + " Started."
while True: # This is the main loop of program...................................................................
line = ''
while True:
line = ArduinoPort.readline()
if line != '':
break
if line != '':
line = line.replace('\n', '')
line = line.replace('\r', '')
#if (line != arduino_rx): #Anti repetition
arduino_rx = line
print BLUE + "Arduino > [", line, "]"
if line.find("Pin") > -1: # Definition des pin modes via CP ou RESET
if line.find(" is ") > -1:
offset = 0
if line.find("APin") > -1:
offset = 1
print("Pin Mode configuration detected.")
currentline = int(line[4+offset])
if line[5+offset] != ' ':
currentline = 10 + int(line[5+offset])
if offset == 1:
currentline += 15
print("Current line = " + str(currentline))
if line.find("OUTPUT") > -1:
pinmode[currentline] = "o"
if line.find("INPUT") > -1:
pinmode[currentline] = "i"
if line.find("A-INPUT") > -1:
pinmode[currentline] = "a"
if line.find("DISABLED") > -1:
pinmode[currentline] = "z"
if line.find("Radio RX") > -1:
pinmode[currentline] = "r"
radiorxpin = currentline
if line.find("Radio TX") > -1:
pinmode[currentline] = "t"
if line.find("PWM") > -1:
pinmode[currentline] = "p"
if line.find("PIN") > -1:
#print "CRKKKK:" + str(line.find("SET"))
if line.find("SET") < 0:
pinnumber = int(line[3])
pinvalue[pinnumber] = int(line[5])
print(BLUE + "Pin number " + str(pinnumber))
print("Value " + str(pinvalue[pinnumber]))
cmd = 'nice -n 19 /usr/bin/php /usr/share/nginx/www/jeedom/plugins/arduidom/core/php/jeeArduidom.php '
cmd += pinnumber
cmd += "="
cmd += pinvalue[pinnumber]
print(RED + cmd)
if line.find("RFD:") > -1:
print "Radio Code Received:"
if line.find("SET") < 0:
pinnumber = radiorxpin
print(BLUE + "Pin number " + str(pinnumber))
print("Value " + str(pinvalue[pinnumber]))
cmd = 'nice -n 19 /usr/bin/php /usr/share/nginx/www/jeedom/plugins/arduidom/core/php/jeeArduidom.php '
cmd += str(pinnumber)
cmd += "="
cmd += '"' + line + '"'
print(RED + cmd)
subprocess.Popen(cmd, shell=True)
print RED
print
print
print
print
print
print
print "######################################"
print "# ArduiDom - Arduino Link for jeeDom #"
print "# by Bobox 59 #"
print "######################################"
print
print "Opening Arduino USB Port..."
ArduinoPort = serial.Serial('/dev/ttyUSB0', 115200, timeout=0.1)
# ArduinoPort = serial.Serial('/dev/cu.usbserial-A500SYZT', 115200, timeout=0.1)
print YELLOW + "Launch USB Thread..."
try:
thread.start_new_thread( COMServer, ("TH-COMServer", 1))
except:
print "Error with Thread TH-COMServer"
time.sleep(1)
print RED + "En attente de l'arduino (HELLO)"
ArduinoPort.write("$$HI")
while (arduino_rx !="HELLO"):
time.sleep(0.1)
arduino_rx = ""
print ("Launch TCP Thread...")
try:
thread.start_new_thread( tcpServerThread, ("TH-TcpServer", 1))
except:
print "Error with Thread TH-TcpServer"
try:
thread.start_new_thread( print_time, ("TH-time", 1))
except:
print "Error with Thread TH-Time"
while 1:
pass
|
import threading
import traceback
import time
from Queue import Queue
from sfa.util.sfalogging import logger
def ThreadedMethod(callable, results, errors):
    """
    A function decorator that returns a running thread. The thread
    runs the specified callable and stores the result in the specified
    results queue; on failure, the formatted traceback is put on the
    errors queue instead.
    """
    # NOTE: the parameter name `callable` shadows the builtin, but is kept
    # for interface compatibility with existing callers.
    def wrapper(args, kwds):
        class ThreadInstance(threading.Thread):
            def run(self):
                try:
                    results.put(callable(*args, **kwds))
                except Exception:
                    # Fixed from `except Exception, e` -- Python-2-only
                    # syntax that bound an unused name; `except Exception:`
                    # is valid in both Python 2 and 3.
                    logger.log_exc('ThreadManager: Error in thread: ')
                    errors.put(traceback.format_exc())

        thread = ThreadInstance()
        thread.start()
        return thread

    return wrapper
class ThreadManager:
    """
    ThreadManager executes a callable in a thread and stores the result
    in a thread safe queue.
    """

    def __init__(self):
        # Per-instance state. These were class-level attributes, which made
        # every ThreadManager instance share one results queue, one errors
        # queue and one thread list.
        self.results = Queue()
        self.errors = Queue()
        self.threads = []

    def run(self, method, *args, **kwds):
        """
        Execute a callable in a separate thread.
        """
        method = ThreadedMethod(method, self.results, self.errors)
        thread = method(args, kwds)
        self.threads.append(thread)

    # Alias kept for callers that use start() instead of run().
    start = run

    def join(self):
        """
        Wait for all threads to complete.
        """
        for thread in self.threads:
            thread.join()

    def get_results(self, lenient=True):
        """
        Return a list of all the results so far. Blocks until
        all threads are finished.

        If lenient is set to False the error queue will be checked before
        the response is returned. If there are errors in the queue an
        exception will be raised with the first of them.
        """
        self.join()
        results = []
        if not lenient:
            errors = self.get_errors()
            if errors:
                raise Exception(errors[0])
        while not self.results.empty():
            results.append(self.results.get())
        return results

    def get_errors(self):
        """
        Return a list of all errors. Blocks until all threads are finished.
        """
        self.join()
        errors = []
        while not self.errors.empty():
            errors.append(self.errors.get())
        return errors

    def get_return_value(self):
        """
        Get the value that should be returned to the client. If there are
        errors then the first error is returned. If there are no errors,
        then the first result is returned.

        NOTE(review): not implemented -- the body is only this docstring,
        so the method returns None.
        """
if __name__ == '__main__':
    # Smoke test: two workers that succeed and one that raises.

    def f(name, n, sleep=1):
        # Well-behaved worker: collects n..n+4, printing progress.
        nums = []
        for i in range(n, n+5):
            print "%s: %s" % (name, i)
            nums.append(i)
            time.sleep(sleep)
        return nums

    def e(name, n, sleep=1):
        # Failing worker: when the loop reaches the string elements
        # ('n', 'b'), i + 1 raises TypeError, which ThreadedMethod puts
        # on the errors queue. (range(...) + [...] is Python-2-only.)
        nums = []
        for i in range(n, n+3) + ['n', 'b']:
            print "%s: 1 + %s:" % (name, i)
            nums.append(i + 1)
            time.sleep(sleep)
        return nums

    threads = ThreadManager()
    threads.run(f, "Thread1", 10, 2)
    threads.run(f, "Thread2", -10, 1)
    threads.run(e, "Thread3", 19, 1)
    #results = threads.get_results()
    #errors = threads.get_errors()
    #print "Results:", results
    #print "Errors:", errors
    # lenient=False makes get_results raise, because Thread3 put an error
    # on the queue.
    results_xlenient = threads.get_results(lenient=False)
ThreadManager variables should be instance variables, not class variables
import threading
import traceback
import time
from Queue import Queue
from sfa.util.sfalogging import logger
def ThreadedMethod(callable, results, errors):
    """
    A function decorator that returns a running thread. The thread
    runs the specified callable and stores the result in the specified
    results queue; on failure, the formatted traceback is put on the
    errors queue instead.
    """
    # NOTE: the parameter name `callable` shadows the builtin, but is kept
    # for interface compatibility with existing callers.
    def wrapper(args, kwds):
        class ThreadInstance(threading.Thread):
            def run(self):
                try:
                    results.put(callable(*args, **kwds))
                except Exception:
                    # Fixed from `except Exception, e` -- Python-2-only
                    # syntax that bound an unused name; `except Exception:`
                    # is valid in both Python 2 and 3.
                    logger.log_exc('ThreadManager: Error in thread: ')
                    errors.put(traceback.format_exc())

        thread = ThreadInstance()
        thread.start()
        return thread

    return wrapper
class ThreadManager:
    """
    Runs callables in worker threads and collects their outcomes in
    thread-safe queues, one manager instance per group of workers.
    """

    def __init__(self):
        # Fresh per-instance queues and thread list.
        self.results = Queue()
        self.errors = Queue()
        self.threads = []

    def run(self, method, *args, **kwds):
        """
        Execute a callable in a separate thread.
        """
        launcher = ThreadedMethod(method, self.results, self.errors)
        self.threads.append(launcher(args, kwds))

    # Alias for callers that prefer start() over run().
    start = run

    def join(self):
        """
        Block until every started worker thread has completed.
        """
        for worker in self.threads:
            worker.join()

    def get_results(self, lenient=True):
        """
        Return a list of all results so far, blocking until all threads
        are finished. With lenient=False the error queue is inspected
        first and an exception carrying the first error is raised if it
        is non-empty.
        """
        self.join()
        if not lenient:
            pending = self.get_errors()
            if pending:
                raise Exception(pending[0])
        collected = []
        while not self.results.empty():
            collected.append(self.results.get())
        return collected

    def get_errors(self):
        """
        Return a list of all errors, blocking until all threads finish.
        """
        self.join()
        collected = []
        while not self.errors.empty():
            collected.append(self.errors.get())
        return collected

    def get_return_value(self):
        """
        Get the value that should be returned to the client: the first
        error if any, otherwise the first result.

        NOTE(review): the body is only this docstring, so None is returned.
        """
if __name__ == '__main__':
    # Smoke test: two workers that succeed and one that raises.

    def f(name, n, sleep=1):
        # Well-behaved worker: collects n..n+4, printing progress.
        nums = []
        for i in range(n, n+5):
            print "%s: %s" % (name, i)
            nums.append(i)
            time.sleep(sleep)
        return nums

    def e(name, n, sleep=1):
        # Failing worker: when the loop reaches the string elements
        # ('n', 'b'), i + 1 raises TypeError, which ThreadedMethod puts
        # on the errors queue. (range(...) + [...] is Python-2-only.)
        nums = []
        for i in range(n, n+3) + ['n', 'b']:
            print "%s: 1 + %s:" % (name, i)
            nums.append(i + 1)
            time.sleep(sleep)
        return nums

    threads = ThreadManager()
    threads.run(f, "Thread1", 10, 2)
    threads.run(f, "Thread2", -10, 1)
    threads.run(e, "Thread3", 19, 1)
    #results = threads.get_results()
    #errors = threads.get_errors()
    #print "Results:", results
    #print "Errors:", errors
    # lenient=False makes get_results raise, because Thread3 put an error
    # on the queue.
    results_xlenient = threads.get_results(lenient=False)
|
# -*- coding: utf-8 -*-
#
# xrt documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 01 09:52:50 2012.
import sys
import os
from unittest.mock import MagicMock
class Mock(MagicMock):
    # Import stand-in used to satisfy Sphinx autodoc on machines where the
    # GUI/OpenCL packages listed in MOCK_MODULES are not installed: any
    # attribute access yields another MagicMock.
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()
MOCK_MODULES = ['OpenGL', 'OpenGL.GL', 'OpenGL.GLU', 'OpenGL.GLUT',
'OpenGL.arrays', 'pyopencl',
'PyQt5', 'PyQt5.QtCore', 'PyQt5.QtGui', 'PyQt5.QtWidgets',
'PyQt5.QtOpenGL', 'PyQt5.QtWebEngineWidgets',
'matplotlib.backends.backend_qt5agg',
'PySide', 'PySide.QtCore',
'spyder.widgets', 'spyderlib.widgets']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# import Cloud
#import cloud_sptheme as csp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, '..')
sys.path.insert(0, '.')
#sys.path.append('..')
sys.path.append(os.path.abspath('exts'))
#autodoc_mock_imports = ["PyQt5.QtWebKitWidgets"]
import matplotlib as mpl
mpl.use('agg')
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath']
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'animation',
'verify']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'xrt'
copyright = u'2014 Konstantin Klementiev, Roman Chernikov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3.0'
# The full version, including alpha/beta/rc tags.
release = '1.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'mytheme'
#html_theme = "cloud"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"rightsidebar": False,
# "stickysidebar": True,
"collapsiblesidebar": True}
# "max_width": 20,
# "externalrefs": True,
# "roottarget": "index"
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["./_themes"]
#html_theme_path = [csp.get_theme_dir()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_images/logo-xrt.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_images/xrt_logo.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
# html_theme = 'default'
html_static_path = []
else:
# html_theme = 'nature'
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'xrtdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'xrt.tex', u'xrt Documentation',
u'Konstantin Klementiev', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'xrt', u'xrt Documentation',
[u'Konstantin Klementiev'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'xrt', u'xrt Documentation',
u'Konstantin Klementiev', 'xrt',
'parallelizable python code for ray tracing of x-ray instruments',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
update Sphinx theme
# -*- coding: utf-8 -*-
#
# xrt documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 01 09:52:50 2012.
import sys
import os
from unittest.mock import MagicMock
class Mock(MagicMock):
    # Import stand-in used to satisfy Sphinx autodoc on machines where the
    # GUI/OpenCL packages listed in MOCK_MODULES are not installed: any
    # attribute access yields another MagicMock.
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()
MOCK_MODULES = ['OpenGL', 'OpenGL.GL', 'OpenGL.GLU', 'OpenGL.GLUT',
'OpenGL.arrays', 'pyopencl',
'PyQt5', 'PyQt5.QtCore', 'PyQt5.QtGui', 'PyQt5.QtWidgets',
'PyQt5.QtOpenGL', 'PyQt5.QtWebEngineWidgets',
'matplotlib.backends.backend_qt5agg',
'PySide', 'PySide.QtCore',
'spyder.widgets', 'spyderlib.widgets']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# import Cloud
#import cloud_sptheme as csp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, '..')
sys.path.insert(0, '.')
#sys.path.append('..')
sys.path.append(os.path.abspath('exts'))
#autodoc_mock_imports = ["PyQt5.QtWebKitWidgets"]
import matplotlib as mpl
mpl.use('agg')
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath']
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'animation',
'verify']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'xrt'
copyright = u'2014 Konstantin Klementiev, Roman Chernikov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3.0'
# The full version, including alpha/beta/rc tags.
release = '1.3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'mytheme'
#html_theme = "cloud"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"rightsidebar": False,
# "stickysidebar": True,
"collapsiblesidebar": True,
"body_min_width": '96%',
# "max_width": 20,
# "externalrefs": True,
# "roottarget": "index"
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["./_themes"]
#html_theme_path = [csp.get_theme_dir()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_images/logo-xrt.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_images/xrt_logo.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
# html_theme = 'default'
html_static_path = []
else:
# html_theme = 'nature'
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'xrtdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'xrt.tex', u'xrt Documentation',
u'Konstantin Klementiev', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'xrt', u'xrt Documentation',
[u'Konstantin Klementiev'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'xrt', u'xrt Documentation',
u'Konstantin Klementiev', 'xrt',
'parallelizable python code for ray tracing of x-ray instruments',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
# Yith Library Server is a password storage server.
# Copyright (C) 2012-2013 Yaco Sistemas
# Copyright (C) 2012-2013 Alejandro Blanco Escudero <alejandro.b.e@gmail.com>
# Copyright (C) 2012-2015 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from pyramid_sqlalchemy import BaseObject
from sqlalchemy import Column, DateTime, Integer, String, Text
from sqlalchemy import ForeignKey, func
from sqlalchemy.dialects.postgresql import ARRAY, JSON, UUID
from sqlalchemy.orm import relationship, backref
def now():
    """Return the current UTC time as a naive ``datetime`` (column default)."""
    return datetime.utcnow()
class Password(BaseObject):
    """A stored password entry belonging to a user."""

    __tablename__ = 'passwords'

    # Primary key generated by the PostgreSQL uuid-ossp extension.
    id = Column(UUID, primary_key=True, default=func.uuid_generate_v4())
    creation = Column(DateTime, nullable=False, default=now)
    modification = Column(DateTime, nullable=False, default=now, onupdate=now)
    notes = Column(Text, nullable=False, default='')
    # Use the `list` callable as default so every new row gets a fresh
    # empty list; a literal [] is a single mutable object shared by all
    # inserts that omit tags.
    tags = Column(ARRAY(Text, dimensions=1), nullable=False, default=list)
    secret = Column(JSON(none_as_null=True), nullable=False)
    account = Column(String, nullable=False, default='')
    service = Column(String, nullable=False, default='')
    # NOTE(review): integer expiration period; units are not visible here.
    expiration = Column(Integer, nullable=True)

    user_id = Column(UUID, ForeignKey('users.id'), nullable=False)
    user = relationship(
        'User',
        backref=backref('passwords', cascade='all, delete-orphan'),
    )

    def as_dict(self):
        """Return a plain-dict view of this password for serialization."""
        return dict(
            id=self.id,
            creation=self.creation,
            modification=self.modification,
            notes=self.notes,
            tags=self.tags,
            secret=self.secret,
            account=self.account,
            service=self.service,
            expiration=self.expiration,
            user=self.user_id,
            owner=self.user_id,  # backwards compatibility
        )
The secret data format should be opaque to the server
# Yith Library Server is a password storage server.
# Copyright (C) 2012-2013 Yaco Sistemas
# Copyright (C) 2012-2013 Alejandro Blanco Escudero <alejandro.b.e@gmail.com>
# Copyright (C) 2012-2015 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from pyramid_sqlalchemy import BaseObject
from sqlalchemy import Column, DateTime, Integer, String, Text
from sqlalchemy import ForeignKey, func
from sqlalchemy.dialects.postgresql import ARRAY, UUID
from sqlalchemy.orm import relationship, backref
def now():
    """Return the current UTC time as a naive ``datetime`` (column default)."""
    return datetime.utcnow()
class Password(BaseObject):
    """A stored password entry belonging to a user.

    The secret is stored as an opaque string; the server does not
    interpret its format.
    """

    __tablename__ = 'passwords'

    # Primary key generated by the PostgreSQL uuid-ossp extension.
    id = Column(UUID, primary_key=True, default=func.uuid_generate_v4())
    creation = Column(DateTime, nullable=False, default=now)
    modification = Column(DateTime, nullable=False, default=now, onupdate=now)
    notes = Column(Text, nullable=False, default='')
    # Use the `list` callable as default so every new row gets a fresh
    # empty list; a literal [] is a single mutable object shared by all
    # inserts that omit tags.
    tags = Column(ARRAY(Text, dimensions=1), nullable=False, default=list)
    secret = Column(String, nullable=False, default='')
    account = Column(String, nullable=False, default='')
    service = Column(String, nullable=False, default='')
    # NOTE(review): integer expiration period; units are not visible here.
    expiration = Column(Integer, nullable=True)

    user_id = Column(UUID, ForeignKey('users.id'), nullable=False)
    user = relationship(
        'User',
        backref=backref('passwords', cascade='all, delete-orphan'),
    )

    def as_dict(self):
        """Return a plain-dict view of this password for serialization."""
        return dict(
            id=self.id,
            creation=self.creation,
            modification=self.modification,
            notes=self.notes,
            tags=self.tags,
            secret=self.secret,
            account=self.account,
            service=self.service,
            expiration=self.expiration,
            user=self.user_id,
            owner=self.user_id,  # backwards compatibility
        )
|
import RPi.GPIO as GPIO
import Gmail
YAHOO_PIN = 32
MSOFT_PIN = 36
CHECK_NOW_PIN = 12
def initialize_gpio():
    """Configure the LED output pins and the 'check now' button input."""
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup([Gmail.PIN, YAHOO_PIN, MSOFT_PIN], GPIO.OUT)
    # Enable the internal pull-down resistor so the button input does not
    # float and trigger spurious rising-edge events between presses.
    GPIO.setup(CHECK_NOW_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    # 1 s debounce on the button press.
    GPIO.add_event_detect(CHECK_NOW_PIN, GPIO.RISING, callback=check_all_mail_now, bouncetime=1000)
def check_all_mail_now(_):
    # GPIO edge-detect callback; the channel argument is unused.
    Gmail.refresh()
def main():
    """Set up the pins, start mail polling and block until the user exits."""
    try:
        initialize_gpio()
        Gmail.start()
        # Block until Enter is pressed (Python 2 raw_input).
        raw_input("\nPress any key to exit.\n")
    finally:
        # Always release the GPIO pins, even on error or Ctrl-C.
        GPIO.cleanup()
if __name__ == '__main__':
main()
Added pull-down resistor to button event
import RPi.GPIO as GPIO
import Gmail
YAHOO_PIN = 32
MSOFT_PIN = 36
CHECK_NOW_PIN = 12
def initialize_gpio():
    """Configure the LED output pins and the 'check now' button input."""
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup([Gmail.PIN, YAHOO_PIN, MSOFT_PIN], GPIO.OUT)
    # Internal pull-down keeps the button input from floating (and firing
    # spurious rising edges) between presses.
    GPIO.setup(CHECK_NOW_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
    # 1 s debounce on the button press.
    GPIO.add_event_detect(CHECK_NOW_PIN, GPIO.RISING, callback=check_all_mail_now, bouncetime=1000)
def check_all_mail_now(_):
    # GPIO edge-detect callback; the channel argument is unused.
    Gmail.refresh()
def main():
    """Set up the pins, start mail polling and block until the user exits."""
    try:
        initialize_gpio()
        Gmail.start()
        # Block until Enter is pressed (Python 2 raw_input).
        raw_input("\nPress any key to exit.\n")
    finally:
        # Always release the GPIO pins, even on error or Ctrl-C.
        GPIO.cleanup()
if __name__ == '__main__':
main()
|
"""
Plotting class to be used by Log.
"""
import time
import numpy as nm
from sfepy.base.base import Output, Struct
class LogPlotter(Struct):
    """
    LogPlotter to be used by :class:`sfepy.base.log.Log`.

    Receives drawing commands over a pipe (see __call__) and renders them
    with matplotlib.
    """
    # Prefixed print-like logger shared by all plotter instances.
    output = Output('plotter:')
    output = staticmethod(output)

    def __init__(self, aggregate=100):
        # aggregate: maximum number of commands processed per drawing cycle
        # before the canvas is redrawn (see poll_draw()).
        Struct.__init__(self, aggregate=aggregate)

    def process_command(self, command):
        """
        Execute one drawing command. `command` is a tuple whose first
        element selects the action: 'ig' (select current axes group),
        'plot', 'vline', 'clear', 'legends', 'add_axis' or 'save'.
        """
        from matplotlib.ticker import LogLocator, AutoLocator

        self.output(command[0])

        if command[0] == 'ig':
            # Select the current axes group for subsequent commands.
            self.ig = command[1]

        elif command[0] == 'plot':
            xdata, ydata = command[1:]

            ig = self.ig
            ax = self.ax[ig]
            ax.set_yscale(self.yscales[ig])
            ax.yaxis.grid(True)
            ax.plot(xdata, ydata)

            # Minor ticks: log-aware locator for log axes, automatic
            # locator otherwise.
            if self.yscales[ig] == 'log':
                ymajor_formatter = ax.yaxis.get_major_formatter()
                ymajor_formatter.label_minor(True)
                yminor_locator = LogLocator()
            else:
                yminor_locator = AutoLocator()
            self.ax[ig].yaxis.set_minor_locator(yminor_locator)

        elif command[0] == 'vline':
            # Vertical lines are stored and actually drawn by 'legends'.
            x, kwargs = command[1:]
            self.vlines[self.ig].append((x, kwargs))

        elif command[0] == 'clear':
            self.ax[self.ig].cla()

        elif command[0] == 'legends':
            # Refresh legends, axis labels and stored vlines on all axes.
            for ig, ax in enumerate(self.ax):
                try:
                    ax.legend(self.data_names[ig])
                except:
                    # Best-effort: legends may fail e.g. for empty axes.
                    pass
                if self.xlabels[ig]:
                    ax.set_xlabel(self.xlabels[ig])
                if self.ylabels[ig]:
                    ax.set_ylabel(self.ylabels[ig])
                for x, kwargs in self.vlines[ig]:
                    ax.axvline(x, **kwargs)
            try:
                self.plt.tight_layout(pad=0.5)
            except:
                pass

        elif command[0] == 'add_axis':
            ig, names, yscale, xlabel, ylabel = command[1:]
            self.data_names[ig] = names
            self.yscales[ig] = yscale
            self.xlabels[ig] = xlabel
            self.ylabels[ig] = ylabel
            self.n_gr = len(self.data_names)
            self.make_axes()

        elif command[0] == 'save':
            self.fig.savefig(command[1])
            self.pipe.send(True) # Acknowledge save.

    def terminate(self):
        # Final report, then close all matplotlib figures.
        if self.ii:
            self.output('processed %d commands' % self.ii)
        self.output('ended.')
        self.plt.close('all')

    def poll_draw(self):
        """
        Command loop: drain pending commands from the pipe (stopping a
        cycle after `aggregate` commands once a 'continue' marker was
        seen), redraw the canvas, sleep one second, repeat. Returns False
        when a None command requests termination.
        """
        while 1:
            self.ii = 0

            while 1:
                if not self.pipe.poll():
                    break

                command = self.pipe.recv()
                can_break = False

                if command is None:
                    self.terminate()
                    return False
                elif command[0] == 'continue':
                    can_break = True
                else:
                    self.process_command(command)

                if (self.ii >= self.aggregate) and can_break:
                    break

                self.ii += 1

            if self.ii:
                self.fig.canvas.draw()
                self.output('processed %d commands' % self.ii)

            time.sleep(1.0)

        # NOTE(review): apparently unreachable -- the outer loop only exits
        # via the `return False` above; kept for fidelity.
        return True

    def make_axes(self):
        # Rebuild the subplot grid for self.n_gr graphs, with at most five
        # columns, and make sure each group has a vlines list.
        from sfepy.linalg import cycle

        self.fig.clf()
        self.ax = []

        n_col = min(5.0, nm.fix(nm.sqrt(self.n_gr)))
        if int(n_col) == 0:
            n_row = 0
        else:
            n_row = int(nm.ceil(self.n_gr / n_col))
        n_col = int(n_col)

        for ii, (ir, ic) in enumerate(cycle((n_col, n_row))):
            if ii == self.n_gr: break
            self.ax.append(self.fig.add_subplot(n_row, n_col, ii + 1))
            self.vlines.setdefault(ii, [])

    def __call__(self, pipe, log_file, data_names, yscales, xlabels, ylabels):
        """
        Sets-up the plotting window, sets GTK event loop timer callback to
        callback() returned by self.poll_draw(). The callback does the actual
        plotting, taking commands out of `pipe`, and is called every second.
        Note that pyplot _must_ be imported here and not in this module so that
        the import occurs _after_ the plotting process is started in that
        process.
        """
        import matplotlib.pyplot as plt
        self.plt = plt

        self.output.set_output(filename=log_file)
        self.output('starting plotter...')

        self.pipe = pipe
        self.data_names = data_names
        self.yscales = yscales
        self.xlabels = xlabels
        self.ylabels = ylabels
        self.n_gr = len(data_names)
        self.vlines = {}

        self.fig = self.plt.figure()
        self.make_axes()

        # The command loop runs in a background thread while pyplot's GUI
        # main loop (plt.show()) runs in this one.
        import threading
        draw_thread = threading.Thread(target=self.poll_draw)
        draw_thread.start()

        self.output('...done')
        self.plt.show()
        draw_thread.join()
update LogPlotter for new sleep argument
"""
Plotting class to be used by Log.
"""
import time
import numpy as nm
from sfepy.base.base import Output, Struct
class LogPlotter(Struct):
    """
    LogPlotter to be used by :class:`sfepy.base.log.Log`.

    Meant to run in a separate (plotting) process: __call__() creates the
    matplotlib figure and a polling thread that receives plotting commands
    through a multiprocessing pipe.
    """
    output = Output('plotter:')
    output = staticmethod(output)

    def __init__(self, aggregate=100, sleep=1.0):
        """
        Parameters
        ----------
        aggregate : int
            The number of commands to process before the figure is redrawn.
        sleep : float
            The number of seconds to sleep between polling rounds.
        """
        Struct.__init__(self, aggregate=aggregate, sleep=sleep)

    def process_command(self, command):
        """
        Process a single plotting command - a tuple whose first item is the
        command name and the remaining items are its arguments.
        """
        from matplotlib.ticker import LogLocator, AutoLocator

        self.output(command[0])

        if command[0] == 'ig':
            # Select the axes group that subsequent commands apply to.
            self.ig = command[1]

        elif command[0] == 'plot':
            xdata, ydata = command[1:]

            ig = self.ig
            ax = self.ax[ig]
            ax.set_yscale(self.yscales[ig])
            ax.yaxis.grid(True)
            ax.plot(xdata, ydata)

            if self.yscales[ig] == 'log':
                ymajor_formatter = ax.yaxis.get_major_formatter()
                ymajor_formatter.label_minor(True)
                yminor_locator = LogLocator()
            else:
                yminor_locator = AutoLocator()
            self.ax[ig].yaxis.set_minor_locator(yminor_locator)

        elif command[0] == 'vline':
            x, kwargs = command[1:]
            self.vlines[self.ig].append((x, kwargs))

        elif command[0] == 'clear':
            self.ax[self.ig].cla()

        elif command[0] == 'legends':
            for ig, ax in enumerate(self.ax):
                try:
                    ax.legend(self.data_names[ig])
                except Exception:
                    # Was a bare `except:`, which also swallowed SystemExit
                    # and KeyboardInterrupt; keep only the best-effort intent.
                    pass
                if self.xlabels[ig]:
                    ax.set_xlabel(self.xlabels[ig])
                if self.ylabels[ig]:
                    ax.set_ylabel(self.ylabels[ig])
                for x, kwargs in self.vlines[ig]:
                    ax.axvline(x, **kwargs)
            try:
                self.plt.tight_layout(pad=0.5)
            except Exception:
                # tight_layout() can fail for some layouts - ignore (was a
                # bare `except:`, narrowed for the same reason as above).
                pass

        elif command[0] == 'add_axis':
            ig, names, yscale, xlabel, ylabel = command[1:]
            self.data_names[ig] = names
            self.yscales[ig] = yscale
            self.xlabels[ig] = xlabel
            self.ylabels[ig] = ylabel
            self.n_gr = len(self.data_names)
            self.make_axes()

        elif command[0] == 'save':
            self.fig.savefig(command[1])
            self.pipe.send(True) # Acknowledge save.

    def terminate(self):
        """Report the number of processed commands and close all figures."""
        if self.ii:
            self.output('processed %d commands' % self.ii)
        self.output('ended.')
        self.plt.close('all')

    def poll_draw(self):
        """
        Poll the pipe for commands, process them in batches of at most
        `aggregate` items, redraw the figure after each non-empty batch and
        sleep `sleep` seconds between polling rounds.  A None command
        terminates the plotter and returns False.
        """
        while 1:
            self.ii = 0
            while 1:
                if not self.pipe.poll():
                    break
                command = self.pipe.recv()
                can_break = False
                if command is None:
                    self.terminate()
                    return False
                elif command[0] == 'continue':
                    can_break = True
                else:
                    self.process_command(command)
                if (self.ii >= self.aggregate) and can_break:
                    break
                self.ii += 1
            if self.ii:
                self.fig.canvas.draw()
                self.output('processed %d commands' % self.ii)
            time.sleep(self.sleep)
        return True

    def make_axes(self):
        """
        Rebuild the figure's subplot axes, one per axes group (`n_gr`), in
        a grid with at most five columns.
        """
        from sfepy.linalg import cycle
        self.fig.clf()
        self.ax = []
        n_col = min(5.0, nm.fix(nm.sqrt(self.n_gr)))
        if int(n_col) == 0:
            n_row = 0
        else:
            n_row = int(nm.ceil(self.n_gr / n_col))
        n_col = int(n_col)
        for ii, (ir, ic) in enumerate(cycle((n_col, n_row))):
            if ii == self.n_gr: break
            self.ax.append(self.fig.add_subplot(n_row, n_col, ii + 1))
            self.vlines.setdefault(ii, [])

    def __call__(self, pipe, log_file, data_names, yscales, xlabels, ylabels):
        """
        Sets-up the plotting window and starts a background thread running
        poll_draw(), which does the actual plotting, taking commands out of
        `pipe`.
        Note that pyplot _must_ be imported here and not in this module so
        that the import occurs _after_ the plotting process is started in
        that process.
        """
        import matplotlib.pyplot as plt
        self.plt = plt
        self.output.set_output(filename=log_file)
        self.output('starting plotter...')
        self.pipe = pipe
        self.data_names = data_names
        self.yscales = yscales
        self.xlabels = xlabels
        self.ylabels = ylabels
        self.n_gr = len(data_names)
        self.vlines = {}
        self.fig = self.plt.figure()
        self.make_axes()
        import threading
        draw_thread = threading.Thread(target=self.poll_draw)
        draw_thread.start()
        self.output('...done')
        self.plt.show()
        draw_thread.join()
# --- file boundary ---
# -*- coding: utf-8 -*-
#
# GPy documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 18 15:30:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
print "python exec:", sys.executable
print "sys.path:", sys.path
try:
import numpy
print "numpy: %s, %s" % (numpy.__version__, numpy.__file__)
except ImportError:
print "no numpy"
try:
import matplotlib
print "matplotlib: %s, %s" % (matplotlib.__version__, matplotlib.__file__)
except ImportError:
print "no matplotlib"
try:
import ipython
print "ipython: %s, %s" % (ipython.__version__, ipython.__file__)
except ImportError:
print "no ipython"
try:
import sphinx
print "sphinx: %s, %s" % (sphinx.__version__, sphinx.__file__)
except ImportError:
print "no sphinx"
# Historical attempts at making the GPy package importable for autodoc;
# kept commented out for reference.
#parent = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
#sys.path.insert(0, parent)
#sys.path.append(parent)
#parent = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'GPy'))
#sys.path.insert(0, parent)
#sys.path.append(parent)
#APP_DIR = os.path.normpath(os.path.join(os.getcwd(), '../'))
#PACKAGE_DIR1 = os.path.normpath(os.path.join(os.getcwd(), '../'))
#sys.path.insert(0, APP_DIR)
#sys.path.insert(0, PACKAGE_DIR1)
#sys.path.insert(0, os.path.abspath('../GPy'))
print "sys.path:", sys.path
#sys.path.insert(0, os.getcwd() + "/..")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('../GPy'))
#print "sys.path.after:", sys.path
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# Make the local sphinxext/ directory importable (custom extensions).
sys.path.append(os.path.abspath('sphinxext'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
#sys.path.insert(0, os.path.abspath('./sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
print "Importing extensions"
# NOTE(review): 'sphinx.ext.pngmath' was removed in newer Sphinx releases in
# favour of 'sphinx.ext.imgmath' - confirm against the pinned Sphinx version.
extensions = ['sphinx.ext.autodoc',
              #'sphinx.ext.doctest'
              'sphinx.ext.viewcode',
              'sphinx.ext.pngmath'
              #'ipython_directive',
              #'ipython_console_highlighting.py'
              #'matplotlib.sphinxext.mathmpl',
              #'matplotlib.sphinxext.only_directives',
              #'matplotlib.sphinxext.plot_directive',
              ]
print "finished importing"
##############################################################################
##
## Mock out imports with C dependencies because ReadTheDocs can't build them.
#############################################################################
class Mock(object):
    """Stand-in for modules with C dependencies that ReadTheDocs cannot build.

    Instances absorb any construction or call by producing further Mock
    objects.  Missing attribute lookups return '/dev/null' for
    __file__/__path__, a freshly created class for names that do not start
    with a lower-case letter (so code can still subclass mocked classes),
    and another Mock otherwise.
    """

    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor arguments.
        pass

    def __call__(self, *args, **kwargs):
        # Calling a Mock yields a new Mock, so chained calls keep working.
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        if name in ('__file__', '__path__'):
            return '/dev/null'
        head = name[0]
        if head != head.upper():
            # Lower-case leading character: treat as a function/attribute.
            return Mock()
        # Capitalized (or non-letter-leading) names are treated as classes.
        stub = type(name, (), {})
        stub.__module__ = __name__
        return stub
#import mock
print "Mocking"
# Modules with C/optional dependencies that ReadTheDocs cannot install;
# replace them with Mock stubs so autodoc can import GPy regardless.
MOCK_MODULES = ['pylab', 'matplotlib', 'sympy', 'sympy.utilities', 'sympy.utilities.codegen', 'sympy.core.cache', 'sympy.core', 'sympy.parsing', 'sympy.parsing.sympy_parser']
#'matplotlib', 'matplotlib.color', 'matplotlib.pyplot', 'pylab' ]
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
# ----------------------- READTHEDOCS ------------------
# Detect a ReadTheDocs build via the environment variable it sets.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# NOTE(review): the next line forces the RTD code path unconditionally,
# making the environment check above dead code - confirm this is intentional.
on_rtd = True
if on_rtd:
    print "I am here"
    sys.path.append(os.path.abspath('../GPy'))
    import subprocess
    # Debug aids: show the build cwd and the parent directory listing.
    proc = subprocess.Popen("pwd", stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "program output:", out
    proc = subprocess.Popen("ls ../", stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "program output:", out
    # Regenerate the API .rst stubs for the GPy package in place.
    proc = subprocess.Popen("sphinx-apidoc -f -o . ../GPy", stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "program output:", out
    #os.system("cd ..")
    #os.system("cd ./docs")
    print "Compiled files"
# -- Project metadata and general Sphinx settings (sphinx-quickstart
# -- defaults; commented-out entries document the default values).

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'GPy'
copyright = u'2013, Author'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): version/release are left empty - confirm whether they should
# be populated from the GPy package metadata.
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# This is to revert to the default theme on readthedocs
html_style = '/default.css'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'GPydoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'GPy.tex', u'GPy Documentation',
   u'Author', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'gpy', u'GPy Documentation',
     [u'Author'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'GPy', u'GPy Documentation',
   u'Author', 'GPy', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'GPy'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2013, Author'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

#############################################################################
#
# Include constructors in all the docs
# Got this method from:
# http://stackoverflow.com/questions/5599254/how-to-use-sphinxs-autodoc-to-document-a-classs-init-self-method

#def skip(app, what, name, obj, skip, options):
    #if name == "__init__":
        #return False
    #return skip

#def setup(app):
    #app.connect("autodoc-skip-member", skip)

# Document members in source order rather than alphabetically.
autodoc_member_order = "source"
# commit: Added a path back
# -*- coding: utf-8 -*-
#
# GPy documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 18 15:30:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
print "python exec:", sys.executable
print "sys.path:", sys.path
try:
import numpy
print "numpy: %s, %s" % (numpy.__version__, numpy.__file__)
except ImportError:
print "no numpy"
try:
import matplotlib
print "matplotlib: %s, %s" % (matplotlib.__version__, matplotlib.__file__)
except ImportError:
print "no matplotlib"
try:
import ipython
print "ipython: %s, %s" % (ipython.__version__, ipython.__file__)
except ImportError:
print "no ipython"
try:
import sphinx
print "sphinx: %s, %s" % (sphinx.__version__, sphinx.__file__)
except ImportError:
print "no sphinx"
sys.path.insert(0, os.getcwd() + "/..")
# Historical attempts at making the GPy package importable for autodoc;
# kept commented out for reference.
#parent = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
#sys.path.insert(0, parent)
#sys.path.append(parent)
#parent = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'GPy'))
#sys.path.insert(0, parent)
#sys.path.append(parent)
#APP_DIR = os.path.normpath(os.path.join(os.getcwd(), '../'))
#PACKAGE_DIR1 = os.path.normpath(os.path.join(os.getcwd(), '../'))
#sys.path.insert(0, APP_DIR)
#sys.path.insert(0, PACKAGE_DIR1)
#sys.path.insert(0, os.path.abspath('../GPy'))
print "sys.path:", sys.path
#sys.path.insert(0, os.getcwd() + "/..")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('../GPy'))
#print "sys.path.after:", sys.path
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# Make the local sphinxext/ directory importable (custom extensions).
sys.path.append(os.path.abspath('sphinxext'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
#sys.path.insert(0, os.path.abspath('./sphinxext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
print "Importing extensions"
# NOTE(review): 'sphinx.ext.pngmath' was removed in newer Sphinx releases in
# favour of 'sphinx.ext.imgmath' - confirm against the pinned Sphinx version.
extensions = ['sphinx.ext.autodoc',
              #'sphinx.ext.doctest'
              'sphinx.ext.viewcode',
              'sphinx.ext.pngmath'
              #'ipython_directive',
              #'ipython_console_highlighting.py'
              #'matplotlib.sphinxext.mathmpl',
              #'matplotlib.sphinxext.only_directives',
              #'matplotlib.sphinxext.plot_directive',
              ]
print "finished importing"
##############################################################################
##
## Mock out imports with C dependencies because ReadTheDocs can't build them.
#############################################################################
class Mock(object):
    """Stand-in for modules with C dependencies that ReadTheDocs cannot build.

    Instances absorb any construction or call by producing further Mock
    objects.  Missing attribute lookups return '/dev/null' for
    __file__/__path__, a freshly created class for names that do not start
    with a lower-case letter (so code can still subclass mocked classes),
    and another Mock otherwise.
    """

    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor arguments.
        pass

    def __call__(self, *args, **kwargs):
        # Calling a Mock yields a new Mock, so chained calls keep working.
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        if name in ('__file__', '__path__'):
            return '/dev/null'
        head = name[0]
        if head != head.upper():
            # Lower-case leading character: treat as a function/attribute.
            return Mock()
        # Capitalized (or non-letter-leading) names are treated as classes.
        stub = type(name, (), {})
        stub.__module__ = __name__
        return stub
#import mock
print "Mocking"
# Modules with C/optional dependencies that ReadTheDocs cannot install;
# replace them with Mock stubs so autodoc can import GPy regardless.
MOCK_MODULES = ['pylab', 'matplotlib', 'sympy', 'sympy.utilities', 'sympy.utilities.codegen', 'sympy.core.cache', 'sympy.core', 'sympy.parsing', 'sympy.parsing.sympy_parser']
#'matplotlib', 'matplotlib.color', 'matplotlib.pyplot', 'pylab' ]
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
# ----------------------- READTHEDOCS ------------------
# Detect a ReadTheDocs build via the environment variable it sets.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# NOTE(review): the next line forces the RTD code path unconditionally,
# making the environment check above dead code - confirm this is intentional.
on_rtd = True
if on_rtd:
    print "I am here"
    sys.path.append(os.path.abspath('../GPy'))
    import subprocess
    # Debug aids: show the build cwd and the parent directory listing.
    proc = subprocess.Popen("pwd", stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "program output:", out
    proc = subprocess.Popen("ls ../", stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "program output:", out
    # Regenerate the API .rst stubs for the GPy package in place.
    proc = subprocess.Popen("sphinx-apidoc -f -o . ../GPy", stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "program output:", out
    #os.system("cd ..")
    #os.system("cd ./docs")
    print "Compiled files"
# -- Project metadata and general Sphinx settings (sphinx-quickstart
# -- defaults; commented-out entries document the default values).

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'GPy'
copyright = u'2013, Author'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): version/release are left empty - confirm whether they should
# be populated from the GPy package metadata.
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# This is to revert to the default theme on readthedocs
html_style = '/default.css'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'GPydoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'GPy.tex', u'GPy Documentation',
   u'Author', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'gpy', u'GPy Documentation',
     [u'Author'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'GPy', u'GPy Documentation',
   u'Author', 'GPy', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'GPy'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2013, Author'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

#############################################################################
#
# Include constructors in all the docs
# Got this method from:
# http://stackoverflow.com/questions/5599254/how-to-use-sphinxs-autodoc-to-document-a-classs-init-self-method

#def skip(app, what, name, obj, skip, options):
    #if name == "__init__":
        #return False
    #return skip

#def setup(app):
    #app.connect("autodoc-skip-member", skip)

# Document members in source order rather than alphabetically.
autodoc_member_order = "source"
# --- file boundary ---
# This file is part of rhizi, a collaborative knowledge graph editor.
# Copyright (C) 2014-2015 Rhizi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import unittest
import os
from ..rz_config import RZ_Config
from ..rz_server import init_webapp
from ..rz_user import rest__user_signup, User_Signup_Request
from .util import gen_random_name, gen_random_user_signup, RhiziTestBase
from .test_util__pydev import debug__pydev_pd_arg
class Test_RZ_User(RhiziTestBase):
    """Signup / access-control tests for the /signup REST endpoint."""

    def test_user_signup__validate_emails(self):
        """Registration should accept only valid email addresses"""
        self.webapp.testing = True
        self.webapp.rz_config.access_control = True
        us_req = gen_random_user_signup()
        with self.webapp.test_client() as test_client:
            # "foo@bar" lacks a dotted domain and must be rejected.
            us_req['email_address'] = "foo@bar"
            req, req_data = self._json_post(test_client, '/signup', us_req)
            self.assertIn(b"Illegal", req.data)
            self.assertIn(b"email address", req.data)
            self.assertEqual(400, req.status_code, req_data)

    def test_user_signup__acl_domain(self):
        """Email registration should support domains whitelisting"""
        self.webapp.testing = True
        self.webapp.rz_config.access_control = True
        self.webapp.rz_config.acl_wl__email_domain_set = 'a.org, b.org'
        us_req = gen_random_user_signup()
        with self.webapp.test_client() as test_client:
            for email_address, expected_status_code in [('bob@a.org', 200),
                                                        ('bob@b.org', 200),
                                                        ('alice@foo.bar', 400),
                                                        ('bob@c.org', 400)]:
                us_req['email_address'] = email_address
                req, req_data = self._json_post(test_client, '/signup', us_req)
                self.assertEqual(expected_status_code, req.status_code, req_data)

    def test_user_signup__whitelist_emails(self):
        """Email registration should support email whitelisting"""
        self.webapp.testing = True
        self.webapp.rz_config.access_control = True
        self.webapp.rz_config.acl_wl__email_domain_set = 'a.org'
        self.webapp.rz_config.acl_wl__email_address_set_cached = ['alice@c.org', 'haha@c.org'] # hack: acl_wl__email_address_set_cached attribute access
        us_req = gen_random_user_signup()
        with self.webapp.test_client() as test_client:
            for email_address, expected_status_code in [('haha@c.org', 200), # whitelist specific user
                                                        ('alice@a.org', 200), # whitelist domain
                                                        ('roger@test.org', 400)]: # out
                us_req['email_address'] = email_address
                req, req_data = self._json_post(test_client, '/signup', us_req)
                # Bug fix: the loop posted the requests but asserted nothing -
                # the status-code check was missing.
                self.assertEqual(expected_status_code, req.status_code, req_data)

    def test_user_signup__whitelist_emails_file(self):
        # Bug fix: this docstring sat *above* the `def` line (attached to the
        # previous method) - moved inside the method where it belongs.
        """Email registration should support email whitelisting using file"""
        self.webapp.testing = True
        self.webapp.rz_config.access_control = True
        self.webapp.rz_config.acl_wl__email_address_set_cached = [] # init clean
        self.webapp.rz_config.acl_wl__email_address_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'emails.txt')
        self.webapp.rz_config.acl_wl__email_address_set_cached = ['alice@c.org']
        us_req = gen_random_user_signup()
        with self.webapp.test_client() as test_client:
            for email_address, expected_status_code in [('joe@test.org', 200), # whitelisted in file
                                                        ('jane@test.org', 200), # whitelisted in file
                                                        ('someone@domain.org', 200), # whitelisted in file
                                                        ('alice@c.org', 200), # whitelisted in cache
                                                        ('roger@foo.bar', 400)]: # not whitelisted
                us_req['email_address'] = email_address
                req, req_data = self._json_post(test_client, '/signup', us_req)
                # Bug fix: the assertEqual was duplicated - one check per
                # request suffices.
                self.assertEqual(expected_status_code, req.status_code, req_data)
@debug__pydev_pd_arg
def main():
    """Run the ACL-domain signup test under the pydev debug wrapper."""
    default_test = 'Test_RZ_User.test_user_signup__acl_domain'
    unittest.main(defaultTest=default_test, verbosity=2)

if __name__ == "__main__":
    main()
rhizi/test/test_rz_API_user: remove test of removed feature acl_wl__email_address_set_cached
# This file is part of rhizi, a collaborative knowledge graph editor.
# Copyright (C) 2014-2015 Rhizi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import unittest
import os
from ..rz_config import RZ_Config
from ..rz_server import init_webapp
from ..rz_user import rest__user_signup, User_Signup_Request
from .util import gen_random_name, gen_random_user_signup, RhiziTestBase
from .test_util__pydev import debug__pydev_pd_arg
class Test_RZ_User(RhiziTestBase):
    """Tests for the /signup REST endpoint: email validation and
    access-control whitelisting (by domain, by address, by file)."""

    def test_user_signup__validate_emails(self):
        """Registration should accept only valid email addresses"""
        self.webapp.testing = True
        self.webapp.rz_config.access_control = True
        us_req = gen_random_user_signup()
        with self.webapp.test_client() as test_client:
            us_req['email_address'] = "foo@bar"
            req, req_data = self._json_post(test_client, '/signup', us_req)
            self.assertIn(b"Illegal", req.data)
            self.assertIn(b"email address", req.data)
            self.assertEqual(400, req.status_code, req_data)

    def test_user_signup__acl_domain(self):
        """Email registration should support domains whitelisting"""
        self.webapp.testing = True
        self.webapp.rz_config.access_control = True
        self.webapp.rz_config.acl_wl__email_domain_set = 'a.org, b.org'
        us_req = gen_random_user_signup()
        with self.webapp.test_client() as test_client:
            for email_address, expected_status_code in [('bob@a.org', 200),
                                                        ('bob@b.org', 200),
                                                        ('alice@foo.bar', 400),
                                                        ('bob@c.org', 400)]:
                us_req['email_address'] = email_address
                req, req_data = self._json_post(test_client, '/signup', us_req)
                self.assertEqual(expected_status_code, req.status_code, req_data)

    def test_user_signup__whitelist_emails(self):
        """Email registration should support email whitelisting"""
        self.webapp.testing = True
        self.webapp.rz_config.access_control = True
        self.webapp.rz_config.acl_wl__email_domain_set = 'a.org'
        self.webapp.rz_config.acl_wl__email_address_set_cached = ['alice@c.org', 'haha@c.org'] # hack: acl_wl__email_address_set_cached attribute access
        us_req = gen_random_user_signup()
        with self.webapp.test_client() as test_client:
            for email_address, expected_status_code in [('haha@c.org', 200), # whitelist specific user
                                                        ('alice@a.org', 200), # whitelist domain
                                                        ('roger@test.org', 400)]: # out
                us_req['email_address'] = email_address
                req, req_data = self._json_post(test_client, '/signup', us_req)
                # BUG FIX: this assertion was missing, so the loop posted
                # requests without ever checking the expected status.
                self.assertEqual(expected_status_code, req.status_code, req_data)

    def test_user_signup__whitelist_emails_file(self):
        """Email registration should support email whitelisting using file"""
        # BUG FIX: the docstring above sat on the line *before* the `def`;
        # moved inside the function where it belongs.
        self.webapp.testing = True
        self.webapp.rz_config.access_control = True
        self.webapp.rz_config.acl_wl__email_address_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'emails.txt')
        us_req = gen_random_user_signup()
        with self.webapp.test_client() as test_client:
            for email_address, expected_status_code in [('joe@test.org', 200), # whitelisted in file
                                                        ('jane@test.org', 200), # whitelisted in file
                                                        ('someone@domain.org', 200), # whitelisted in file
                                                        ('roger@foo.bar', 400)]: # not whitelisted
                us_req['email_address'] = email_address
                req, req_data = self._json_post(test_client, '/signup', us_req)
                # BUG FIX: assert once inside the loop; the original had two
                # identical assertions dangling after the loop instead.
                self.assertEqual(expected_status_code, req.status_code, req_data)
@debug__pydev_pd_arg
def main():
    """Run the ACL-domain signup test under the pydev debug wrapper."""
    default_test = 'Test_RZ_User.test_user_signup__acl_domain'
    unittest.main(defaultTest=default_test, verbosity=2)

if __name__ == "__main__":
    main()
|
# Yith Library Server is a password storage server.
# Copyright (C) 2012 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import optparse
import textwrap
import sys
from pyramid.paster import bootstrap
from yithlibraryserver.user.accounts import get_available_providers
from yithlibraryserver.user.accounts import get_n_passwords
def _get_user_info(db, user):
    """Build a printable summary dict for *user*.

    Keys: display_name, passwords (count), providers (comma-joined list of
    linked auth providers), verified (email_verified flag).
    """
    first = user.get('first_name', '')
    last = user.get('last_name', '')
    email = user.get('email', '')
    # A provider is "linked" when the user document carries its `<name>_id`.
    linked = [prov for prov in get_available_providers()
              if ('%s_id' % prov) in user]
    return {
        'display_name': '%s %s <%s>' % (first, last, email),
        'passwords': get_n_passwords(db, user),
        'providers': ', '.join(linked),
        'verified': user.get('email_verified', False),
    }
def usage():
    """Entry point: print a per-user report (passwords, providers, verified).

    Expects a single positional argument, the Pyramid config URI.
    Returns 2 when the argument is missing (used as the exit status).
    """
    description = "Report users and their password number."
    # Renamed from `usage` — the original local shadowed this function's name.
    usage_text = "usage: %prog config_uri"
    parser = optparse.OptionParser(
        usage=usage_text,
        description=textwrap.dedent(description)
    )
    # No options are defined; `_options` kept only because parse_args
    # returns a pair.
    _options, args = parser.parse_args(sys.argv[1:])
    if not args:  # idiomatic emptiness test (was `if not len(args) >= 1`)
        print('You must provide at least one argument')
        return 2
    config_uri = args[0]
    env = bootstrap(config_uri)
    settings, closer = env['registry'].settings, env['closer']
    try:
        db = settings['mongodb'].get_database()
        for user in db.users.find():
            info = _get_user_info(db, user)
            print('%s (%s)\n'
                  '\tPasswords: %d\n'
                  '\tProviders: %s\n'
                  '\tVerified: %s\n' % (
                      info['display_name'], user['_id'],
                      info['passwords'], info['providers'], info['verified'],
                  ))
    finally:
        # Always release resources acquired by bootstrap().
        closer()
Reword the command description
# Yith Library Server is a password storage server.
# Copyright (C) 2012 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import optparse
import textwrap
import sys
from pyramid.paster import bootstrap
from yithlibraryserver.user.accounts import get_available_providers
from yithlibraryserver.user.accounts import get_n_passwords
def _get_user_info(db, user):
    """Build a printable summary dict for *user*.

    Keys: display_name, passwords (count), providers (comma-joined list of
    linked auth providers), verified (email_verified flag).
    """
    # A provider is "linked" when the user document carries its `<name>_id`.
    linked = [prov for prov in get_available_providers()
              if ('%s_id' % prov) in user]
    display_name = '%s %s <%s>' % (user.get('first_name', ''),
                                   user.get('last_name', ''),
                                   user.get('email', ''))
    return {
        'display_name': display_name,
        'passwords': get_n_passwords(db, user),
        'providers': ', '.join(linked),
        'verified': user.get('email_verified', False),
    }
def usage():
    """Entry point: print a per-user report (passwords, providers, verified).

    Expects a single positional argument, the Pyramid config URI.
    Returns 2 when the argument is missing (used as the exit status).
    """
    description = "Report information about users and their passwords."
    # Renamed from `usage` — the original local shadowed this function's name.
    usage_text = "usage: %prog config_uri"
    parser = optparse.OptionParser(
        usage=usage_text,
        description=textwrap.dedent(description)
    )
    # No options are defined; `_options` kept only because parse_args
    # returns a pair.
    _options, args = parser.parse_args(sys.argv[1:])
    if not args:  # idiomatic emptiness test (was `if not len(args) >= 1`)
        print('You must provide at least one argument')
        return 2
    config_uri = args[0]
    env = bootstrap(config_uri)
    settings, closer = env['registry'].settings, env['closer']
    try:
        db = settings['mongodb'].get_database()
        for user in db.users.find():
            info = _get_user_info(db, user)
            print('%s (%s)\n'
                  '\tPasswords: %d\n'
                  '\tProviders: %s\n'
                  '\tVerified: %s\n' % (
                      info['display_name'], user['_id'],
                      info['passwords'], info['providers'], info['verified'],
                  ))
    finally:
        # Always release resources acquired by bootstrap().
        closer()
|
#!/usr/bin/env python
# Version metadata: RELEASE combines the semantic version with the
# incrementing build counter, e.g. "0.9.9r54".
BUILD = 54
VERSION = "0.9.9"
RELEASE = "%sr%d" % (VERSION, BUILD)
Update version number.
#!/usr/bin/env python
# Version metadata: RELEASE combines the semantic version with the
# incrementing build counter, e.g. "1.0.0r1".
BUILD = 1
VERSION = "1.0.0"
RELEASE = "%sr%d" % (VERSION, BUILD)
|
from terms import *
##
# 12.04.2007, c
class IntegrateVolumeTerm( Term ):
    r""":definition: $\int_\Omega y$"""
    # Evaluation-only term ('d_' prefix): integrates the parameter variable
    # over the volume region; the final value is a single scalar.
    name = 'd_volume_integrate'
    arg_types = ('parameter',)
    geometry = [(Volume, 'parameter')]
    use_caches = {'state_in_volume_qp' : [['parameter']]}
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # created: 12.04.2007
    # last revision: 21.12.2007
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        # Generator protocol: yields (value, chunk, status) per element chunk.
        par, = self.get_args( **kwargs )
        ap, vg = par.get_approximation( self.get_current_group(), 'Volume' )
        shape = (chunk_size, 1, 1, 1)
        # Cache supplies the parameter values at volume quadrature points.
        cache = self.get_cache( 'state_in_volume_qp', 0 )
        vec = cache( 'state', self.get_current_group(), 0, state = par )
        for out, chunk in self.char_fun( chunk_size, shape ):
            # integrate_chunk fills `out` with per-element integrals.
            status = vg.integrate_chunk( out, vec[chunk], chunk )
            # Collapse the per-element values to one scalar for this chunk.
            out1 = nm.sum( out )
            yield out1, chunk, status
##
# 01.11.2007, c
class IntegrateVolumeOperatorTerm( Term ):
    r""":definition: $\int_\Omega q$"""
    # Weak-form term ('dw_' prefix): integrates the virtual basis functions.
    name = 'dw_volume_integrate'
    arg_types = ('virtual',)
    geometry = [(Volume, 'virtual')]
    ##
    # 01.11.2007, c
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # created: 01.11.2007
    # last revision: 21.12.2007
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Yield (element contribution, chunk, status) per element chunk."""
        virtual, = self.get_args( **kwargs )
        ap, vg = virtual.get_approximation( self.get_current_group(), 'Volume' )
        n_el, n_qp, dim, n_ep = ap.get_v_data_shape( self.integral_name )
        if diff_var is None:
            shape = (chunk_size, 1, n_ep, 1 )
        else:
            # No tangent-matrix contribution for this term. BUG FIX: a bare
            # `return` ends the generator; the original `raise StopIteration`
            # is illegal inside a generator under PEP 479 (Python 3).
            # (Also dropped the unused `mode = 0` local.)
            return
        bf = ap.get_base( 'v', 0, self.integral_name )
        for out, chunk in self.char_fun( chunk_size, shape ):
            bf_t = nm.tile( bf.transpose( (0, 2, 1) ), (chunk.shape[0], 1, 1, 1) )
            status = vg.integrate_chunk( out, bf_t, chunk )
            # NOTE(review): `status` is computed but a literal 0 is yielded —
            # possibly intentional; kept as-is to preserve behavior.
            yield out, chunk, 0
##
# 24.04.2007, c
class IntegrateSurfaceTerm( Term ):
    r""":definition: $\int_\Gamma y$, for vectors: $\int_\Gamma \ul{y} \cdot
    \ul{n}$"""
    # Evaluation-only surface term; uses surface DOF connectivity.
    name = 'd_surface_integrate'
    arg_types = ('parameter',)
    geometry = [(Surface, 'parameter')]
    use_caches = {'state_in_surface_qp' : [['parameter']]}
    ##
    # 24.04.2007, c
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
        # Surface terms assemble via the surface DOF connectivity.
        self.dof_conn_type = 'surface'
    ##
    # c: 24.04.2007, r: 15.01.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """
        Integrates over surface.

        Yields (scalar value, chunk, status) per element chunk.
        """
        par, = self.get_args( **kwargs )
        ap, sg = par.get_approximation( self.get_current_group(), 'Surface' )
        shape = (chunk_size, 1, 1, 1)
        # NOTE(review): `sd` is never used below — possibly kept for the
        # KeyError it raises when the region has no surface data; confirm.
        sd = ap.surface_data[self.region.name]
        cache = self.get_cache( 'state_in_surface_qp', 0 )
        vec = cache( 'state', self.get_current_group(), 0, state = par )
        for out, chunk in self.char_fun( chunk_size, shape ):
            # Surface integration indexes by the local (per-face) chunk.
            lchunk = self.char_fun.get_local_chunk()
            status = sg.integrate_chunk( out, vec[lchunk], lchunk, 0 )
            out1 = nm.sum( out )
            yield out1, chunk, status
##
# 26.09.2007, c
class DotProductVolumeTerm( Term ):
    r""":description: Volume $L^2(\Omega)$ dot product for both scalar and
    vector fields.
    :definition: $\int_\Omega p r$, $\int_\Omega \ul{u} \cdot \ul{w}$"""
    name = 'd_volume_dot'
    arg_types = ('parameter_1', 'parameter_2')
    geometry = [(Volume, 'parameter_1'), (Volume, 'parameter_2')]
    use_caches = {'state_in_volume_qp' : [['parameter_1'], ['parameter_2']]}
    ##
    # 26.09.2007, c
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # created: 26.09.2007
    # last revision: 13.12.2007
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        # Yields (scalar dot-product value, chunk, status) per chunk.
        par1, par2 = self.get_args( **kwargs )
        ap, vg = par1.get_approximation( self.get_current_group(), 'Volume' )
        shape = (chunk_size, 1, 1, 1)
        # Two cache instances: quadrature-point values of each parameter.
        cache = self.get_cache( 'state_in_volume_qp', 0 )
        vec1 = cache( 'state', self.get_current_group(), 0, state = par1 )
        cache = self.get_cache( 'state_in_volume_qp', 1 )
        vec2 = cache( 'state', self.get_current_group(), 0, state = par2 )
        for out, chunk in self.char_fun( chunk_size, shape ):
            if vec1.shape[-1] > 1:
                # Vector fields: contract over the component axis.
                vec = nm.sum( vec1[chunk] * vec2[chunk], axis = -1 )
            else:
                # Scalar fields: plain pointwise product.
                vec = vec1[chunk] * vec2[chunk]
            status = vg.integrate_chunk( out, vec, chunk )
            out1 = nm.sum( out )
            yield out1, chunk, status
##
# 09.10.2007, c
class DotProductSurfaceTerm( Term ):
    r""":description: Surface $L^2(\Gamma)$ dot product for both scalar and
    vector fields.
    :definition: $\int_\Gamma p r$, $\int_\Gamma \ul{u} \cdot \ul{w}$"""
    name = 'd_surface_dot'
    arg_types = ('parameter_1', 'parameter_2')
    geometry = [(Surface, 'parameter_1'), (Surface, 'parameter_2')]
    use_caches = {'state_in_surface_qp' : [['parameter_1'], ['parameter_2']]}
    ##
    # 09.10.2007, c
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # c: 09.10.2007, r: 15.01.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        # Surface analogue of d_volume_dot; yields (value, chunk, status).
        par1, par2 = self.get_args( **kwargs )
        ap, sg = par1.get_approximation( self.get_current_group(), 'Surface' )
        shape = (chunk_size, 1, 1, 1)
        # Two cache instances: surface quadrature-point values per parameter.
        cache = self.get_cache( 'state_in_surface_qp', 0 )
        vec1 = cache( 'state', self.get_current_group(), 0, state = par1 )
        cache = self.get_cache( 'state_in_surface_qp', 1 )
        vec2 = cache( 'state', self.get_current_group(), 0, state = par2 )
        for out, chunk in self.char_fun( chunk_size, shape ):
            # Surface integration indexes by the local (per-face) chunk.
            lchunk = self.char_fun.get_local_chunk()
            if vec1.shape[-1] > 1:
                # Vector fields: contract over the component axis.
                vec = nm.sum( vec1[lchunk] * vec2[lchunk], axis = -1 )
            else:
                vec = vec1[lchunk] * vec2[lchunk]
            status = sg.integrate_chunk( out, vec, lchunk, 0 )
            out1 = nm.sum( out )
            yield out1, chunk, status
##
# 30.06.2008, c
class IntegrateSurfaceOperatorTerm( Term ):
    r""":definition: $\int_{\Gamma} q$"""
    # Weak-form surface term: integrates virtual basis functions, scaled
    # by the material argument.
    name = 'dw_surface_integrate'
    arg_types = ('material', 'virtual',)
    geometry = [(Surface, 'virtual')]
    ##
    # 30.06.2008, c
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
        # Surface terms assemble via the surface DOF connectivity.
        self.dof_conn_type = 'surface'
    ##
    # 30.06.2008, c
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Yield (mat-scaled element contribution, local chunk, 0) per chunk."""
        mat, virtual, = self.get_args( **kwargs )
        ap, sg = virtual.get_approximation( self.get_current_group(), 'Surface' )
        if diff_var is None:
            shape = (chunk_size, 1, sg.n_fp, 1 )
        else:
            # No tangent-matrix contribution. BUG FIX: a bare `return` ends
            # the generator; the original `raise StopIteration` is illegal
            # inside a generator under PEP 479 (Python 3).
            return
        sd = ap.surface_data[self.region.name]
        bf = ap.get_base( sd.face_type, 0, self.integral_name )
        for out, chunk in self.char_fun( chunk_size, shape ):
            lchunk = self.char_fun.get_local_chunk()
            bf_t = nm.tile( bf.transpose( (0, 2, 1) ), (chunk.shape[0], 1, 1, 1) )
            status = sg.integrate_chunk( out, bf_t, lchunk, 1 )
            # Scale the integrated basis by the material value.
            out = out*mat
            # NOTE(review): `status` is computed but 0 is yielded — kept
            # as-is to preserve behavior.
            yield out, lchunk, 0
##
# 16.07.2007, c
class VolumeTerm( Term ):
    r""":description: Volume of a domain. Uses approximation of the parameter
    variable.
    :definition: $\int_\Omega 1$"""
    name = 'd_volume'
    arg_types = ('parameter',)
    geometry = [(Volume, 'parameter')]
    use_caches = {'volume' : [['parameter']]}
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # created: 16.07.2007
    # last revision: 13.12.2007
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        # Yields a single (volume, 0, 0) triple — no per-chunk loop needed,
        # the cache already holds the total region volume.
        par, = self.get_args( **kwargs )
        # NOTE(review): `shape` is never used below — dead local; confirm
        # before removing.
        shape = (1, 1, 1, 1)
        cache = self.get_cache( 'volume', 0 )
        volume = cache( 'volume', self.get_current_group(), 0,
                        region = self.char_fun.region, field = par.field )
        yield volume, 0, 0
##
# c: 05.03.2008, r: 05.03.2008
def fix_mat_qp_shape( mat_qp, n_el ):
    """Normalize a material-in-quadrature-points array to 4D.

    A 3D input gains a trailing unit axis; an array whose leading
    (element) axis has length 1 is tiled out to `n_el` entries.
    """
    arr = mat_qp
    if arr.ndim == 3:
        arr = arr[..., nm.newaxis]
    if arr.shape[0] == 1:
        arr = nm.tile( arr, (n_el, 1, 1, 1) )
    return arr
##
# c: 06.05.2008
class AverageVolumeMatTerm( Term ):
    r""":description: Material parameter $m$ averaged in elements. Uses
    approximation of $y$ variable.
    :definition: $\forall K \in \Tcal_h: \int_{T_K} m / \int_{T_K} 1$
    :arguments: material : $m$ (can have up to two dimensions),
    parameter : $y$, shape : shape of material parameter
    parameter, mode : 'const' or 'vertex' or 'element_avg'
    """
    name = 'de_volume_average_mat'
    arg_types = ('material', 'parameter', 'shape', 'mode')
    geometry = [(Volume, 'parameter')]
    use_caches = {'mat_in_qp' : [['material']]}
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # c: 06.05.2008, r: 06.05.2008
    def prepare_data( self, chunk_size = None, **kwargs ):
        # Shared setup for this term and subclasses: fetch the material
        # interpolated to quadrature points and normalize its shape.
        # Returns (volume geometry, mat_qp array, output chunk shape).
        mat, par, mat_shape, mode = self.get_args( **kwargs )
        ap, vg = par.get_approximation( self.get_current_group(), 'Volume' )
        n_el, n_qp, dim, n_ep = ap.get_v_data_shape( self.integral_name )
        cache = self.get_cache( 'mat_in_qp', 0 )
        # The cache accepts either a constant-per-mesh or per-element shape.
        mat_qp = cache( 'matqp', self.get_current_group(), 0,
                        mat = mat, ap = ap,
                        assumed_shapes = [(1, n_qp) + mat_shape,
                                          (n_el, n_qp) + mat_shape],
                        mode_in = mode )
        mat_qp = fix_mat_qp_shape( mat_qp, chunk_size )
        shape = (chunk_size, 1) + mat_qp.shape[2:]
        return vg, mat_qp, shape
    ##
    # c: 06.05.2008, r: 14.07.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        # Yields (per-element average of the material, chunk, status).
        vg, mat_qp, shape = self.prepare_data( chunk_size, **kwargs )
        for out, chunk in self.char_fun( chunk_size, shape ):
            status = vg.integrate_chunk( out, mat_qp[chunk], chunk )
            # Divide the element integral by the element volume
            # (vg.variable( 2 ) — presumably element volumes; confirm).
            out1 = out / vg.variable( 2 )[chunk]
            yield out1, chunk, status
##
# c: 05.03.2008
class IntegrateVolumeMatTerm( AverageVolumeMatTerm ):
    r""":description: Integrate material parameter $m$ over a domain. Uses
    approximation of $y$ variable.
    :definition: $\int_\Omega m$
    :arguments: material : $m$ (can have up to two dimensions),
    parameter : $y$, shape : shape of material parameter
    parameter, mode : 'const' or 'vertex' or 'element_avg'
    """
    # Reuses AverageVolumeMatTerm.prepare_data(); only the reduction differs
    # (total integral rather than per-element average).
    name = 'di_volume_integrate_mat'
    ##
    # c: 05.03.2008, r: 06.05.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        mat_shape, = self.get_args( ['shape'], **kwargs )
        vg, mat_qp, shape = self.prepare_data( chunk_size, **kwargs )
        for out, chunk in self.char_fun( chunk_size, shape ):
            status = vg.integrate_chunk( out, mat_qp[chunk], chunk )
            # Sum over the element axis, then reshape (in place) to the
            # user-declared material shape.
            out1 = nm.sum( out, 0 )
            out1.shape = mat_shape
            yield out1, chunk, status
##
# c: 05.03.2008
class WDotProductVolumeTerm( Term ):
    r""":description: Volume $L^2(\Omega)$ weighted dot product for both scalar
    and vector fields.
    :definition: $\int_\Omega y p r$, $\int_\Omega y \ul{u} \cdot \ul{w}$
    :arguments: material : weight function $y$"""
    name = 'd_volume_wdot'
    arg_types = ('material', 'parameter_1', 'parameter_2')
    geometry = [(Volume, 'parameter_1'), (Volume, 'parameter_2')]
    use_caches = {'state_in_volume_qp' : [['parameter_1'], ['parameter_2']],
                  'mat_in_qp' : [['material']]}
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # c: 05.03.2008, r: 05.03.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        # Like d_volume_dot, but with the product weighted by the material
        # evaluated at quadrature points. Yields (value, chunk, status).
        mat, par1, par2 = self.get_args( **kwargs )
        ap, vg = par1.get_approximation( self.get_current_group(), 'Volume' )
        n_el, n_qp, dim, n_ep = ap.get_v_data_shape( self.integral_name )
        shape = (chunk_size, 1, 1, 1)
        cache = self.get_cache( 'state_in_volume_qp', 0 )
        vec1 = cache( 'state', self.get_current_group(), 0, state = par1 )
        cache = self.get_cache( 'state_in_volume_qp', 1 )
        vec2 = cache( 'state', self.get_current_group(), 0, state = par2 )
        # Promote a 1D material to a column so the cache shape check passes.
        if mat.ndim == 1:
            mat = mat[...,nm.newaxis]
        cache = self.get_cache( 'mat_in_qp', 0 )
        mat_qp = cache( 'matqp', self.get_current_group(), 0,
                        mat = mat, ap = ap,
                        assumed_shapes = [(1, n_qp, 1, 1),
                                          (n_el, n_qp, 1, 1)],
                        mode_in = None )
        mat_qp = fix_mat_qp_shape( mat_qp, chunk_size )
        for out, chunk in self.char_fun( chunk_size, shape ):
            if vec1.shape[-1] > 1:
                # Vector fields: contract components, then weight.
                vec = mat_qp[chunk] * nm.sum( vec1[chunk] * vec2[chunk],
                                              axis = -1 )
            else:
                vec = mat_qp[chunk] * vec1[chunk] * vec2[chunk]
            status = vg.integrate_chunk( out, vec, chunk )
            out1 = nm.sum( out )
            yield out1, chunk, status
##
# c: 05.03.2008
class WDotProductVolumeOperatorTerm( Term ):
    r""":description: Volume $L^2(\Omega)$ weighted dot product operator for
    scalar and vector (not implemented!) fields.
    :definition: $\int_\Omega y q p$, $\int_\Omega y \ul{v} \cdot \ul{u}$
    :arguments: material : weight function $y$"""
    name = 'dw_volume_wdot'
    arg_types = ('material', 'virtual', 'state')
    geometry = [(Volume, 'virtual'), (Volume, 'state')]
    use_caches = {'state_in_volume_qp' : [['state']],
                  'mat_in_qp' : [['material']]}
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # c: 05.03.2008, r: 05.03.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Yield (residual or tangent contribution, chunk, status) per chunk.

        mode 0: residual (diff_var None); mode 1: tangent w.r.t. state.
        """
        mat, virtual, state = self.get_args( **kwargs )
        ap, vg = virtual.get_approximation( self.get_current_group(), 'Volume' )
        n_el, n_qp, dim, n_ep = ap.get_v_data_shape( self.integral_name )
        cache = self.get_cache( 'state_in_volume_qp', 0 )
        vec = cache( 'state', self.get_current_group(), 0, state = state )
        vdim = vec.shape[-1]
        if diff_var is None:
            shape = (chunk_size, 1, vdim * n_ep, 1)
            mode = 0
        elif diff_var == self.get_arg_name( 'state' ):
            shape = (chunk_size, 1, vdim * n_ep, vdim * n_ep)
            mode = 1
        else:
            # Unknown differentiation variable. BUG FIX: a bare `return`
            # ends the generator; the original `raise StopIteration` is
            # illegal inside a generator under PEP 479 (Python 3).
            return
        # Promote a 1D material to a column so the cache shape check passes.
        if mat.ndim == 1:
            mat = mat[...,nm.newaxis]
        cache = self.get_cache( 'mat_in_qp', 0 )
        mat_qp = cache( 'matqp', self.get_current_group(), 0,
                        mat = mat, ap = ap,
                        assumed_shapes = [(1, n_qp, 1, 1),
                                          (n_el, n_qp, 1, 1)],
                        mode_in = None )
        mat_qp = fix_mat_qp_shape( mat_qp, chunk_size )
        bf = ap.get_base( 'v', 0, self.integral_name )
        bf_t = bf.transpose( (0, 2, 1) )
        for out, chunk in self.char_fun( chunk_size, shape ):
            if vdim > 1:
                raise NotImplementedError
            else:
                if mode == 0:
                    # BUG FIX: the original assigned this product back to
                    # `vec`, clobbering the cached state values so every
                    # chunk after the first multiplied the wrong array.
                    val = bf_t * mat_qp[chunk] * vec[chunk]
                else:
                    val = bf_t * mat_qp[chunk] * bf
                status = vg.integrate_chunk( out, val, chunk )
                yield out, chunk, status
##
# c: 02.04.2008
class WDotProductVolumeOperatorDtTerm( WDotProductVolumeOperatorTerm ):
    r""":description: Volume $L^2(\Omega)$ weighted dot product operator for
    scalar and vector (not implemented!) fields.
    :definition: $\int_\Omega y q \frac{p - p_0}{\dt}$,
    $\int_\Omega y \ul{v} \cdot \frac{\ul{u} - \ul{u}_0}{\dt}$
    :arguments: material : weight function $y$"""
    name = 'dw_volume_wdot_dt'
    arg_types = ('ts', 'material', 'virtual', 'state', 'parameter')
    geometry = [(Volume, 'virtual'), (Volume, 'state')]
    # History depth 2: current and previous time-step state.
    use_caches = {'state_in_volume_qp' : [['state', {'state' : (2,2)}]],
                  'mat_in_qp' : [['material']]}
    ##
    # c: 02.04.2008, r: 04.04.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Backward-difference-in-time variant of dw_volume_wdot.

        Yields (residual or tangent contribution, chunk, status) per chunk.
        """
        ts, mat, virtual, state, par = self.get_args( **kwargs )
        ap, vg = virtual.get_approximation( self.get_current_group(), 'Volume' )
        n_el, n_qp, dim, n_ep = ap.get_v_data_shape( self.integral_name )
        cache = self.get_cache( 'state_in_volume_qp', 0 )
        vec = cache( 'state', self.get_current_group(), 0,
                     state = state, history = par )
        if ts.step > 0:
            vec0 = cache( 'state', self.get_current_group(), 1,
                          state = state, history = par )
            # Backward-difference approximation of the time derivative.
            dvec = (vec - vec0) / ts.dt
        vdim = vec.shape[-1]
        if diff_var is None:
            shape = (chunk_size, 1, vdim * n_ep, 1)
            mode = 0
        elif diff_var == self.get_arg_name( 'state' ):
            shape = (chunk_size, 1, vdim * n_ep, vdim * n_ep)
            mode = 1
        else:
            # Unknown differentiation variable. BUG FIX: a bare `return`
            # ends the generator; `raise StopIteration` is illegal inside
            # a generator under PEP 479 (Python 3).
            return
        if (ts.step == 0) and (mode == 0):
            # First step: no history, hence no residual contribution
            # (PEP 479 fix as above).
            return
        # Promote a 1D material to a column so the cache shape check passes.
        if mat.ndim == 1:
            mat = mat[...,nm.newaxis]
        cache = self.get_cache( 'mat_in_qp', 0 )
        mat_qp = cache( 'matqp', self.get_current_group(), 0,
                        mat = mat, ap = ap,
                        assumed_shapes = [(1, n_qp, 1, 1),
                                          (n_el, n_qp, 1, 1)],
                        mode_in = None )
        mat_qp = fix_mat_qp_shape( mat_qp, chunk_size )
        bf = ap.get_base( 'v', 0, self.integral_name )
        bf_t = bf.transpose( (0, 2, 1) )
        for out, chunk in self.char_fun( chunk_size, shape ):
            if vdim > 1:
                raise NotImplementedError
            else:
                if mode == 0:
                    # Separate name so `vec`/`dvec` are never clobbered
                    # across chunks (mirrors the fix in the parent class).
                    val = bf_t * mat_qp[chunk] * dvec[chunk]
                else:
                    val = bf_t * mat_qp[chunk] * bf / ts.dt
                status = vg.integrate_chunk( out, val, chunk )
                yield out, chunk, status
##
# c: 03.04.2008
class WDotProductVolumeOperatorTHTerm( Term ):
    r""":definition: $\int_\Omega \left [\int_0^t \Gcal(t-\tau) p(\tau)
    \difd{\tau} \right] q$"""
    # Time-history (fading memory) variant; delegates the numerical kernel
    # to terms.dw_volume_wdot_scalar.
    name = 'dw_volume_wdot_th'
    arg_types = ('ts', 'material', 'virtual', 'state', 'parameter')
    geometry = [(Volume, 'virtual'), (Volume, 'state')]
    # (-1,-1): unlimited history depth for the state cache.
    use_caches = {'state_in_volume_qp' : [['state', {'state' : (-1,-1)}]]}
    ##
    # c: 03.04.2008, r: 03.04.2008
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign, terms.dw_volume_wdot_scalar )
    ##
    # c: 03.04.2008, r: 03.04.2008
    def get_shape( self, diff_var, chunk_size, apr, apc = None ):
        # Returns (output shape, mode): mode 0 = residual, 1 = tangent.
        # Side effect: stores the approximation data shape on self.
        self.data_shape = apr.get_v_data_shape( self.integral_name )
        n_el, n_qp, dim, n_ep = self.data_shape
        if diff_var is None:
            return (chunk_size, 1, n_ep, 1), 0
        elif diff_var == self.get_arg_name( 'state' ):
            return (chunk_size, 1, n_ep, n_ep), 1
        else:
            # NOTE(review): StopIteration raised here propagates into the
            # generator __call__ below; under Python 3 / PEP 479 this
            # becomes a RuntimeError — needs porting work.
            raise StopIteration
    ##
    # c: 03.04.2008, r: 18.06.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        # Yields (contribution, chunk, status) per chunk; `mats` is the
        # sequence of history kernel coefficients, one per past step.
        ts, mats, virtual, state, history = self.get_args( **kwargs )
        ap, vg = virtual.get_approximation( self.get_current_group(), 'Volume' )
        shape, mode = self.get_shape( diff_var, chunk_size, ap )
        n_el, n_qp, dim, n_ep = self.data_shape
        if (ts.step == 0) and (mode == 0):
            # NOTE(review): see PEP 479 note in get_shape().
            raise StopIteration
        bf = ap.get_base( 'v', 0, self.integral_name )
        if mode == 1:
            # Tangent: only the instantaneous (first) kernel value matters.
            mat_qp = mats[0][nm.newaxis,:,nm.newaxis].repeat( n_qp, 0 )
            for out, chunk in self.char_fun( chunk_size, shape ):
                status = self.function( out, ts.dt, nm.empty( 0 ), bf,
                                        mat_qp, vg, chunk, 1 )
                yield out, chunk, status
        else:
            # Residual: accumulate the convolution over all history steps.
            cache = self.get_cache( 'state_in_volume_qp', 0 )
            for out, chunk in self.char_fun( chunk_size, shape, zero = True ):
                out1 = nm.empty_like( out )
                for ii, mat in enumerate( mats ):
                    mat_qp = mat[nm.newaxis,:,nm.newaxis].repeat( n_qp, 0 )
                    vec_qp = cache( 'state', self.get_current_group(), ii,
                                    state = state, history = history )
                    status = self.function( out1, ts.dt, vec_qp, bf,
                                            mat_qp, vg, chunk, 0 )
                    out += out1
                yield out, chunk, status
class AverageVariableTerm( Term ):
    r""":description: Variable $y$ averaged in elements.
    :definition: vector of $\forall K \in \Tcal_h: \int_{T_K} y /
    \int_{T_K} 1$"""
    name = 'de_average_variable'
    arg_types = ('parameter',)
    geometry = [(Volume, 'parameter')]
    use_caches = {'state_in_volume_qp' : [['parameter']]}

    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )

    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Yield the element-wise average of the parameter variable as
        (averaged values, chunk, status) per element chunk."""
        parameter, = self.get_args( **kwargs )
        ap, vg = parameter.get_approximation( self.get_current_group(),
                                              'Volume' )
        state_cache = self.get_cache( 'state_in_volume_qp', 0 )
        state_qp = state_cache( 'state', self.get_current_group(), 0,
                                state = parameter )
        n_comp = state_qp.shape[-1]
        shape = (chunk_size, 1, n_comp, 1)
        for out, chunk in self.char_fun( chunk_size, shape ):
            status = vg.integrate_chunk( out, state_qp[chunk], chunk )
            # Divide each element integral by the element volume.
            averaged = out / vg.variable( 2 )[chunk]
            yield averaged, chunk, status
fixed for new naming convention
from terms import *
##
# 12.04.2007, c
class IntegrateVolumeTerm( Term ):
    r""":definition: $\int_\Omega y$"""
    # Evaluation-only term ('d_' prefix): integrates the parameter variable
    # over the volume region; the final value is a single scalar.
    name = 'd_volume_integrate'
    arg_types = ('parameter',)
    geometry = [(Volume, 'parameter')]
    use_caches = {'state_in_volume_qp' : [['parameter']]}
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # created: 12.04.2007
    # last revision: 21.12.2007
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        # Generator protocol: yields (value, chunk, status) per element chunk.
        par, = self.get_args( **kwargs )
        ap, vg = par.get_approximation( self.get_current_group(), 'Volume' )
        shape = (chunk_size, 1, 1, 1)
        # Cache supplies the parameter values at volume quadrature points.
        cache = self.get_cache( 'state_in_volume_qp', 0 )
        vec = cache( 'state', self.get_current_group(), 0, state = par )
        for out, chunk in self.char_fun( chunk_size, shape ):
            status = vg.integrate_chunk( out, vec[chunk], chunk )
            # Collapse the per-element values to one scalar for this chunk.
            out1 = nm.sum( out )
            yield out1, chunk, status
##
# 01.11.2007, c
class IntegrateVolumeOperatorTerm( Term ):
    r""":definition: $\int_\Omega q$"""
    # Weak-form term ('dw_' prefix): integrates the virtual basis functions.
    name = 'dw_volume_integrate'
    arg_types = ('virtual',)
    geometry = [(Volume, 'virtual')]
    ##
    # 01.11.2007, c
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # created: 01.11.2007
    # last revision: 21.12.2007
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Yield (element contribution, chunk, status) per element chunk."""
        virtual, = self.get_args( **kwargs )
        ap, vg = virtual.get_approximation( self.get_current_group(), 'Volume' )
        n_el, n_qp, dim, n_ep = ap.get_v_data_shape( self.integral_name )
        if diff_var is None:
            shape = (chunk_size, 1, n_ep, 1 )
        else:
            # No tangent-matrix contribution for this term. BUG FIX: a bare
            # `return` ends the generator; the original `raise StopIteration`
            # is illegal inside a generator under PEP 479 (Python 3).
            # (Also dropped the unused `mode = 0` local.)
            return
        bf = ap.get_base( 'v', 0, self.integral_name )
        for out, chunk in self.char_fun( chunk_size, shape ):
            bf_t = nm.tile( bf.transpose( (0, 2, 1) ), (chunk.shape[0], 1, 1, 1) )
            status = vg.integrate_chunk( out, bf_t, chunk )
            # NOTE(review): `status` is computed but a literal 0 is yielded —
            # possibly intentional; kept as-is to preserve behavior.
            yield out, chunk, 0
##
# 24.04.2007, c
class IntegrateSurfaceTerm( Term ):
    r""":definition: $\int_\Gamma y$, for vectors: $\int_\Gamma \ul{y} \cdot
    \ul{n}$"""
    # Evaluation-only surface term; uses surface DOF connectivity.
    name = 'd_surface_integrate'
    arg_types = ('parameter',)
    geometry = [(Surface, 'parameter')]
    use_caches = {'state_in_surface_qp' : [['parameter']]}
    ##
    # 24.04.2007, c
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
        # Surface terms assemble via the surface DOF connectivity.
        self.dof_conn_type = 'surface'
    ##
    # c: 24.04.2007, r: 15.01.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """
        Integrates over surface.

        Yields (scalar value, chunk, status) per element chunk.
        """
        par, = self.get_args( **kwargs )
        ap, sg = par.get_approximation( self.get_current_group(), 'Surface' )
        shape = (chunk_size, 1, 1, 1)
        # NOTE(review): `sd` is never used below — possibly kept for the
        # KeyError it raises when the region has no surface data; confirm.
        sd = ap.surface_data[self.region.name]
        cache = self.get_cache( 'state_in_surface_qp', 0 )
        vec = cache( 'state', self.get_current_group(), 0, state = par )
        for out, chunk in self.char_fun( chunk_size, shape ):
            # Surface integration indexes by the local (per-face) chunk.
            lchunk = self.char_fun.get_local_chunk()
            status = sg.integrate_chunk( out, vec[lchunk], lchunk, 0 )
            out1 = nm.sum( out )
            yield out1, chunk, status
##
# 26.09.2007, c
class DotProductVolumeTerm( Term ):
    r""":description: Volume $L^2(\Omega)$ dot product for both scalar and
    vector fields.
    :definition: $\int_\Omega p r$, $\int_\Omega \ul{u} \cdot \ul{w}$"""
    name = 'd_volume_dot'
    arg_types = ('parameter_1', 'parameter_2')
    geometry = [(Volume, 'parameter_1'), (Volume, 'parameter_2')]
    use_caches = {'state_in_volume_qp' : [['parameter_1'], ['parameter_2']]}
    ##
    # 26.09.2007, c
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # created: 26.09.2007
    # last revision: 13.12.2007
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        # Yields (scalar dot-product value, chunk, status) per chunk.
        par1, par2 = self.get_args( **kwargs )
        ap, vg = par1.get_approximation( self.get_current_group(), 'Volume' )
        shape = (chunk_size, 1, 1, 1)
        # Two cache instances: quadrature-point values of each parameter.
        cache = self.get_cache( 'state_in_volume_qp', 0 )
        vec1 = cache( 'state', self.get_current_group(), 0, state = par1 )
        cache = self.get_cache( 'state_in_volume_qp', 1 )
        vec2 = cache( 'state', self.get_current_group(), 0, state = par2 )
        for out, chunk in self.char_fun( chunk_size, shape ):
            if vec1.shape[-1] > 1:
                # Vector fields: contract over the component axis.
                vec = nm.sum( vec1[chunk] * vec2[chunk], axis = -1 )
            else:
                # Scalar fields: plain pointwise product.
                vec = vec1[chunk] * vec2[chunk]
            status = vg.integrate_chunk( out, vec, chunk )
            out1 = nm.sum( out )
            yield out1, chunk, status
##
# 09.10.2007, c
class DotProductSurfaceTerm( Term ):
    r""":description: Surface $L^2(\Gamma)$ dot product for both scalar and
    vector fields.
    :definition: $\int_\Gamma p r$, $\int_\Gamma \ul{u} \cdot \ul{w}$"""
    name = 'd_surface_dot'
    arg_types = ('parameter_1', 'parameter_2')
    geometry = [(Surface, 'parameter_1'), (Surface, 'parameter_2')]
    use_caches = {'state_in_surface_qp' : [['parameter_1'], ['parameter_2']]}

    ##
    # 09.10.2007, c
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )

    ##
    # c: 09.10.2007, r: 15.01.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Evaluate the surface dot product chunk-wise.  Vector fields are
        contracted over their last axis; each chunk yields its scalar
        total."""
        par1, par2 = self.get_args( **kwargs )
        ap, sg = par1.get_approximation( self.get_current_group(), 'Surface' )

        qp1 = self.get_cache( 'state_in_surface_qp', 0 )(
            'state', self.get_current_group(), 0, state = par1 )
        qp2 = self.get_cache( 'state_in_surface_qp', 1 )(
            'state', self.get_current_group(), 0, state = par2 )

        is_vector = qp1.shape[-1] > 1
        for out, chunk in self.char_fun( chunk_size, (chunk_size, 1, 1, 1) ):
            lchunk = self.char_fun.get_local_chunk()
            prod = qp1[lchunk] * qp2[lchunk]
            if is_vector:
                prod = nm.sum( prod, axis = -1 )
            status = sg.integrate_chunk( out, prod, lchunk, 0 )
            yield nm.sum( out ), chunk, status
##
# 30.06.2008, c
class IntegrateSurfaceOperatorTerm( Term ):
    r""":definition: $\int_{\Gamma} q$"""
    name = 'dw_surface_integrate'
    arg_types = ('material', 'virtual',)
    geometry = [(Surface, 'virtual')]
    ##
    # 30.06.2008, c
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
        # Surface terms are assembled through surface DOF connectivity.
        self.dof_conn_type = 'surface'
    ##
    # 30.06.2008, c
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Assemble the surface integral of the virtual base functions,
        scaled by the material coefficient.

        Yields (out, lchunk, status) per chunk of faces.
        """
        mat, virtual, = self.get_args( **kwargs )
        ap, sg = virtual.get_approximation( self.get_current_group(), 'Surface' )
        n_fa, n_qp, dim, n_fp = ap.get_s_data_shape( self.integral_name,
                                                     self.region.name )
        if diff_var is None:
            shape = (chunk_size, 1, n_fp, 1 )
        else:
            # Linear in the test function only - no tangent matrix mode.
            raise StopIteration
        sd = ap.surface_data[self.region.name]
        bf = ap.get_base( sd.face_type, 0, self.integral_name )
        for out, chunk in self.char_fun( chunk_size, shape ):
            lchunk = self.char_fun.get_local_chunk()
            bf_t = nm.tile( bf.transpose( (0, 2, 1) ), (chunk.shape[0], 1, 1, 1) )
            status = sg.integrate_chunk( out, bf_t, lchunk, 1 )
            # NOTE(review): `mat` scales the whole integrated chunk -
            # presumably a constant coefficient; confirm against callers.
            out = out*mat
            # NOTE(review): `status` is discarded and 0 is yielded, and
            # `lchunk` is yielded where sibling terms yield `chunk` -
            # verify whether either is intentional.
            yield out, lchunk, 0
##
# 16.07.2007, c
class VolumeTerm( Term ):
    r""":description: Volume of a domain. Uses approximation of the parameter
    variable.
    :definition: $\int_\Omega 1$"""
    name = 'd_volume'
    arg_types = ('parameter',)
    geometry = [(Volume, 'parameter')]
    use_caches = {'volume' : [['parameter']]}

    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )

    ##
    # created: 16.07.2007
    # last revision: 13.12.2007
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Yield the region volume (from the 'volume' cache) as a single
        (volume, 0, 0) item."""
        par, = self.get_args( **kwargs )
        volume = self.get_cache( 'volume', 0 )(
            'volume', self.get_current_group(), 0,
            region = self.char_fun.region, field = par.field )
        yield volume, 0, 0
##
# c: 05.03.2008, r: 05.03.2008
def fix_mat_qp_shape( mat_qp, n_el ):
    """Normalize material-in-quadrature-points data to a 4D array.

    A 3D array gets a trailing unit axis appended; data given for a
    single element (leading axis of length 1) is replicated to `n_el`
    entries.  Input already in the full shape is returned unchanged.
    """
    out = mat_qp[..., nm.newaxis] if mat_qp.ndim == 3 else mat_qp
    if out.shape[0] == 1:
        out = nm.tile( out, (n_el, 1, 1, 1) )
    return out
##
# c: 06.05.2008
class AverageVolumeMatTerm( Term ):
    r""":description: Material parameter $m$ averaged in elements. Uses
    approximation of $y$ variable.
    :definition: $\forall K \in \Tcal_h: \int_{T_K} m / \int_{T_K} 1$
    :arguments: material : $m$ (can have up to two dimensions),
    parameter : $y$, shape : shape of material parameter
    parameter, mode : 'const' or 'vertex' or 'element_avg'
    """
    name = 'de_volume_average_mat'
    arg_types = ('material', 'parameter', 'shape', 'mode')
    geometry = [(Volume, 'parameter')]
    use_caches = {'mat_in_qp' : [['material']]}
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # c: 06.05.2008, r: 06.05.2008
    def prepare_data( self, chunk_size = None, **kwargs ):
        """Fetch the material interpolated into quadrature points and the
        per-chunk output shape.  Shared with IntegrateVolumeMatTerm.

        Returns (vg, mat_qp, shape).
        """
        mat, par, mat_shape, mode = self.get_args( **kwargs )
        ap, vg = par.get_approximation( self.get_current_group(), 'Volume' )
        n_el, n_qp, dim, n_ep = ap.get_v_data_shape( self.integral_name )
        cache = self.get_cache( 'mat_in_qp', 0 )
        # The cache accepts either one value set shared by all elements
        # (leading axis 1) or per-element data (leading axis n_el).
        mat_qp = cache( 'matqp', self.get_current_group(), 0,
                        mat = mat, ap = ap,
                        assumed_shapes = [(1, n_qp) + mat_shape,
                                          (n_el, n_qp) + mat_shape],
                        mode_in = mode )
        # NOTE(review): shared data is replicated to chunk_size entries,
        # not n_el - presumably chunk indexing relies on this; confirm.
        mat_qp = fix_mat_qp_shape( mat_qp, chunk_size )
        shape = (chunk_size, 1) + mat_qp.shape[2:]
        return vg, mat_qp, shape
    ##
    # c: 06.05.2008, r: 14.07.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Integrate the material over each element and divide by
        vg.variable( 2 ), which per the class definition plays the role
        of the element volume $\int_{T_K} 1$."""
        vg, mat_qp, shape = self.prepare_data( chunk_size, **kwargs )
        for out, chunk in self.char_fun( chunk_size, shape ):
            status = vg.integrate_chunk( out, mat_qp[chunk], chunk )
            out1 = out / vg.variable( 2 )[chunk]
            yield out1, chunk, status
##
# c: 05.03.2008
class IntegrateVolumeMatTerm( AverageVolumeMatTerm ):
    r""":description: Integrate material parameter $m$ over a domain. Uses
    approximation of $y$ variable.
    :definition: $\int_\Omega m$
    :arguments: material : $m$ (can have up to two dimensions),
    parameter : $y$, shape : shape of material parameter
    parameter, mode : 'const' or 'vertex' or 'element_avg'
    """
    name = 'di_volume_integrate_mat'

    ##
    # c: 05.03.2008, r: 06.05.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Sum the per-element integrals of the material over each chunk
        and reshape the total to the declared material shape."""
        mat_shape, = self.get_args( ['shape'], **kwargs )
        vg, mat_qp, shape = self.prepare_data( chunk_size, **kwargs )
        for out, chunk in self.char_fun( chunk_size, shape ):
            status = vg.integrate_chunk( out, mat_qp[chunk], chunk )
            total = nm.sum( out, 0 ).reshape( mat_shape )
            yield total, chunk, status
##
# c: 05.03.2008
class WDotProductVolumeTerm( Term ):
    r""":description: Volume $L^2(\Omega)$ weighted dot product for both scalar
    and vector fields.
    :definition: $\int_\Omega y p r$, $\int_\Omega y \ul{u} \cdot \ul{w}$
    :arguments: material : weight function $y$"""
    name = 'd_volume_wdot'
    arg_types = ('material', 'parameter_1', 'parameter_2')
    geometry = [(Volume, 'parameter_1'), (Volume, 'parameter_2')]
    use_caches = {'state_in_volume_qp' : [['parameter_1'], ['parameter_2']],
                  'mat_in_qp' : [['material']]}
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # c: 05.03.2008, r: 05.03.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Evaluate the weighted dot product chunk-wise, yielding the
        scalar total of each chunk."""
        mat, par1, par2 = self.get_args( **kwargs )
        ap, vg = par1.get_approximation( self.get_current_group(), 'Volume' )
        n_el, n_qp, dim, n_ep = ap.get_v_data_shape( self.integral_name )
        shape = (chunk_size, 1, 1, 1)
        cache = self.get_cache( 'state_in_volume_qp', 0 )
        vec1 = cache( 'state', self.get_current_group(), 0, state = par1 )
        cache = self.get_cache( 'state_in_volume_qp', 1 )
        vec2 = cache( 'state', self.get_current_group(), 0, state = par2 )
        # Promote a 1D weight to a trailing unit axis for the cache.
        if mat.ndim == 1:
            mat = mat[...,nm.newaxis]
        cache = self.get_cache( 'mat_in_qp', 0 )
        mat_qp = cache( 'matqp', self.get_current_group(), 0,
                        mat = mat, ap = ap,
                        assumed_shapes = [(1, n_qp, 1, 1),
                                          (n_el, n_qp, 1, 1)],
                        mode_in = None )
        mat_qp = fix_mat_qp_shape( mat_qp, chunk_size )
        for out, chunk in self.char_fun( chunk_size, shape ):
            if vec1.shape[-1] > 1:
                # NOTE(review): the axis=-1 sum drops a dimension while
                # mat_qp[chunk] keeps four - verify the intended
                # broadcasting for the vector case.
                vec = mat_qp[chunk] * nm.sum( vec1[chunk] * vec2[chunk],
                                              axis = -1 )
            else:
                vec = mat_qp[chunk] * vec1[chunk] * vec2[chunk]
            status = vg.integrate_chunk( out, vec, chunk )
            out1 = nm.sum( out )
            yield out1, chunk, status
##
# c: 05.03.2008
class WDotProductVolumeOperatorTerm( Term ):
    r""":description: Volume $L^2(\Omega)$ weighted dot product operator for
    scalar and vector (not implemented!) fields.
    :definition: $\int_\Omega y q p$, $\int_\Omega y \ul{v} \cdot \ul{u}$
    :arguments: material : weight function $y$"""
    name = 'dw_volume_wdot'
    arg_types = ('material', 'virtual', 'state')
    geometry = [(Volume, 'virtual'), (Volume, 'state')]
    use_caches = {'state_in_volume_qp' : [['state']],
                  'mat_in_qp' : [['material']]}
    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )
    ##
    # c: 05.03.2008, r: 05.03.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Assemble the weighted dot product: the residual vector when
        diff_var is None (mode 0), or the tangent matrix w.r.t. the state
        (mode 1).  Vector fields raise NotImplementedError.
        """
        mat, virtual, state = self.get_args( **kwargs )
        ap, vg = virtual.get_approximation( self.get_current_group(), 'Volume' )
        n_el, n_qp, dim, n_ep = ap.get_v_data_shape( self.integral_name )
        cache = self.get_cache( 'state_in_volume_qp', 0 )
        # BUG FIX: keep the cached state under its own name (vec_qp).  The
        # original rebound `vec` to the per-chunk integrand inside the loop,
        # so every chunk after the first indexed a corrupted array in mode 0.
        vec_qp = cache( 'state', self.get_current_group(), 0, state = state )
        vdim = vec_qp.shape[-1]
        if diff_var is None:
            shape = (chunk_size, 1, vdim * n_ep, 1)
            mode = 0
        elif diff_var == self.get_arg_name( 'state' ):
            shape = (chunk_size, 1, vdim * n_ep, vdim * n_ep)
            mode = 1
        else:
            raise StopIteration
        # Promote a 1D weight to a trailing unit axis for the cache.
        if mat.ndim == 1:
            mat = mat[...,nm.newaxis]
        cache = self.get_cache( 'mat_in_qp', 0 )
        mat_qp = cache( 'matqp', self.get_current_group(), 0,
                        mat = mat, ap = ap,
                        assumed_shapes = [(1, n_qp, 1, 1),
                                          (n_el, n_qp, 1, 1)],
                        mode_in = None )
        mat_qp = fix_mat_qp_shape( mat_qp, chunk_size )
        bf = ap.get_base( 'v', 0, self.integral_name )
        bf_t = bf.transpose( (0, 2, 1) )
        for out, chunk in self.char_fun( chunk_size, shape ):
            if vdim > 1:
                raise NotImplementedError
            else:
                if mode == 0:
                    val_qp = bf_t * mat_qp[chunk] * vec_qp[chunk]
                else:
                    # Tangent of y q p w.r.t. p: y bf bf^T in each point.
                    val_qp = bf_t * mat_qp[chunk] * bf
            status = vg.integrate_chunk( out, val_qp, chunk )
            yield out, chunk, status
##
# c: 02.04.2008
class WDotProductVolumeOperatorDtTerm( WDotProductVolumeOperatorTerm ):
    r""":description: Volume $L^2(\Omega)$ weighted dot product operator for
    scalar and vector (not implemented!) fields.
    :definition: $\int_\Omega y q \frac{p - p_0}{\dt}$,
    $\int_\Omega y \ul{v} \cdot \frac{\ul{u} - \ul{u}_0}{\dt}$
    :arguments: material : weight function $y$"""
    name = 'dw_volume_wdot_dt'
    arg_types = ('ts', 'material', 'virtual', 'state', 'parameter')
    geometry = [(Volume, 'virtual'), (Volume, 'state')]
    # Two history steps of the state are cached to form p - p_0.
    use_caches = {'state_in_volume_qp' : [['state', {'state' : (2,2)}]],
                  'mat_in_qp' : [['material']]}
    ##
    # c: 02.04.2008, r: 04.04.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Assemble the weighted dot product with the backward time
        difference of the state: residual (mode 0) or tangent matrix
        (mode 1).  Vector fields raise NotImplementedError.
        """
        ts, mat, virtual, state, par = self.get_args( **kwargs )
        ap, vg = virtual.get_approximation( self.get_current_group(), 'Volume' )
        n_el, n_qp, dim, n_ep = ap.get_v_data_shape( self.integral_name )
        cache = self.get_cache( 'state_in_volume_qp', 0 )
        vec = cache( 'state', self.get_current_group(), 0,
                     state = state, history = par )
        if ts.step > 0:
            # Previous step (history index 1); dvec is the discrete time
            # derivative (p - p_0) / dt.
            vec0 = cache( 'state', self.get_current_group(), 1,
                          state = state, history = par )
            dvec = (vec - vec0) / ts.dt
        vdim = vec.shape[-1]
        if diff_var is None:
            shape = (chunk_size, 1, vdim * n_ep, 1)
            mode = 0
        elif diff_var == self.get_arg_name( 'state' ):
            shape = (chunk_size, 1, vdim * n_ep, vdim * n_ep)
            mode = 1
        else:
            raise StopIteration
        # At the initial step dvec does not exist, so residual mode (which
        # would read it below) cannot be evaluated; the tangent mode only
        # uses bf and mat_qp and is fine.
        if (ts.step == 0) and (mode == 0):
            raise StopIteration
        # Promote a 1D weight to a trailing unit axis for the cache.
        if mat.ndim == 1:
            mat = mat[...,nm.newaxis]
        cache = self.get_cache( 'mat_in_qp', 0 )
        mat_qp = cache( 'matqp', self.get_current_group(), 0,
                        mat = mat, ap = ap,
                        assumed_shapes = [(1, n_qp, 1, 1),
                                          (n_el, n_qp, 1, 1)],
                        mode_in = None )
        mat_qp = fix_mat_qp_shape( mat_qp, chunk_size )
        bf = ap.get_base( 'v', 0, self.integral_name )
        bf_t = bf.transpose( (0, 2, 1) )
        for out, chunk in self.char_fun( chunk_size, shape ):
            if vdim > 1:
                raise NotImplementedError
            else:
                if mode == 0:
                    # NOTE(review): this rebinds `vec` (the cached state),
                    # but `vec` is not re-read inside this loop (dvec was
                    # precomputed), so behavior is unaffected.
                    vec = bf_t * mat_qp[chunk] * dvec[chunk]
                else:
                    # Tangent of y q (p - p_0)/dt w.r.t. p: y bf bf^T / dt.
                    vec = bf_t * mat_qp[chunk] * bf / ts.dt
            status = vg.integrate_chunk( out, vec, chunk )
            yield out, chunk, status
##
# c: 03.04.2008
class WDotProductVolumeOperatorTHTerm( Term ):
    r""":definition: $\int_\Omega \left [\int_0^t \Gcal(t-\tau) p(\tau)
    \difd{\tau} \right] q$"""
    name = 'dw_volume_wdot_th'
    arg_types = ('ts', 'material', 'virtual', 'state', 'parameter')
    geometry = [(Volume, 'virtual'), (Volume, 'state')]
    # (-1,-1): keep the full state history for the fading-memory sum.
    use_caches = {'state_in_volume_qp' : [['state', {'state' : (-1,-1)}]]}
    ##
    # c: 03.04.2008, r: 03.04.2008
    def __init__( self, region, name = name, sign = 1 ):
        # Element-level integration is delegated to the C function
        # terms.dw_volume_wdot_scalar (becomes self.function).
        Term.__init__( self, region, name, sign, terms.dw_volume_wdot_scalar )
    ##
    # c: 03.04.2008, r: 03.04.2008
    def get_shape( self, diff_var, chunk_size, apr, apc = None ):
        """Return (output shape, mode): mode 0 for the residual, mode 1
        for the tangent matrix w.r.t. the state variable.  Also stores
        self.data_shape for __call__."""
        self.data_shape = apr.get_v_data_shape( self.integral_name )
        n_el, n_qp, dim, n_ep = self.data_shape
        if diff_var is None:
            return (chunk_size, 1, n_ep, 1), 0
        elif diff_var == self.get_arg_name( 'state' ):
            return (chunk_size, 1, n_ep, n_ep), 1
        else:
            raise StopIteration
    ##
    # c: 03.04.2008, r: 18.06.2008
    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Evaluate the time-history (convolution) term by summing the
        kernel values in `mats` against the cached state history.
        """
        ts, mats, virtual, state, history = self.get_args( **kwargs )
        ap, vg = virtual.get_approximation( self.get_current_group(), 'Volume' )
        shape, mode = self.get_shape( diff_var, chunk_size, ap )
        n_el, n_qp, dim, n_ep = self.data_shape
        if (ts.step == 0) and (mode == 0):
            # No history yet - nothing to assemble in residual mode.
            raise StopIteration
        bf = ap.get_base( 'v', 0, self.integral_name )
        if mode == 1:
            # Tangent matrix: only the current-time kernel value mats[0]
            # contributes; no state vector is needed (empty array passed).
            mat_qp = mats[0][nm.newaxis,:,nm.newaxis].repeat( n_qp, 0 )
            for out, chunk in self.char_fun( chunk_size, shape ):
                status = self.function( out, ts.dt, nm.empty( 0 ), bf,
                                        mat_qp, vg, chunk, 1 )
                yield out, chunk, status
        else:
            cache = self.get_cache( 'state_in_volume_qp', 0 )
            # Residual: accumulate kernel(ii) * state(history index ii)
            # into `out` (zeroed by the char_fun via zero = True).
            for out, chunk in self.char_fun( chunk_size, shape, zero = True ):
                out1 = nm.empty_like( out )
                for ii, mat in enumerate( mats ):
                    mat_qp = mat[nm.newaxis,:,nm.newaxis].repeat( n_qp, 0 )
                    vec_qp = cache( 'state', self.get_current_group(), ii,
                                    state = state, history = history )
                    status = self.function( out1, ts.dt, vec_qp, bf,
                                            mat_qp, vg, chunk, 0 )
                    out += out1
                yield out, chunk, status
class AverageVariableTerm( Term ):
    r""":description: Variable $y$ averaged in elements.
    :definition: vector of $\forall K \in \Tcal_h: \int_{T_K} y /
    \int_{T_K} 1$"""
    name = 'de_average_variable'
    arg_types = ('parameter',)
    geometry = [(Volume, 'parameter')]
    use_caches = {'state_in_volume_qp' : [['parameter']]}

    def __init__( self, region, name = name, sign = 1 ):
        Term.__init__( self, region, name, sign )

    def __call__( self, diff_var = None, chunk_size = None, **kwargs ):
        """Yield per-element averages chunk-wise: the element integral of
        the variable divided by vg.variable( 2 ), which per the class
        definition plays the role of the element volume."""
        par, = self.get_args( **kwargs )
        ap, vg = par.get_approximation( self.get_current_group(), 'Volume' )
        vec_qp = self.get_cache( 'state_in_volume_qp', 0 )(
            'state', self.get_current_group(), 0, state = par )
        vdim = vec_qp.shape[-1]
        el_volumes = vg.variable( 2 )
        for out, chunk in self.char_fun( chunk_size, (chunk_size, 1, vdim, 1) ):
            status = vg.integrate_chunk( out, vec_qp[chunk], chunk )
            yield out / el_volumes[chunk], chunk, status
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.