code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import torch
import torch.nn as nn
from torchvision.models import resnet50
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
model_urls = dict(
acc_920='https://github.com/khrlimam/facenet/releases/download/acc-0.920/model920-6be7e3e9.pth',
acc_921='https://github.com/khrlimam/facenet/releases/download/acc-0.92135/model921-af60fb4f.pth'
)
def load_state(arch, progress=True):
    """Download and return the checkpoint state dict for the given model key.

    :param arch: key into ``model_urls`` (e.g. 'acc_920' or 'acc_921')
    :param progress: show a download progress bar when True
    """
    url = model_urls.get(arch)
    return load_state_dict_from_url(url, progress=progress)
def model_920(pretrained=True, progress=True):
    """Build a FaceNetModel, optionally loading the 92.0%-accuracy checkpoint."""
    model = FaceNetModel()
    if not pretrained:
        return model
    checkpoint = load_state('acc_920', progress)
    model.load_state_dict(checkpoint['state_dict'])
    return model
def model_921(pretrained=True, progress=True):
    """Build a FaceNetModel, optionally loading the 92.135%-accuracy checkpoint."""
    model = FaceNetModel()
    if not pretrained:
        return model
    checkpoint = load_state('acc_921', progress)
    model.load_state_dict(checkpoint['state_dict'])
    return model
class Flatten(nn.Module):
    """Collapse every dimension after the batch axis into a single one."""

    def forward(self, x):
        batch = x.shape[0]
        # Output shape is (batch, -1); x.view requires a contiguous tensor.
        return x.view(batch, -1)
class FaceNetModel(nn.Module):
    """ResNet-50 backbone that maps a face image to a 128-D L2-normalized embedding.

    The fully connected head of the ResNet is replaced following
    https://arxiv.org/abs/1703.07737, and a separate linear classifier over
    ``num_classes`` identities is attached for classification-based training.
    """

    def __init__(self, pretrained=False):
        super(FaceNetModel, self).__init__()
        # `pretrained` is forwarded to torchvision's resnet50 constructor.
        self.model = resnet50(pretrained)
        embedding_size = 128
        num_classes = 500
        # Convolutional feature extractor: everything up to (and including) layer4.
        self.cnn = nn.Sequential(
            self.model.conv1,
            self.model.bn1,
            self.model.relu,
            self.model.maxpool,
            self.model.layer1,
            self.model.layer2,
            self.model.layer3,
            self.model.layer4)
        # modify fc layer based on https://arxiv.org/abs/1703.07737
        self.model.fc = nn.Sequential(
            Flatten(),
            # 100352 = 2048 channels * 7 * 7 spatial positions
            # (presumably assumes 224x224 input -- TODO confirm).
            nn.Linear(100352, embedding_size))
        self.model.classifier = nn.Linear(embedding_size, num_classes)

    def l2_norm(self, input):
        """Return `input` with every row scaled to unit Euclidean norm."""
        input_size = input.size()
        buffer = torch.pow(input, 2)
        # Epsilon guards against division by zero for all-zero rows.
        normp = torch.sum(buffer, 1).add_(1e-10)
        norm = torch.sqrt(normp)
        _output = torch.div(input, norm.view(-1, 1).expand_as(input))
        output = _output.view(input_size)
        return output

    def freeze_all(self):
        """Disable gradients for every parameter of the backbone."""
        for param in self.model.parameters():
            param.requires_grad = False

    def unfreeze_all(self):
        """Enable gradients for every parameter of the backbone."""
        for param in self.model.parameters():
            param.requires_grad = True

    def freeze_fc(self):
        """Disable gradients for the embedding head only."""
        for param in self.model.fc.parameters():
            param.requires_grad = False

    def unfreeze_fc(self):
        """Enable gradients for the embedding head only."""
        for param in self.model.fc.parameters():
            param.requires_grad = True

    def freeze_only(self, freeze):
        """Freeze the named child modules of the backbone; unfreeze all others."""
        for name, child in self.model.named_children():
            requires_grad = name not in freeze
            for param in child.parameters():
                param.requires_grad = requires_grad

    def unfreeze_only(self, unfreeze):
        """Unfreeze the named child modules of the backbone; freeze all others."""
        for name, child in self.model.named_children():
            requires_grad = name in unfreeze
            for param in child.parameters():
                param.requires_grad = requires_grad

    # returns face embedding(embedding_size)
    def forward(self, x):
        """Return the scaled, L2-normalized face embedding for a batch of images."""
        x = self.cnn(x)
        x = self.model.fc(x)
        features = self.l2_norm(x)
        # Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf
        alpha = 10
        features = features * alpha
        return features

    def forward_classifier(self, x):
        """Return per-identity classification logits computed from the embedding."""
        features = self.forward(x)
        res = self.model.classifier(features)
        return res
import numpy as np
from numpy.fft import fft, ifft
from numba import jit, prange, objmode
class ModelParams():
    """Precomputed ETDRK4 coefficients for the Kuramoto-Sivashinsky equation.

    Object-oriented counterpart of ``precompute_KS_params``; stores the
    coefficients as attributes instead of packing them into one array.

    :param N: number of spatial Fourier modes (assumed even)
    :param d: domain length
    :param tau: time step
    :param M: number of complex contour points used to evaluate the
        phi-functions (Kassam & Trefethen contour-integral trick)
    :param const: extra coefficient added to the linear k**2 term
    """

    def __init__(self, N, d, tau, M=16, const=0):
        # Bug fixes vs. original: `self` was missing from the signature,
        # `self.L` was assigned before L existed, and k/L were referenced
        # as undefined locals.
        self.N = N
        self.d = d
        self.tau = tau
        self.M = M
        self.const = const
        # Fourier wavenumbers in FFT ordering: [0..N/2-1, -N/2..-1] * 2*pi/d.
        self.k = np.concatenate((np.arange(int(N / 2)), np.arange(-int(N / 2), 0))) * 2 * np.pi / d
        # Linear operator of the KS equation in Fourier space.
        self.L = (1 + const) * self.k ** 2.0 - self.k ** 4.0
        self.E = np.exp(tau * self.L)
        self.E2 = np.exp(tau / 2 * self.L)
        # Contour points for stable evaluation of the phi-functions near L = 0.
        r = np.exp(1j * np.pi * (np.arange(1, M + 1) - 0.5) / M)
        LR = tau * (np.zeros((1, M)) + self.L.reshape(-1, 1)) + (np.zeros((N, 1)) + r)
        self.Q = tau * np.real(np.mean((np.exp(LR / 2) - 1) / LR, axis=1))
        self.f1 = tau * np.real(np.mean((-4 - LR + np.exp(LR) * (4 - 3 * LR + LR ** 2.0)) / (LR ** 3.0), axis=1))
        self.f2 = tau * np.real(np.mean((2 + LR + np.exp(LR) * (-2 + LR)) / (LR ** 3.0), axis=1))
        self.f3 = tau * np.real(np.mean((-4 - 3 * LR - LR ** 2.0 + np.exp(LR) * (4 - LR)) / (LR ** 3.0), axis=1))
        # Spectral coefficient of the nonlinear term -(1/2) d/dx (u^2).
        self.g = -0.5 * 1j * self.k
@jit(nopython = True, fastmath = True, parallel = True)
def mean_numba_axis1(mat):
    # Numba-compatible replacement for np.mean(mat, axis=1): nopython mode
    # does not support the `axis` keyword, so average each row explicitly.
    # prange lets numba parallelize the outer loop across rows.
    res = np.zeros(mat.shape[0])
    for i in prange(mat.shape[0]):
        res[i] = np.mean(mat[i])
    return res
@jit(nopython = True, fastmath = True)
def precompute_KS_params(N, d, tau, M = 16, const = 0):
    # Precompute the ETDRK4 coefficients for the Kuramoto-Sivashinsky equation
    # on N Fourier modes over a domain of length d with time step tau.
    # Returns a (7, N) complex array whose rows are [E, E2, Q, f1, f2, f3, g].
    # Wavenumbers in FFT ordering: [0..N/2-1, -N/2..-1] * 2*pi/d.
    k = np.concatenate((np.arange(int(N/2)), np.arange(-int(N/2), 0)))*2*np.pi/d
    # Linear operator of the KS equation in Fourier space.
    L = (1+const)*k**2.0 - k**4.0
    E = np.exp(tau*L)
    E2 = np.exp(tau/2*L)
    # Contour points for stable evaluation of the phi-functions near L = 0
    # (contour-integral trick of Kassam & Trefethen, 2005).
    r = np.exp(1j * np.pi * (np.arange(1, M+1)-0.5)/M)
    LR = tau*(np.zeros((1,M)) + L.reshape(-1,1)) + (np.zeros((N,1)) + r)
    Q = tau*mean_numba_axis1(np.real((np.exp(LR/2)-1)/LR))
    f1 = tau*mean_numba_axis1(np.real((-4-LR+np.exp(LR)*(4-3*LR+LR**2.0))/(LR**3.0)))
    f2 = tau*mean_numba_axis1(np.real((2+LR+np.exp(LR)*(-2+LR))/(LR**3.0)))
    f3 = tau*mean_numba_axis1(np.real((-4-3*LR-LR**2.0+np.exp(LR)*(4-LR))/(LR**3.0)))
    # Spectral coefficient of the nonlinear term -(1/2) d/dx (u^2).
    g = -0.5*1j*k
    # Pack everything into a single complex array so numba can return one value.
    params = np.zeros((7,N), dtype = np.complex128)
    params[0] = E
    params[1] = E2
    params[2] = Q
    params[3] = f1
    params[4] = f2
    params[5] = f3
    params[6] = g
    return params
@jit(nopython = True, fastmath = True)
def kursiv_forecast(u, params):
    # Advance a single KS state vector u by one ETDRK4 time step.
    # params is the (7, N) array from precompute_KS_params with rows
    # [E, E2, Q, f1, f2, f3, g].
    # FFTs are unsupported in numba nopython mode, so the spectral
    # arithmetic runs under objmode (ordinary Python/numpy).
    with objmode(unext = 'double[:]'):
        v = fft(u)
        # Nonlinear term g * FFT(u^2) evaluated at the current state.
        Nv = params[6]*fft(np.real(ifft(v))**2.0)
        # ETDRK4 intermediate stages a, b, c and their nonlinear terms.
        a = params[1]*v + params[2]*Nv
        Na = params[6]*fft(np.real(ifft(a))**2.0)
        b = params[1]*v + params[2]*Na
        Nb = params[6]*fft(np.real(ifft(b))**2.0)
        c = params[1]*a + params[2]*(2*Nb - Nv)
        Nc = params[6]*fft(np.real(ifft(c))**2.0)
        # Combine the stages using the precomputed phi-function weights.
        vnext = params[0]*v + Nv*params[3] + 2*(Na+Nb)*params[4] + Nc*params[5]
        unext = np.real(ifft(vnext))
    return unext
@jit(nopython = True, fastmath = True)
def kursiv_forecast_pred(u, params):
    # Batched variant of kursiv_forecast: advances every column of u
    # (one KS state per column) by one ETDRK4 step.
    # Transpose so each state is a contiguous row, which the axis=1 FFTs expect.
    u = np.ascontiguousarray(u.T)
    # FFTs are unsupported in numba nopython mode, so run under objmode.
    with objmode(unext = 'double[:,:]'):
        v = fft(u,axis = 1)
        # Nonlinear term g * FFT(u^2) at the current state.
        Nv = params[6]*fft(np.real(ifft(v, axis = 1))**2.0, axis = 1)
        # ETDRK4 intermediate stages a, b, c and their nonlinear terms.
        a = params[1]*v + params[2]*Nv
        Na = params[6]*fft(np.real(ifft(a, axis = 1))**2.0, axis = 1)
        b = params[1]*v + params[2]*Na
        Nb = params[6]*fft(np.real(ifft(b, axis = 1))**2.0, axis = 1)
        c = params[1]*a + params[2]*(2*Nb - Nv)
        Nc = params[6]*fft(np.real(ifft(c, axis = 1))**2.0, axis = 1)
        # Combine the stages using the precomputed phi-function weights.
        v = params[0]*v + Nv*params[3] + 2*(Na+Nb)*params[4] + Nc*params[5]
        unext = np.real(ifft(v, axis = 1))
    # Transpose back to the original column-per-state layout.
    return unext.T
@jit(nopython = True, fastmath = True)
def kursiv_predict(u0, tau = 0.25, N = 64, d = 22, T = 100, params = np.array([[],[]], dtype = np.complex128), int_steps = 1):
    # Integrate the KS equation from initial state u0 for T output steps,
    # taking int_steps internal ETDRK4 sub-steps per output step.
    # Returns (trajectory sampled every int_steps sub-steps, params) so the
    # caller can reuse the precomputed coefficients on subsequent calls.
    # An empty `params` array means "compute the coefficients here".
    if params.size == 0:
        new_params = precompute_KS_params(N, d, tau)
    else:
        new_params = params
    steps = T*int_steps
    u_arr = np.zeros((N, steps+int_steps))
    u_arr[:,0] = u0
    for i in range(steps+int_steps-1):
        u_arr[:,i+1] = kursiv_forecast(u_arr[:,i], new_params)
    # Keep only every int_steps-th column (the requested output sampling).
    return np.ascontiguousarray(u_arr[:,::int_steps]), new_params
@jit(nopython = True, fastmath = True)
def kursiv_predict_pred(u0_array, tau = 0.25, N = 64, d = 22, T = 100, params = np.array([[],[]], dtype = np.complex128)):
    """Advance a whole batch of KS states by a single ETDRK4 step.

    :param u0_array: (N, n_states) array of initial states, one state per column
    :param tau: time step, only used when params must be precomputed
    :param N: number of Fourier modes, only used when params must be precomputed
    :param d: domain length, only used when params must be precomputed
    :param T: unused; accepted only for signature symmetry with kursiv_predict
    :param params: optional precomputed (7, N) ETDRK4 coefficient array;
        an empty array means "compute the coefficients here"
    :return: tuple of (advanced states, params) so the caller can reuse the
        precomputed coefficients on subsequent calls
    """
    if params.size == 0:
        new_params = precompute_KS_params(N, d, tau)
    else:
        new_params = params
    u_arr = kursiv_forecast_pred(u0_array, new_params)
    return u_arr, new_params
# Bootstrapping and machine learning
We recently improved the interface of `resample` to make it easy to bootstrap training data sets for machine learning (ML) classifiers. So, this example demonstrates how one can bootstrap the ROC curve of a classifier from the training data set, without a separate validation set. In other words, this allows one to use the full data set for training and one obtains a very smooth ROC curve.
Sounds too good to be true? Maybe it is! The bootstrap only works well with classifiers that build a smooth representation of the decision boundary, like a neural network. It does not work well with classifiers that use sharp decision boundaries which depend on the locations of individual points, like a boosted decision tree, a random forest, or a kNN.
Below we compute a bootstrapped ROC curve for the MLP and RandomForest classifiers from Scikit-Learn, a standard ROC curve from a train-test split, and finally from a separately generated high-statistics data set. The latter serves as an estimate of the "true" ROC curve. In case of the Random Forest, the bootstrapped ROC curve is too optimistic, while in case of the MLP it is ok.
```
from resample.bootstrap import resample
from sklearn import datasets
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import plot_roc_curve, roc_curve
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
import numpy as np
# original data
X, y = datasets.make_moons(1000, noise=0.3, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
# original classifiers
mlp = MLPClassifier(max_iter=1000) # iterations increased to avoid warning
mlp.fit(X_train, y_train)
rf = RandomForestClassifier(random_state=1)
rf.fit(X_train, y_train)
x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
h = 0.02
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
cm = plt.cm.RdBu
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
for axi, clf in zip(ax, [mlp, rf]):
plt.sca(axi)
# plot the training points
plt.plot(X_train[:, 0][y_train == 0], X_train[:, 1][y_train == 0], "o",
color=cm(0.0), mec="w", label="signal")
plt.plot(X_train[:, 0][y_train == 1], X_train[:, 1][y_train == 1], "D",
color=cm(1.0), mec="w", label="background")
# plot models
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=cm, alpha=0.5, zorder=0)
plt.xlabel("feature 1")
plt.ylabel("feature 2")
plt.legend(frameon=False);
# generate ROC curve with validation set (standard method)
fpr1 = {}
tpr1 = {}
for clf in (mlp, rf):
fpr1[clf], tpr1[clf], _ = roc_curve(y_test, clf.predict_proba(X_test)[:, 1])
# generate ROC curve from separately-generated high-statistics data set
fpr2 = {}
tpr2 = {}
X_hs, y_hs = datasets.make_moons(100000, noise=0.3, random_state=1)
for clf in (mlp, rf):
fpr2[clf], tpr2[clf], _ = roc_curve(y_hs, clf.predict_proba(X_hs)[:, 1])
# generate ROC curve from training data with 20 bootstrap samples
fpr3 = {}
tpr3 = {}
w_s = {}
w_b = {}
for clf in (mlp, rf):
s = 0
b = 0
xrange = (0, 1)
bins = 50
for Xi, yi in resample(X_train, y_train, size=20):
clf.fit(Xi, yi)
pi = clf.predict_proba(X)[:, 1]
s += np.histogram(pi[y == 1], range=xrange, bins=bins)[0]
b += np.histogram(pi[y == 0], range=xrange, bins=bins)[0]
w_s[clf] = s
w_b[clf] = b
tpr3[clf] = 1 - np.cumsum(s) / np.sum(s)
fpr3[clf] = 1 - np.cumsum(b) / np.sum(b)
for clf in (mlp, rf):
# plot score distributions for signal and background
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
plt.suptitle({mlp: "MLP", rf: "Random Forest"}[clf])
xe = np.linspace(*xrange, bins + 1)
plt.sca(ax[0])
plt.stairs(w_s[clf], xe, fill=True, alpha=0.5, label="signal")
plt.stairs(w_b[clf], xe, fill=True, alpha=0.5, label="background")
plt.legend(frameon=False, loc="upper center")
plt.xlabel("classifier score");
# plot ROC curves
plt.sca(ax[1])
plt.plot(fpr1[clf], tpr1[clf], ls="--", label="train-test split")
plt.plot(fpr2[clf], tpr2[clf], ls=":", label="high-statistics")
plt.plot(fpr3[clf], tpr3[clf], drawstyle="steps-post", label="bootstrap")
plt.legend(frameon=False)
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
```
| /resample-1.6.0.tar.gz/resample-1.6.0/doc/tutorial/sklearn.ipynb | 0.87157 | 0.974629 | sklearn.ipynb | pypi |
# Bootstrap and Jackknife comparison
In this notebook we compare the bootstrap to the jackknife. Bootstrap resampling is superior to jackknifing, but the jackknife is deterministic, which may be helpful, and it can exactly remove biases of order 1/N from an estimator (the bootstrap removes biases of higher orders, too, but it does not remove the lowest order exactly).
```
from resample import jackknife as j, bootstrap as b
import numpy as np
from scipy import stats
rng = np.random.default_rng(1)
data = rng.normal(size=20)
# get mean and std deviation
def fn(d):
return np.mean(d), np.var(d, ddof=0) # we return the biased variance
# exact bias for biased standard deviation
# - we computed: s = 1/N * sum(x ** 2 - np.mean(x) ** 2)
# - correct is: N/(N-1) * s
# - bias is: (1 - N/(N-1)) * s = (N - 1 - N) / (N - 1) * s = - 1 / (N - 1) * s
print("estimates ", np.round(fn(data), 3))
print("std.dev. (jackknife)", np.round(j.variance(fn, data) ** 0.5, 3))
print("std.dev. (bootstrap)", np.round(b.variance(fn, data, random_state=1) ** 0.5, 3))
print("bias (jackknife) ", np.round(j.bias(fn, data), 3))
print("bias (bootstrap) ", np.round(b.bias(fn, data, random_state=1), 3))
print("bias (exact) ", np.round((0, -1 / (len(data) - 1) * fn(data)[1]), 3))
```
The standard deviations for the estimates computed by bootstrap and jackknife differ by about 10 %. This difference shrinks for larger data sets.
Both resampling methods find no bias for the mean, and a small bias for the (not bias-corrected) variance. The jackknife is getting closer, since the bias for sufficiently large N is dominated by the O(1/N) order that the jackknife removes exactly.
| /resample-1.6.0.tar.gz/resample-1.6.0/doc/tutorial/jackknife_vs_bootstrap.ipynb | 0.658637 | 0.942771 | jackknife_vs_bootstrap.ipynb | pypi |
# USP test of on continuous data
We demonstrate how the [USP test of independence](https://doi.org/10.1098/rspa.2021.0549) can be applied to continuous data.
A test of independence is stronger than a test for zero correlation. A test of independence can also detect dependencies which give zero correlation.
```
from resample import permutation as perm
import matplotlib.pyplot as plt
import numpy as np
rng = np.random.default_rng(1)
x1 = rng.normal(0, 2, size=100)
y1 = rng.normal(0, 3, size=100)
cov = np.empty((2, 2))
cov[0, 0] = 2 ** 2
cov[1, 1] = 3 ** 2
rho = 0.5
cov[0, 1] = rho * np.sqrt(cov[0, 0] * cov[1, 1])
cov[1, 0] = cov[0, 1]
xy2 = rng.multivariate_normal([0, 0], cov, size=500)
d = {"x,y are independent": (x1, y1), "x,y are correlated": xy2.T}
for label, (x, y) in d.items():
# input is a histogram
w, xe, ye = np.histogram2d(x, y)
# apply USP test
r = perm.usp(w, random_state=1)
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
plt.sca(ax[0])
plt.pcolormesh(xe, ye, w.T)
plt.sca(ax[1])
plt.hist(r.samples, bins=20, label="test statistic under\nnull hypothesis")
plt.axvline(r.statistic, color="k", label="test statistic\nfrom input")
plt.suptitle(f"{label}: p-value={r.pvalue:.3f}")
plt.legend()
```
| /resample-1.6.0.tar.gz/resample-1.6.0/doc/tutorial/usp_continuous_data.ipynb | 0.723993 | 0.991915 | usp_continuous_data.ipynb | pypi |
# Variance of fit parameters
We use the bootstrap and the jackknife to compute the uncertainties of a non-linear least-squares fit. The bootstrap is generally superior to the jackknife, which we will also see here. We use `scipy.optimize.curve_fit` to perform the fit, which also estimates the parameter uncertainties with asymptotic theory. For reference, we also run a Monte-Carlo simulation of the experiment with a large number of tries, which gives an independent estimate of the parameter uncertainties.
In this case, the asymptotic theory estimate is very accurate, while the bootstrap and the jackknife estimates are similar and off. The accuracy of the non-parametric methods improves with the sample size.
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from resample import bootstrap, jackknife
rng = np.random.default_rng(1)
# generate some random data, each y value scatters randomly
x = np.linspace(0, 1, 100)
y = 1 + 10 * x ** 2
ye = 0.5 + x
y += rng.normal(0, ye)
def model(x, a, b, c):
return a + b * x + c * x ** 2
def fit(x, y, ye):
return curve_fit(model, x, y, sigma=ye, absolute_sigma=True)
# fit original data and compute covariance estimate from asymptotic theory
par, cov = fit(x, y, ye)
plt.errorbar(x, y, ye, fmt="o", label="data")
xm = np.linspace(np.min(x), np.max(x), 1000)
plt.plot(xm, model(xm, *par), label="fit")
plt.legend();
# now only return fit parameters
def fit2(x, y, ye):
return fit(x, y, ye)[0]
# jackknife and bootstrap
jvar = jackknife.variance(fit2, x, y, ye)
bvar = bootstrap.variance(fit2, x, y, ye, size=1000, random_state=1)
# Monte-Carlo simulation for reference
mvar = []
for itry in range(1000):
y2 = 1 + 10 * x ** 2 + rng.normal(0, ye)
mvar.append(fit2(x, y2, ye))
mvar = np.var(mvar, axis=0)
for n, p, e, ej, eb, em in zip("abc", par,
np.diag(cov) ** 0.5,
jvar ** 0.5,
bvar ** 0.5,
mvar ** 0.5):
print(f"{n} = {p:5.2f} +/- {e:1.2f} "
f"jackknife={ej:1.2f} "
f"bootstrap={eb:1.2f} "
f"MC={em:1.2f}")
```
| /resample-1.6.0.tar.gz/resample-1.6.0/doc/tutorial/variance_fit_parameters.ipynb | 0.526586 | 0.981347 | variance_fit_parameters.ipynb | pypi |
# Confidence intervals
In this notebook, we look at the confidence interval methods in `resample`. We try them on the median of an exponential distribution.
```
import numpy as np
from resample.bootstrap import confidence_interval as ci, bootstrap
import matplotlib.pyplot as plt
rng = np.random.default_rng(1)
# generate data
data = rng.exponential(size=1000)
# generate confidence intervals
cis = {
m: ci(np.median, data, cl=0.68, size=100, ci_method=m, random_state=rng)
for m in ("percentile", "bca")
}
# compute mean and std. deviation of replicates
rep = bootstrap(np.median, data, size=1000, random_state=rng)
mr = np.mean(rep)
sr = np.std(rep)
# draw everything
for i, (m, v) in enumerate(cis.items()):
for j in (0, 1):
plt.axvline(v[j], color=f"C{i}", label=m if j == 0 else None)
plt.hist(rep, facecolor="0.8")
plt.axvline(np.log(2), lw=3, color="k")
plt.errorbar(mr, 100, 0, sr, fmt="o")
plt.legend();
```
The mean of the replicates and its standard deviation is shown with the dot and the horizontal error bar. The three interval methods are shown as thin vertical lines. The thick black line is the true value of the median for an exponential distribution.
| /resample-1.6.0.tar.gz/resample-1.6.0/doc/tutorial/confidence_intervals.ipynb | 0.617051 | 0.95511 | confidence_intervals.ipynb | pypi |
[Hugues Hoppe](https://hhoppe.com/)
Aug 2022.
[**[Open in Colab]**](https://colab.research.google.com/github/hhoppe/resampler/blob/main/resampler_notebook.ipynb)
[**[Kaggle]**](https://www.kaggle.com/notebooks/welcome?src=https://github.com/hhoppe/resampler/blob/main/resampler_notebook.ipynb)
[**[MyBinder]**](https://mybinder.org/v2/gh/hhoppe/resampler/main?filepath=resampler_notebook.ipynb)
[**[DeepNote]**](https://deepnote.com/launch?url=https%3A%2F%2Fgithub.com%2Fhhoppe%2Fresampler%2Fblob%2Fmain%2Fresampler_notebook.ipynb)
[**[GitHub source]**](https://github.com/hhoppe/resampler)
[**[API docs]**](https://hhoppe.github.io/resampler/)
[**[PyPI package]**](https://pypi.org/project/resampler/)
The notebook
[<samp>resampler_notebook.ipynb</samp>](https://colab.research.google.com/github/hhoppe/resampler/blob/main/resampler_notebook.ipynb)
demonstrates the
[<samp>resampler</samp> library](https://pypi.org/project/resampler/)
and contains documentation, usage examples, unit tests, and experiments.
# Overview
The `resampler` library enables fast differentiable resizing and warping of arbitrary grids.
It supports:
- grids of **any dimension** (e.g., 1D, 2D images, 3D video, 4D batches of videos), containing
- **samples of any shape** (e.g., scalars, colors, motion vectors, Jacobian matrices) and
- any **numeric type** (e.g., `uint8`, `float64`, `complex128`)
- within several **array libraries**
  (`numpy`, `tensorflow`, `torch`, and `jax`);
- either `'dual'` ("half-integer") or `'primal'` **grid-type** for each dimension;
- many **boundary** rules, specified per dimension, extensible via subclassing;
- an extensible set of **filter** kernels, selectable per dimension;
- optional **gamma** transfer functions for correct linear-space filtering;
- prefiltering for accurate **antialiasing** when downsampling with `resize`;
- efficient backpropagation of **gradients**
for `tensorflow`, `torch`, and `jax`;
- few dependencies (only `numpy` and `scipy`) and **no C extension code**, yet
- **faster resizing** than C++ implementations
in `tf.image` and `torch.nn`.
A key strategy is to leverage existing sparse matrix representations and operations.
## Example usage
```python
!pip install -q mediapy resampler
import mediapy as media
import numpy as np
import resampler
```
```python
array = np.random.default_rng(1).random((4, 6, 3)) # 4x6 RGB image.
upsampled = resampler.resize(array, (128, 192)) # To 128x192 resolution.
media.show_images({'4x6': array, '128x192': upsampled}, height=128)
```
> <img src="https://github.com/hhoppe/resampler/raw/main/media/example_array_upsampled.png"/>
```python
image = media.read_image('https://github.com/hhoppe/data/raw/main/image.png')
downsampled = resampler.resize(image, (32, 32))
media.show_images({'128x128': image, '32x32': downsampled}, height=128)
```
> <img src="https://github.com/hhoppe/resampler/raw/main/media/example_array_downsampled.png"/>
```python
import matplotlib.pyplot as plt
```
```python
array = [3.0, 5.0, 8.0, 7.0] # 4 source samples in 1D.
new_dual = resampler.resize(array, (32,)) # (default gridtype='dual') 8x resolution.
new_primal = resampler.resize(array, (25,), gridtype='primal') # 8x resolution.
_, axs = plt.subplots(1, 2, figsize=(7, 1.5))
axs[0].set_title("gridtype='dual'")
axs[0].plot((np.arange(len(array)) + 0.5) / len(array), array, 'o')
axs[0].plot((np.arange(len(new_dual)) + 0.5) / len(new_dual), new_dual, '.')
axs[1].set_title("gridtype='primal'")
axs[1].plot(np.arange(len(array)) / (len(array) - 1), array, 'o')
axs[1].plot(np.arange(len(new_primal)) / (len(new_primal) - 1), new_primal, '.')
plt.show()
```
> <img src="https://github.com/hhoppe/resampler/raw/main/media/examples_1d_upsampling.png"/>
```python
batch_size = 4
batch_of_images = media.moving_circle((16, 16), batch_size)
upsampled = resampler.resize(batch_of_images, (batch_size, 64, 64))
media.show_videos({'original': batch_of_images, 'upsampled': upsampled}, fps=1)
```
> original
<img src="https://github.com/hhoppe/resampler/raw/main/media/batch_original.gif"/>
upsampled
<img src="https://github.com/hhoppe/resampler/raw/main/media/batch_upsampled.gif"/>
Most examples above use the default
`resize()` settings:
- `gridtype='dual'` for both source and destination arrays,
- `boundary='auto'`
which uses `'reflect'` for upsampling and `'clamp'` for downsampling,
- `filter='lanczos3'`
(a [Lanczos](https://en.wikipedia.org/wiki/Lanczos_resampling) kernel with radius 3),
- `gamma=None` which by default uses the `'power2'`
transfer function for the `uint8` image in the second example,
- `scale=1.0, translate=0.0` (no domain transformation),
- default `precision` and output `dtype`.
## Advanced usage:
Map an image to a wider grid using custom `scale` and `translate` vectors,
with horizontal `'reflect'` and vertical `'natural'` boundary rules,
providing a constant value for the exterior,
using different filters (Lanczos and O-MOMS) in the two dimensions,
disabling gamma correction, performing computations in double-precision,
and returning an output array in single-precision:
```python
new = resampler.resize(
image, (128, 512), boundary=('natural', 'reflect'), cval=(0.2, 0.7, 0.3),
filter=('lanczos3', 'omoms5'), gamma='identity', scale=(0.8, 0.25),
translate=(0.1, 0.35), precision='float64', dtype='float32')
media.show_images({'image': image, 'new': new})
```
> <img src="https://github.com/hhoppe/resampler/raw/main/media/example_advanced_usage1.png"/>
Warp an image by transforming it using
[polar coordinates](https://en.wikipedia.org/wiki/Polar_coordinate_system):
```python
shape = image.shape[:2]
yx = ((np.indices(shape).T + 0.5) / shape - 0.5).T # [-0.5, 0.5]^2
radius, angle = np.linalg.norm(yx, axis=0), np.arctan2(*yx)
angle += (0.8 - radius).clip(0, 1) * 2.0 - 0.6
coords = np.dstack((np.sin(angle) * radius, np.cos(angle) * radius)) + 0.5
resampled = resampler.resample(image, coords, boundary='constant')
media.show_images({'image': image, 'resampled': resampled})
```
> <img src="https://github.com/hhoppe/resampler/raw/main/media/example_warp.png"/>
## Limitations:
- Filters are assumed to be [separable](https://en.wikipedia.org/wiki/Separable_filter).
- Although `resize` implements prefiltering, `resample` does not yet have it (and therefore
may have aliased results if downsampling).
- Differentiability is only with respect to the grid values,
not wrt the resize shape, scale, translation, or the resampling coordinates.
| /resampler-0.7.3.tar.gz/resampler-0.7.3/README.md | 0.525612 | 0.961858 | README.md | pypi |
from resc_backend.helpers.environment_wrapper import EnvironmentVariable
# Names of the environment variables consumed by the RESC web service.
# CORS configuration
ENABLE_CORS = 'ENABLE_CORS'
CORS_ALLOWED_DOMAINS = 'CORS_ALLOWED_DOMAINS'
# Authentication / single sign-on (SSO) JWT configuration
AUTHENTICATION_REQUIRED = 'AUTHENTICATION_REQUIRED'
SSO_ACCESS_TOKEN_ISSUER_URL = 'SSO_ACCESS_TOKEN_ISSUER_URL'
SSO_ACCESS_TOKEN_JWKS_URL = 'SSO_ACCESS_TOKEN_JWKS_URL'
SSO_JWT_SIGN_ALGORITHM = 'SSO_JWT_SIGN_ALGORITHM'
SSO_JWT_REQUIRED_CLAIMS = 'SSO_JWT_REQUIRED_CLAIMS'
SSO_JWT_CLAIM_KEY_USER_ID = 'SSO_JWT_CLAIM_KEY_USER_ID'
SSO_JWT_CLAIM_KEY_AUTHORIZATION = 'SSO_JWT_CLAIM_KEY_AUTHORIZATION'
SSO_JWT_CLAIM_VALUE_AUTHORIZATION = 'SSO_JWT_CLAIM_VALUE_AUTHORIZATION'
# Redis cache configuration
RESC_REDIS_CACHE_ENABLE = 'RESC_REDIS_CACHE_ENABLE'
RESC_REDIS_SERVICE_HOST = 'RESC_REDIS_SERVICE_HOST'
RESC_REDIS_SERVICE_PORT = 'RESC_REDIS_SERVICE_PORT'
REDIS_PASSWORD = 'REDIS_PASSWORD'
# Environment variables that the web service always reads; all are optional
# and fall back to the documented defaults.
WEB_SERVICE_ENV_VARS = [
    EnvironmentVariable(
        ENABLE_CORS,
        "Enable by providing the value true to allow CORS requests.",
        required=False,
        default='',
    ),
    EnvironmentVariable(
        CORS_ALLOWED_DOMAINS,
        "Comma separated lists of domains to allow in the CORS policy if ENABLE_CORS is true",
        required=False,
        default='',
    ),
    EnvironmentVariable(
        AUTHENTICATION_REQUIRED,
        "set to false to disable authentication, any other value will enable SSO authentication",
        required=False,
        default='',
    ),
    EnvironmentVariable(
        RESC_REDIS_CACHE_ENABLE,
        "Set to true to enable the redis cache, its expected to be running separately from this instance",
        required=False,
        default='False',
    )
]
# Environment variables that become mandatory when SSO authentication is
# enabled (i.e. AUTHENTICATION_REQUIRED is not set to false).
CONDITIONAL_SSO_ENV_VARS = [
    EnvironmentVariable(
        SSO_ACCESS_TOKEN_ISSUER_URL,
        "URL of the access token issuer",
        required=True,
    ),
    EnvironmentVariable(
        SSO_ACCESS_TOKEN_JWKS_URL,
        "URL of the access token JWKS",
        required=True,
    ),
    EnvironmentVariable(
        SSO_JWT_SIGN_ALGORITHM,
        "signing algorithm used for the JWT, for example RS256",
        required=True,
    ),
    EnvironmentVariable(
        SSO_JWT_REQUIRED_CLAIMS,
        "Comma separated list of claims that need to be in the JWT",
        required=True,
    ),
    EnvironmentVariable(
        SSO_JWT_CLAIM_KEY_USER_ID,
        "JWT claim key used for the user id",
        required=True,
    ),
    EnvironmentVariable(
        SSO_JWT_CLAIM_KEY_AUTHORIZATION,
        "JWT claim key used for the authorization check",
        required=True,
    ),
    EnvironmentVariable(
        SSO_JWT_CLAIM_VALUE_AUTHORIZATION,
        "JWT claim value used for the authorization check. "
        "Used as string contains check on the key from SSO_JWT_CLAIM_KEY_AUTHORIZATION",
        required=True,
    ),
]
# Environment variables that become mandatory when the Redis cache is
# enabled (RESC_REDIS_CACHE_ENABLE set to true).
CONDITIONAL_REDIS_ENV_VARS = [
    EnvironmentVariable(
        RESC_REDIS_SERVICE_HOST,
        "The hostname/IP address of the REDIS server.",
        required=True,
    ),
    EnvironmentVariable(
        RESC_REDIS_SERVICE_PORT,
        "The port on which the REDIS server is running.",
        required=True,
    ),
    EnvironmentVariable(
        REDIS_PASSWORD,
        "The REDIS authentication secret.",
        required=True,
    ),
]
import logging
from datetime import datetime, timedelta
from typing import List
# Third Party
from sqlalchemy import and_, extract, func, or_, union
from sqlalchemy.engine import Row
from sqlalchemy.orm import Session
# First Party
from resc_backend.constants import DEFAULT_RECORDS_PER_PAGE_LIMIT, MAX_RECORDS_PER_PAGE_LIMIT
from resc_backend.db import model
from resc_backend.resc_web_service.crud import scan_finding as scan_finding_crud
from resc_backend.resc_web_service.filters import FindingsFilter
from resc_backend.resc_web_service.schema import finding as finding_schema
from resc_backend.resc_web_service.schema.date_filter import DateFilter
from resc_backend.resc_web_service.schema.finding_status import FindingStatus
from resc_backend.resc_web_service.schema.scan_type import ScanType
from resc_backend.resc_web_service.schema.vcs_provider import VCSProviders
logger = logging.getLogger(__name__)
def patch_finding(db_connection: Session, finding_id: int, finding_update: finding_schema.FindingPatch):
    """Partially update a finding with the fields set in the patch payload.

    :param db_connection: Session of the database connection
    :param finding_id: primary key of the finding to update
    :param finding_update: patch payload; only explicitly-set fields are applied
    :return: the refreshed DBfinding row
    """
    # NOTE(review): if finding_id does not exist this raises AttributeError on
    # setattr(None, ...) -- confirm callers validate existence beforehand.
    db_finding = db_connection.query(model.DBfinding).filter_by(id_=finding_id).first()
    # Only apply fields that were explicitly provided in the patch request.
    for key, value in finding_update.dict(exclude_unset=True).items():
        setattr(db_finding, key, value)
    db_connection.commit()
    db_connection.refresh(db_finding)
    return db_finding
def create_findings(db_connection: Session, findings: List[finding_schema.FindingCreate]) -> List[model.DBfinding]:
    """Bulk-insert findings for one repository, deduplicating against the database.

    All findings are assumed to belong to the same repository (the
    repository_id of the first finding is used for the lookup). Findings that
    already exist -- matched on the table's unique key (commit_id, rule_name,
    file_path, line_number, column_start, column_end) -- are returned as-is
    instead of being re-created.

    :param db_connection: Session of the database connection
    :param findings: findings to store
    :return: DBfinding rows for the whole request: pre-existing plus newly created
    """
    if len(findings) < 1:
        # Function is called with an empty list of findings
        return []
    repository_id = findings[0].repository_id
    # get a list of known / registered findings for this repository
    db_repository_findings = db_connection.query(model.DBfinding).\
        filter(model.DBfinding.repository_id == repository_id).all()
    # Compare new findings list with findings in the db
    # NOTE(review): pairwise comparison is O(n*m); presumably acceptable for
    # typical scan sizes -- confirm if repositories can hold very many findings.
    new_findings = findings[:]
    db_findings = []
    for finding in findings:
        for repository_finding in db_repository_findings:
            # Compare based on the unique key in the findings table
            if repository_finding.commit_id == finding.commit_id and \
                    repository_finding.rule_name == finding.rule_name and \
                    repository_finding.file_path == finding.file_path and \
                    repository_finding.line_number == finding.line_number and \
                    repository_finding.column_start == finding.column_start and \
                    repository_finding.column_end == finding.column_end:
                # Store the already known finding
                db_findings.append(repository_finding)
                # Remove from the db_repository_findings to increase performance for the next loop
                db_repository_findings.remove(repository_finding)
                # Remove from the to be created findings
                new_findings.remove(finding)
                break
    logger.info(f"create_findings repository {repository_id}, Requested: {len(findings)}. "
                f"New findings: {len(new_findings)}. Already in db: {len(db_findings)}")
    db_create_findings = []
    # Map the to be created findings to the DBfinding type object
    for new_finding in new_findings:
        db_create_finding = model.finding.DBfinding.create_from_finding(new_finding)
        db_create_findings.append(db_create_finding)
    # Store all the to be created findings in the database
    if len(db_create_findings) >= 1:
        db_connection.add_all(db_create_findings)
        db_connection.flush()
        db_connection.commit()
        db_findings.extend(db_create_findings)
    # Return the known findings that are part of the request and the newly created findings
    return db_findings
def get_finding(db_connection: Session, finding_id: int):
    """Fetch a single finding by its primary key, or None when it does not exist."""
    query = db_connection.query(model.DBfinding).filter(model.finding.DBfinding.id_ == finding_id)
    return query.first()
def get_findings(db_connection: Session, skip: int = 0,
                 limit: int = DEFAULT_RECORDS_PER_PAGE_LIMIT):
    """Return a page of findings ordered by id.

    :param db_connection: Session of the database connection
    :param skip: number of records to skip, to support pagination
    :param limit: number of records to return; capped at MAX_RECORDS_PER_PAGE_LIMIT
    :return: list of DBfinding rows
    """
    # Cap the requested page size at the configured maximum.
    limit_val = min(limit, MAX_RECORDS_PER_PAGE_LIMIT)
    findings = db_connection.query(model.DBfinding)
    findings = findings.order_by(model.finding.DBfinding.id_).offset(skip).limit(limit_val).all()
    return findings
def get_scans_findings(db_connection, scan_ids: [int], skip: int = 0, limit: int = DEFAULT_RECORDS_PER_PAGE_LIMIT,
                       rules_filter: [str] = None, statuses_filter: [FindingStatus] = None) -> [model.DBfinding]:
    """
    Retrieve all finding child objects of a scan object from the database
    :param db_connection:
        Session of the database connection
    :param scan_ids:
        ids of the parent scan object of which to retrieve finding objects
    :param skip:
        integer amount of records to skip to support pagination
    :param limit:
        integer amount of records to return, to support pagination
    :param rules_filter:
        optional, filter on rule name. Exact match against the given list of names
    :param statuses_filter:
        optional, filter on status of findings
    :return: [DBfinding]
        The output will contain a list of DBfinding type objects,
        or an empty list if no finding was found for the given scan_ids
    """
    if len(scan_ids) == 0:
        return []
    limit_val = MAX_RECORDS_PER_PAGE_LIMIT if limit > MAX_RECORDS_PER_PAGE_LIMIT else limit
    query = db_connection.query(model.DBfinding)
    # Restrict to findings linked to the given scans via the scan-finding link table
    query = query.join(model.DBscanFinding,
                       model.scan_finding.DBscanFinding.finding_id == model.finding.DBfinding.id_)
    if statuses_filter:
        # subquery to select latest audit ids findings
        max_audit_subquery = db_connection.query(model.DBaudit.finding_id,
                                                 func.max(model.DBaudit.id_).label("audit_id")) \
            .group_by(model.DBaudit.finding_id).subquery()
        # Outer joins so findings without any audit record are kept (NULL status)
        query = query \
            .join(max_audit_subquery, max_audit_subquery.c.finding_id == model.finding.DBfinding.id_,
                  isouter=True) \
            .join(model.DBaudit, and_(model.audit.DBaudit.finding_id == model.finding.DBfinding.id_,
                                      model.audit.DBaudit.id_ == max_audit_subquery.c.audit_id),
                  isouter=True)
        if FindingStatus.NOT_ANALYZED in statuses_filter:
            # NOT_ANALYZED also matches findings that have no audit record at all
            query = query.filter(or_(model.DBaudit.status.in_(statuses_filter),
                                     model.DBaudit.status == None))  # noqa: E711
        else:
            query = query.filter(model.DBaudit.status.in_(statuses_filter))
    query = query.filter(model.DBscanFinding.scan_id.in_(scan_ids))
    if rules_filter:
        query = query.filter(model.DBfinding.rule_name.in_(rules_filter))
    findings = query.order_by(model.finding.DBfinding.id_).offset(skip).limit(limit_val).all()
    return findings
def get_total_findings_count(db_connection: Session, findings_filter: FindingsFilter = None) -> int:
    """
    Retrieve count of finding records, optionally narrowed down by the given filter
    :param findings_filter:
        optional FindingsFilter holding the criteria to filter on
    :param db_connection:
        Session of the database connection
    :return: total_count
        count of findings
    """
    total_count_query = db_connection.query(func.count(model.DBfinding.id_))
    if findings_filter:
        if findings_filter.finding_statuses:
            # subquery to select latest audit ids findings
            max_audit_subquery = db_connection.query(model.DBaudit.finding_id,
                                                     func.max(model.DBaudit.id_).label("audit_id")) \
                .group_by(model.DBaudit.finding_id).subquery()
            # Outer joins so findings without any audit record are kept (NULL status)
            total_count_query = total_count_query \
                .join(max_audit_subquery, max_audit_subquery.c.finding_id == model.finding.DBfinding.id_,
                      isouter=True) \
                .join(model.DBaudit, and_(model.audit.DBaudit.finding_id == model.finding.DBfinding.id_,
                                          model.audit.DBaudit.id_ == max_audit_subquery.c.audit_id),
                      isouter=True)
        # Joins up to the vcs instance are only needed when filtering on
        # scan, repository or vcs instance related properties
        if findings_filter.vcs_providers or findings_filter.project_name \
                or findings_filter.repository_name or findings_filter.start_date_time \
                or findings_filter.end_date_time:
            total_count_query = total_count_query \
                .join(model.DBscanFinding,
                      model.scan_finding.DBscanFinding.finding_id == model.finding.DBfinding.id_) \
                .join(model.DBscan,
                      model.scan.DBscan.id_ == model.scan_finding.DBscanFinding.scan_id) \
                .join(model.DBrepository,
                      model.repository.DBrepository.id_ == model.scan.DBscan.repository_id) \
                .join(model.DBVcsInstance,
                      model.vcs_instance.DBVcsInstance.id_ == model.repository.DBrepository.vcs_instance)
        if findings_filter.start_date_time:
            total_count_query = total_count_query.filter(
                model.scan.DBscan.timestamp >= findings_filter.start_date_time)
        if findings_filter.end_date_time:
            total_count_query = total_count_query.filter(
                model.scan.DBscan.timestamp <= findings_filter.end_date_time)
        if findings_filter.repository_name:
            total_count_query = total_count_query.filter(
                model.DBrepository.repository_name == findings_filter.repository_name)
        if findings_filter.vcs_providers:
            total_count_query = total_count_query.filter(
                model.vcs_instance.DBVcsInstance.provider_type.in_(findings_filter.vcs_providers))
        if findings_filter.project_name:
            total_count_query = total_count_query.filter(
                model.repository.DBrepository.project_key == findings_filter.project_name)
        if findings_filter.rule_names:
            total_count_query = total_count_query.filter(model.DBfinding.rule_name.in_(findings_filter.rule_names))
        if findings_filter.finding_statuses:
            if FindingStatus.NOT_ANALYZED in findings_filter.finding_statuses:
                # NOT_ANALYZED also matches findings that have no audit record at all
                total_count_query = total_count_query. \
                    filter(or_(model.DBaudit.status.in_(findings_filter.finding_statuses),
                               model.DBaudit.status == None))  # noqa: E711
            else:
                total_count_query = total_count_query.filter(
                    model.DBaudit.status.in_(findings_filter.finding_statuses))
        if findings_filter.scan_ids:
            # A single IN filter covers both one and multiple scan ids,
            # replacing the former duplicated len == 1 / len >= 2 branches
            total_count_query = total_count_query.join(
                model.DBscanFinding, model.scan_finding.DBscanFinding.finding_id == model.finding.DBfinding.id_)
            total_count_query = total_count_query.filter(
                model.scan_finding.DBscanFinding.scan_id.in_(findings_filter.scan_ids))
    total_count = total_count_query.scalar()
    return total_count
def get_findings_by_rule(db_connection: Session, skip: int = 0, limit: int = DEFAULT_RECORDS_PER_PAGE_LIMIT,
                         rule_name: str = ""):
    """
    Retrieve a paginated list of findings detected by the given rule, ordered by id
    :param db_connection:
        Session of the database connection
    :param skip:
        integer amount of records to skip to support pagination
    :param limit:
        integer amount of records to return, capped at MAX_RECORDS_PER_PAGE_LIMIT
    :param rule_name:
        exact rule name to filter on
    :return: [DBfinding]
        list of DBfinding objects
    """
    # Cap the requested page size at the configured maximum
    page_size = min(limit, MAX_RECORDS_PER_PAGE_LIMIT)
    query = db_connection.query(model.DBfinding) \
        .filter(model.DBfinding.rule_name == rule_name) \
        .order_by(model.finding.DBfinding.id_)
    return query.offset(skip).limit(page_size).all()
def get_distinct_rules_from_findings(db_connection: Session, scan_id: int = -1,
                                     finding_statuses: [FindingStatus] = None,
                                     vcs_providers: [VCSProviders] = None,
                                     project_name: str = "",
                                     repository_name: str = "",
                                     start_date_time: datetime = None,
                                     end_date_time: datetime = None,
                                     rule_pack_versions: [str] = None) -> \
        List[model.DBrule]:
    """
    Retrieve distinct rules detected
    :param db_connection:
        Session of the database connection
    :param scan_id:
        Optional filter by the id of a scan
    :param finding_statuses:
        Optional, filter of supported finding statuses
    :param vcs_providers:
        Optional, filter of supported vcs provider types
    :param project_name:
        Optional, filter on project name. Is used as a full string match filter
    :param repository_name:
        optional, filter on repository name. Is used as a full string match filter
    :param start_date_time:
        optional, filter on start date
    :param end_date_time:
        optional, filter on end date
    :param rule_pack_versions:
        optional, filter on rule pack version
    :return: rules
        List of unique rules
    """
    query = db_connection.query(model.DBfinding.rule_name)
    # Scan/repository/vcs joins are only made when filtering on related
    # properties AND no specific scan id is given
    if (vcs_providers or project_name or repository_name or start_date_time or end_date_time or rule_pack_versions) \
            and scan_id < 0:
        query = query \
            .join(model.DBscanFinding,
                  model.scan_finding.DBscanFinding.finding_id == model.finding.DBfinding.id_) \
            .join(model.DBscan,
                  model.scan.DBscan.id_ == model.scan_finding.DBscanFinding.scan_id) \
            .join(model.DBrepository,
                  model.repository.DBrepository.id_ == model.scan.DBscan.repository_id) \
            .join(model.DBVcsInstance,
                  model.vcs_instance.DBVcsInstance.id_ == model.repository.DBrepository.vcs_instance)
    if finding_statuses:
        # subquery to select latest audit ids findings
        max_audit_subquery = db_connection.query(model.DBaudit.finding_id,
                                                 func.max(model.DBaudit.id_).label("audit_id")) \
            .group_by(model.DBaudit.finding_id).subquery()
        # Outer joins so findings without any audit record are kept (NULL status)
        query = query \
            .join(max_audit_subquery, max_audit_subquery.c.finding_id == model.finding.DBfinding.id_,
                  isouter=True) \
            .join(model.DBaudit, and_(model.audit.DBaudit.finding_id == model.finding.DBfinding.id_,
                                      model.audit.DBaudit.id_ == max_audit_subquery.c.audit_id),
                  isouter=True)
    if scan_id > 0:
        query = query.join(model.DBscanFinding,
                           model.scan_finding.DBscanFinding.finding_id == model.finding.DBfinding.id_)
        query = query.filter(model.DBscanFinding.scan_id == scan_id)
    else:
        # NOTE(review): all of the filters below, including finding_statuses, are
        # skipped when a specific scan_id is given, even though the audit join
        # above still happens in that case - confirm this is intended
        if finding_statuses:
            if FindingStatus.NOT_ANALYZED in finding_statuses:
                # NOT_ANALYZED also matches findings that have no audit record at all
                query = query. \
                    filter(or_(model.DBaudit.status.in_(finding_statuses),
                               model.DBaudit.status == None))  # noqa: E711
            else:
                query = query.filter(model.DBaudit.status.in_(finding_statuses))
        if vcs_providers:
            query = query.filter(model.DBVcsInstance.provider_type.in_(vcs_providers))
        if project_name:
            query = query.filter(model.DBrepository.project_key == project_name)
        if repository_name:
            query = query.filter(model.DBrepository.repository_name == repository_name)
        if start_date_time:
            query = query.filter(model.scan.DBscan.timestamp >= start_date_time)
        if end_date_time:
            query = query.filter(model.scan.DBscan.timestamp <= end_date_time)
        if rule_pack_versions:
            # NOTE(review): DBscan is only joined in the scan_id < 0 branch above;
            # this filter relies on that join being present - confirm
            query = query.filter(model.DBscan.rule_pack.in_(rule_pack_versions))
    rules = query.distinct().order_by(model.DBfinding.rule_name).all()
    return rules
def get_findings_count_by_status(db_connection: Session, scan_ids: List[int] = None,
                                 finding_statuses: [FindingStatus] = None, rule_name: str = ""):
    """
    Retrieve count of findings based on finding status
    :param db_connection:
        Session of the database connection
    :param scan_ids:
        List of scan ids for which findings should be filtered
    :param finding_statuses:
        finding statuses to filter, type FindingStatus
    :param rule_name:
        rule_name to filter on
    :return: findings_count
        list of (status_count, status) rows, one per distinct audit status
    """
    # subquery to select latest audit ids findings
    max_audit_subquery = db_connection.query(model.DBaudit.finding_id,
                                             func.max(model.DBaudit.id_).label("audit_id")) \
        .group_by(model.DBaudit.finding_id).subquery()
    query = db_connection.query(func.count(model.DBfinding.id_).label('status_count'), model.DBaudit.status)
    # Outer joins so findings without any audit record are kept (NULL status)
    query = query \
        .join(max_audit_subquery, max_audit_subquery.c.finding_id == model.finding.DBfinding.id_,
              isouter=True) \
        .join(model.DBaudit, and_(model.audit.DBaudit.finding_id == model.finding.DBfinding.id_,
                                  model.audit.DBaudit.id_ == max_audit_subquery.c.audit_id),
              isouter=True)
    if scan_ids and len(scan_ids) > 0:
        # Restrict to findings belonging to the given scans
        query = query \
            .join(model.DBscanFinding,
                  model.scan_finding.DBscanFinding.finding_id == model.finding.DBfinding.id_) \
            .join(model.DBscan,
                  model.scan.DBscan.id_ == model.scan_finding.DBscanFinding.scan_id) \
            .filter(model.DBscan.id_.in_(scan_ids))
    if finding_statuses:
        if FindingStatus.NOT_ANALYZED in finding_statuses:
            # NOT_ANALYZED also matches findings that have no audit record at all
            query = query. \
                filter(or_(model.DBaudit.status.in_(finding_statuses),
                           model.DBaudit.status == None))  # noqa: E711
        else:
            query = query.filter(model.DBaudit.status.in_(finding_statuses))
    if rule_name:
        query = query.filter(model.DBfinding.rule_name == rule_name)
    findings_count_by_status = query.group_by(model.DBaudit.status).all()
    return findings_count_by_status
def get_rule_findings_count_by_status(db_connection: Session, rule_pack_versions: [str] = None,
                                      rule_tags: [str] = None):
    """
    Retrieve count of findings based on rulename and status
    :param db_connection:
        Session of the database connection
    :param rule_pack_versions:
        optional, filter on rule pack version
    :param rule_tags:
        optional, filter on rule tag
    :return: findings_count
        per rulename and status the count of findings
    """
    query = db_connection.query(model.DBfinding.rule_name,
                                model.DBaudit.status,
                                func.count(model.DBfinding.id_))
    # Latest BASE scan per repository (optionally restricted to the given rule packs);
    # findings are only counted from that base scan onwards
    max_base_scan_subquery = db_connection.query(model.DBscan.repository_id,
                                                 func.max(model.DBscan.id_).label("latest_base_scan_id"))
    max_base_scan_subquery = max_base_scan_subquery.filter(model.DBscan.scan_type == ScanType.BASE)
    if rule_pack_versions:
        max_base_scan_subquery = max_base_scan_subquery.filter(model.DBscan.rule_pack.in_(rule_pack_versions))
    max_base_scan_subquery = max_base_scan_subquery.group_by(model.DBscan.repository_id).subquery()
    # Latest audit per finding, used to determine its current status
    max_audit_subquery = db_connection.query(model.DBaudit.finding_id,
                                             func.max(model.DBaudit.id_).label("audit_id")) \
        .group_by(model.DBaudit.finding_id).subquery()
    query = query.join(model.DBscanFinding, model.DBfinding.id_ == model.DBscanFinding.finding_id)
    query = query.join(max_base_scan_subquery, model.DBfinding.repository_id == max_base_scan_subquery.c.repository_id)
    query = query.join(model.DBscan, and_(model.DBscanFinding.scan_id == model.DBscan.id_,
                                          model.DBscan.id_ >= max_base_scan_subquery.c.latest_base_scan_id))
    if rule_tags:
        # Restrict to rules that carry at least one of the requested tags
        rule_tag_subquery = db_connection.query(model.DBruleTag.rule_id) \
            .join(model.DBtag, model.DBruleTag.tag_id == model.DBtag.id_)
        if rule_pack_versions:
            rule_tag_subquery = rule_tag_subquery.join(model.DBrule, model.DBrule.id_ == model.DBruleTag.rule_id)
            rule_tag_subquery = rule_tag_subquery.filter(model.DBrule.rule_pack.in_(rule_pack_versions))
        rule_tag_subquery = rule_tag_subquery.filter(model.DBtag.name.in_(rule_tags))
        rule_tag_subquery = rule_tag_subquery.group_by(model.DBruleTag.rule_id).subquery()
        query = query.join(model.DBrule, and_(model.DBrule.rule_name == model.DBfinding.rule_name,
                                              model.DBrule.rule_pack == model.DBscan.rule_pack))
        query = query.join(rule_tag_subquery, model.DBrule.id_ == rule_tag_subquery.c.rule_id)
    if rule_pack_versions:
        query = query.filter(model.DBscan.rule_pack.in_(rule_pack_versions))
    # Outer joins: findings without any audit keep a NULL status
    # (counted as not_analyzed below)
    query = query.join(max_audit_subquery, max_audit_subquery.c.finding_id == model.DBscanFinding.finding_id,
                       isouter=True)
    query = query.join(model.DBaudit, and_(model.audit.DBaudit.finding_id == model.DBscanFinding.finding_id,
                                           model.audit.DBaudit.id_ == max_audit_subquery.c.audit_id), isouter=True)
    query = query.group_by(model.DBfinding.rule_name, model.DBaudit.status)
    query = query.order_by(model.DBfinding.rule_name, model.DBaudit.status)
    status_counts = query.all()
    rule_count_dict = {}
    # First pass: create a zeroed counter entry per rule name
    for status_count in status_counts:
        rule_count_dict[status_count[0]] = {
            "true_positive": 0,
            "false_positive": 0,
            "not_analyzed": 0,
            "under_review": 0,
            "clarification_required": 0,
            "total_findings_count": 0
        }
    # Second pass: accumulate per-status and total counts per rule name
    for status_count in status_counts:
        rule_count_dict[status_count[0]]["total_findings_count"] += status_count[2]
        if status_count[1] == FindingStatus.NOT_ANALYZED or status_count[1] is None:
            rule_count_dict[status_count[0]]["not_analyzed"] += status_count[2]
        elif status_count[1] == FindingStatus.FALSE_POSITIVE:
            rule_count_dict[status_count[0]]["false_positive"] += status_count[2]
        elif status_count[1] == FindingStatus.TRUE_POSITIVE:
            rule_count_dict[status_count[0]]["true_positive"] += status_count[2]
        elif status_count[1] == FindingStatus.UNDER_REVIEW:
            rule_count_dict[status_count[0]]["under_review"] += status_count[2]
        elif status_count[1] == FindingStatus.CLARIFICATION_REQUIRED:
            rule_count_dict[status_count[0]]["clarification_required"] += status_count[2]
    return rule_count_dict
def get_findings_count_by_time(db_connection: Session,
                               date_type: DateFilter,
                               start_date_time: datetime = None,
                               end_date_time: datetime = None,
                               skip: int = 0,
                               limit: int = DEFAULT_RECORDS_PER_PAGE_LIMIT):
    """
    Retrieve count of findings, grouped per period of the given date_type
    :param db_connection:
        Session of the database connection
    :param date_type:
        required, granularity of the grouping: month, week or day
    :param start_date_time:
        optional, filter on start date
    :param end_date_time:
        optional, filter on end date
    :param skip:
        integer amount of records to skip to support pagination
    :param limit:
        integer amount of records to return, to support pagination
    :raises ValueError: when date_type is not a supported DateFilter value
    """
    # Determine the date columns once, instead of repeating the same extract()
    # expressions for the select list, group_by and order_by
    if date_type == DateFilter.MONTH:
        date_fields = [extract('year', model.DBscan.timestamp), extract('month', model.DBscan.timestamp)]
    elif date_type == DateFilter.WEEK:
        date_fields = [extract('year', model.DBscan.timestamp), extract('week', model.DBscan.timestamp)]
    elif date_type == DateFilter.DAY:
        date_fields = [extract('year', model.DBscan.timestamp), extract('month', model.DBscan.timestamp),
                       extract('day', model.DBscan.timestamp)]
    else:
        # Previously an unsupported date_type surfaced as an UnboundLocalError;
        # fail explicitly instead
        raise ValueError(f"Unsupported date_type: {date_type}")
    query = db_connection.query(*date_fields, func.count(model.DBscanFinding.finding_id))
    query = query.join(model.DBscanFinding, model.DBscanFinding.scan_id == model.DBscan.id_)
    if start_date_time:
        query = query.filter(model.DBscan.timestamp >= start_date_time)
    if end_date_time:
        query = query.filter(model.DBscan.timestamp <= end_date_time)
    query = query.group_by(*date_fields)
    query = query.order_by(*date_fields)
    finding_count = query.offset(skip).limit(limit).all()
    return finding_count
def get_findings_count_by_time_total(db_connection: Session,
                                     date_type: DateFilter,
                                     start_date_time: datetime = None,
                                     end_date_time: datetime = None):
    """
    Retrieve the total number of distinct periods (by date_type) that have scans
    :param db_connection:
        Session of the database connection
    :param date_type:
        required, granularity of the grouping: month, week or day
    :param start_date_time:
        optional, filter on start date
    :param end_date_time:
        optional, filter on end date
    :raises ValueError: when date_type is not a supported DateFilter value
    """
    # Determine the date columns once, instead of repeating the extract()
    # expressions per branch
    if date_type == DateFilter.MONTH:
        date_fields = [extract('year', model.DBscan.timestamp), extract('month', model.DBscan.timestamp)]
    elif date_type == DateFilter.WEEK:
        date_fields = [extract('year', model.DBscan.timestamp), extract('week', model.DBscan.timestamp)]
    elif date_type == DateFilter.DAY:
        date_fields = [extract('year', model.DBscan.timestamp), extract('month', model.DBscan.timestamp),
                       extract('day', model.DBscan.timestamp)]
    else:
        # Previously an unsupported date_type surfaced as an UnboundLocalError;
        # fail explicitly instead
        raise ValueError(f"Unsupported date_type: {date_type}")
    query = db_connection.query(*date_fields)
    if start_date_time:
        query = query.filter(model.DBscan.timestamp >= start_date_time)
    if end_date_time:
        query = query.filter(model.DBscan.timestamp <= end_date_time)
    # Count distinct periods rather than individual scan rows
    query = query.distinct()
    result = query.count()
    return result
def get_distinct_rules_from_scans(db_connection: Session, scan_ids: List[int] = None) -> \
        List[model.DBrule]:
    """
    Retrieve the distinct rule names that produced findings
    :param db_connection:
        Session of the database connection
    :param scan_ids:
        List of scan ids
    :return: rules
        List of unique rules
    """
    query = db_connection.query(model.DBfinding.rule_name)
    if scan_ids:
        # Restrict to findings that belong to the given scans
        query = query \
            .join(model.DBscanFinding,
                  model.scan_finding.DBscanFinding.finding_id == model.finding.DBfinding.id_) \
            .filter(model.DBscanFinding.scan_id.in_(scan_ids))
    return query.distinct().order_by(model.DBfinding.rule_name).all()
def delete_finding(db_connection: Session, finding_id: int, delete_related: bool = False):
    """
    Delete a finding object
    :param db_connection:
        Session of the database connection
    :param finding_id:
        id of the finding to be deleted
    :param delete_related:
        if related records need to be deleted
    """
    # Remove the scan-finding link records first when requested
    if delete_related:
        scan_finding_crud.delete_scan_finding(db_connection, finding_id=finding_id)
    query = db_connection.query(model.DBfinding)
    query = query.filter(model.finding.DBfinding.id_ == finding_id)
    query.delete(synchronize_session=False)
    db_connection.commit()
def delete_findings_by_repository_id(db_connection: Session, repository_id: int):
    """
    Delete findings for a given repository
    :param db_connection:
        Session of the database connection
    :param repository_id:
        id of the repository
    """
    query = db_connection.query(model.DBfinding)
    query = query.filter(model.finding.DBfinding.repository_id == repository_id)
    query.delete(synchronize_session=False)
    db_connection.commit()
def delete_findings_by_vcs_instance_id(db_connection: Session, vcs_instance_id: int):
    """
    Delete findings for a given vcs instance
    :param db_connection:
        Session of the database connection
    :param vcs_instance_id:
        id of the vcs instance
    """
    # Chained filters are combined with AND, linking finding -> repository -> vcs instance
    query = db_connection.query(model.DBfinding)
    query = query.filter(model.finding.DBfinding.repository_id == model.repository.DBrepository.id_)
    query = query.filter(model.repository.DBrepository.vcs_instance == model.vcs_instance.DBVcsInstance.id_)
    query = query.filter(model.vcs_instance.DBVcsInstance.id_ == vcs_instance_id)
    query.delete(synchronize_session=False)
    db_connection.commit()
def get_finding_audit_status_count_over_time(db_connection: Session, status: FindingStatus,
                                             weeks: int = 13) -> list[Row]:
    """
    Retrieve count of findings with the given audit status over time for given weeks
    :param db_connection:
        Session of the database connection
    :param status:
        mandatory, status for which to get the audit counts over time
    :param weeks:
        optional, filter on last n weeks, default 13
    :return: status_count_over_time
        list of rows containing finding status counts over time per week
    """
    all_tables = []
    # Build one query per week; each is labeled with the year/week it represents
    for week in range(0, weeks):
        last_nth_week_date_time = datetime.utcnow() - timedelta(weeks=week)
        query = db_connection.query(extract('year', last_nth_week_date_time).label("year"),
                                    extract('week', last_nth_week_date_time).label("week"),
                                    model.DBVcsInstance.provider_type.label("provider_type"),
                                    func.count(model.DBaudit.id_).label("finding_count")
                                    )
        # Latest audit per finding up to and including the nth week
        max_audit_subquery = db_connection.query(func.max(model.DBaudit.id_).label("audit_id")) \
            .filter(extract('year', model.DBaudit.timestamp) == extract('year', last_nth_week_date_time)) \
            .filter(extract('week', model.DBaudit.timestamp) <= extract('week', last_nth_week_date_time)) \
            .group_by(model.DBaudit.finding_id).subquery()
        query = query.join(max_audit_subquery, max_audit_subquery.c.audit_id == model.DBaudit.id_)
        query = query.join(model.DBfinding, model.DBfinding.id_ == model.DBaudit.finding_id)
        query = query.join(model.DBrepository, model.DBrepository.id_ == model.DBfinding.repository_id)
        query = query.join(model.DBVcsInstance, model.DBVcsInstance.id_ == model.DBrepository.vcs_instance)
        query = query.filter(model.DBaudit.status == status)
        query = query.group_by(model.DBVcsInstance.provider_type)
        all_tables.append(query)
    # Combine the per-week queries into a single result set
    unioned_query = union(*all_tables)
    status_count_over_time = db_connection.execute(unioned_query).all()
    return status_count_over_time
def get_finding_count_by_vcs_provider_over_time(db_connection: Session, weeks: int = 13) -> list[Row]:
    """
    Retrieve count findings over time for given weeks
    :param db_connection:
        Session of the database connection
    :param weeks:
        optional, filter on last n weeks, default 13
    :return: count_over_time
        list of rows containing finding count over time per week
    """
    all_tables = []
    # Build one query per week; each is labeled with the year/week it represents
    for week in range(0, weeks):
        last_nth_week_date_time = datetime.utcnow() - timedelta(weeks=week)
        query = db_connection.query(extract('year', last_nth_week_date_time).label("year"),
                                    extract('week', last_nth_week_date_time).label("week"),
                                    model.DBVcsInstance.provider_type.label("provider_type"),
                                    func.count(model.DBfinding.id_).label("finding_count")
                                    )
        # Latest BASE scan per repository up to and including the nth week
        max_base_scan = db_connection.query(func.max(model.DBscan.id_).label("scan_id"),
                                            model.DBscan.repository_id) \
            .filter(extract('year', model.DBscan.timestamp) == extract('year', last_nth_week_date_time)) \
            .filter(extract('week', model.DBscan.timestamp) <= extract('week', last_nth_week_date_time)) \
            .filter(model.DBscan.scan_type == ScanType.BASE) \
            .group_by(model.DBscan.repository_id).subquery()
        query = query.join(model.DBscanFinding, model.DBfinding.id_ == model.DBscanFinding.finding_id)
        query = query.join(model.DBscan, model.DBscan.id_ == model.DBscanFinding.scan_id)
        # Count findings from the latest base scan, plus findings from later
        # incremental scans that fall within the same week/year window
        query = query.join(max_base_scan, and_(max_base_scan.c.repository_id == model.DBscan.repository_id,
                                               or_(model.DBscan.id_ == max_base_scan.c.scan_id,
                                                   (and_(model.DBscan.id_ > max_base_scan.c.scan_id,
                                                         model.DBscan.scan_type == ScanType.INCREMENTAL,
                                                         extract('week', model.DBscan.timestamp) <=
                                                         extract('week', last_nth_week_date_time),
                                                         extract('year', model.DBscan.timestamp) ==
                                                         extract('year', last_nth_week_date_time)))
                                                   )
                                               )
                           )
        query = query.join(model.DBrepository, model.DBrepository.id_ == model.DBscan.repository_id)
        query = query.join(model.DBVcsInstance, model.DBVcsInstance.id_ == model.DBrepository.vcs_instance)
        query = query.group_by(model.DBVcsInstance.provider_type)
        all_tables.append(query)
    # union
    unioned_query = union(*all_tables)
    count_over_time = db_connection.execute(unioned_query).all()
    return count_over_time
def get_un_triaged_finding_count_by_vcs_provider_over_time(db_connection: Session, weeks: int = 13) -> list[Row]:
    """
    Retrieve count of un triaged findings over time for given weeks
    :param db_connection:
        Session of the database connection
    :param weeks:
        optional, filter on last n weeks, default 13
    :return: count_over_time
        list of rows containing un triaged findings count over time per week
    """
    all_tables = []
    # Build one query per week; each is labeled with the year/week it represents
    for week in range(0, weeks):
        last_nth_week_date_time = datetime.utcnow() - timedelta(weeks=week)
        query = db_connection.query(extract('year', last_nth_week_date_time).label("year"),
                                    extract('week', last_nth_week_date_time).label("week"),
                                    model.DBVcsInstance.provider_type.label("provider_type"),
                                    func.count(model.DBfinding.id_).label("finding_count")
                                    )
        # Latest BASE scan per repository up to and including the nth week
        max_base_scan = db_connection.query(func.max(model.DBscan.id_).label("scan_id"),
                                            model.DBscan.repository_id) \
            .filter(extract('year', model.DBscan.timestamp) == extract('year', last_nth_week_date_time)) \
            .filter(extract('week', model.DBscan.timestamp) <= extract('week', last_nth_week_date_time)) \
            .filter(model.DBscan.scan_type == ScanType.BASE) \
            .group_by(model.DBscan.repository_id).subquery()
        # Latest audit per finding up to and including the nth week
        max_audit_subquery = db_connection.query(model.DBaudit.finding_id,
                                                 func.max(model.DBaudit.id_).label("audit_id")) \
            .filter(extract('year', model.DBaudit.timestamp) == extract('year', last_nth_week_date_time)) \
            .filter(extract('week', model.DBaudit.timestamp) <= extract('week', last_nth_week_date_time)) \
            .group_by(model.DBaudit.finding_id).subquery()
        query = query.join(model.DBscanFinding, model.DBfinding.id_ == model.DBscanFinding.finding_id)
        query = query.join(model.DBscan, model.DBscan.id_ == model.DBscanFinding.scan_id)
        # Count findings from the latest base scan, plus findings from later
        # incremental scans that fall within the same week/year window
        query = query.join(max_base_scan, and_(max_base_scan.c.repository_id == model.DBscan.repository_id,
                                               or_(model.DBscan.id_ == max_base_scan.c.scan_id,
                                                   (and_(model.DBscan.id_ > max_base_scan.c.scan_id,
                                                         model.DBscan.scan_type == ScanType.INCREMENTAL,
                                                         extract('week', model.DBscan.timestamp) <=
                                                         extract('week', last_nth_week_date_time),
                                                         extract('year', model.DBscan.timestamp) ==
                                                         extract('year', last_nth_week_date_time)))
                                                   )
                                               )
                           )
        query = query.join(model.DBrepository, model.DBrepository.id_ == model.DBscan.repository_id)
        query = query.join(model.DBVcsInstance, model.DBVcsInstance.id_ == model.DBrepository.vcs_instance)
        # Outer joins so findings without any audit record are kept
        query = query.join(max_audit_subquery, max_audit_subquery.c.finding_id == model.finding.DBfinding.id_,
                           isouter=True)
        query = query.join(model.DBaudit, and_(model.audit.DBaudit.finding_id == model.finding.DBfinding.id_,
                                               model.audit.DBaudit.id_ == max_audit_subquery.c.audit_id),
                           isouter=True)
        # Un-triaged means: no audit at all, or the latest audit left it NOT_ANALYZED
        query = query.filter(
            or_(model.DBaudit.id_ == None, model.DBaudit.status == FindingStatus.NOT_ANALYZED))  # noqa: E711
        query = query.group_by(model.DBVcsInstance.provider_type)
        all_tables.append(query)
    # union
    unioned_query = union(*all_tables)
    count_over_time = db_connection.execute(unioned_query).all()
    return count_over_time
from sqlalchemy import and_, func
from sqlalchemy.orm import Session
# First Party
from resc_backend.constants import DEFAULT_RECORDS_PER_PAGE_LIMIT, MAX_RECORDS_PER_PAGE_LIMIT
from resc_backend.db import model
from resc_backend.resc_web_service.crud import finding as finding_crud
from resc_backend.resc_web_service.crud import scan as scan_crud
from resc_backend.resc_web_service.crud import scan_finding as scan_finding_crud
from resc_backend.resc_web_service.schema import repository as repository_schema
from resc_backend.resc_web_service.schema.finding_status import FindingStatus
from resc_backend.resc_web_service.schema.scan_type import ScanType
from resc_backend.resc_web_service.schema.vcs_provider import VCSProviders
def get_repositories(db_connection: Session, vcs_providers: [VCSProviders] = None, skip: int = 0,
                     limit: int = DEFAULT_RECORDS_PER_PAGE_LIMIT, project_filter: str = "",
                     repository_filter: str = "", only_if_has_findings: bool = False):
    """
    Retrieve repository records optionally filtered
    :param db_connection:
        Session of the database connection
    :param skip:
        integer amount of records to skip to support pagination
    :param limit:
        integer amount of records to return, to support pagination
    :param vcs_providers:
        optional [string] filtering the VCS provider
    :param project_filter:
        optional, filter on project name. Is used as a full string match filter
    :param repository_filter:
        optional, filter on repository name. Is used as a full string match filter
    :param only_if_has_findings:
        optional, filter on repositories with findings
    :return: repositories
        list of DBrepository objects
    """
    limit_val = MAX_RECORDS_PER_PAGE_LIMIT if limit > MAX_RECORDS_PER_PAGE_LIMIT else limit
    # Get the latest scan for repository
    repo_last_scan_sub_query = db_connection.query(model.DBscan.repository_id,
                                                   func.max(model.DBscan.timestamp).label("max_timestamp"))
    repo_last_scan_sub_query = repo_last_scan_sub_query.group_by(model.DBscan.repository_id).subquery()
    # NOTE(review): coalesce(x, None) is effectively an identity here; it appears
    # to exist only to give the columns explicit labels - confirm
    query = db_connection.query(
        model.DBrepository.id_,
        model.DBrepository.project_key,
        model.DBrepository.repository_id,
        model.DBrepository.repository_name,
        model.DBrepository.repository_url,
        model.DBrepository.vcs_instance,
        model.DBVcsInstance.provider_type,
        func.coalesce(model.DBscan.id_, None).label('last_scan_id'),
        func.coalesce(model.DBscan.timestamp, None).label('last_scan_timestamp'))
    query = query.join(model.DBVcsInstance,
                       model.vcs_instance.DBVcsInstance.id_ == model.repository.DBrepository.vcs_instance)
    # Outer joins keep repositories that have never been scanned (NULL last scan)
    query = query.join(repo_last_scan_sub_query,
                       model.repository.DBrepository.id_ == repo_last_scan_sub_query.c.repository_id, isouter=True)
    query = query.join(model.DBscan,
                       and_(model.scan.DBscan.repository_id == repo_last_scan_sub_query.c.repository_id,
                            model.scan.DBscan.timestamp == repo_last_scan_sub_query.c.max_timestamp), isouter=True)
    if only_if_has_findings:
        # Latest BASE scan per repository; a repository counts as "has findings"
        # when any scan from that base scan onwards produced at least one finding
        max_base_scan_subquery = db_connection.query(model.DBscan.repository_id,
                                                     func.max(model.DBscan.id_).label("latest_base_scan_id"))
        max_base_scan_subquery = max_base_scan_subquery.filter(model.DBscan.scan_type == ScanType.BASE)
        max_base_scan_subquery = max_base_scan_subquery.group_by(model.DBscan.repository_id).subquery()
        sub_query = db_connection.query(model.DBrepository.id_)
        sub_query = sub_query.join(max_base_scan_subquery,
                                   model.DBrepository.id_ == max_base_scan_subquery.c.repository_id)
        sub_query = sub_query.join(model.DBscan, and_(model.DBrepository.id_ == model.DBscan.repository_id,
                                                      model.DBscan.id_ >= max_base_scan_subquery.c.latest_base_scan_id))
        sub_query = sub_query.join(model.DBscanFinding, model.DBscan.id_ == model.DBscanFinding.scan_id)
        sub_query = sub_query.distinct()
        # Filter on repositories that are in the selection
        query = query.filter(model.DBrepository.id_.in_(sub_query))
    if vcs_providers:
        query = query.filter(model.DBVcsInstance.provider_type.in_(vcs_providers))
    if project_filter:
        query = query.filter(model.DBrepository.project_key == project_filter)
    if repository_filter:
        query = query.filter(model.DBrepository.repository_name == repository_filter)
    repositories = query.order_by(model.DBrepository.id_).offset(skip).limit(limit_val).all()
    return repositories
def get_repositories_count(db_connection: Session, vcs_providers: [VCSProviders] = None, project_filter: str = "",
                           repository_filter: str = "", only_if_has_findings: bool = False) -> int:
    """
    Retrieve count of repository records optionally filtered
    :param db_connection:
        Session of the database connection
    :param vcs_providers:
        optional [string] filtering the VCS provider
    :param project_filter:
        optional, filter on project name
    :param repository_filter:
        optional, filter on repository name
    :param only_if_has_findings:
        optional, filter on repositories with findings
    :return: total_count
        count of repositories
    """
    query = db_connection.query(func.count(model.DBrepository.id_))
    if only_if_has_findings:
        # Subquery: latest BASE scan id per repository; only findings found in that
        # scan or later are considered current.
        max_base_scan_subquery = db_connection.query(model.DBscan.repository_id,
                                                     func.max(model.DBscan.id_).label("latest_base_scan_id"))
        max_base_scan_subquery = max_base_scan_subquery.filter(model.DBscan.scan_type == ScanType.BASE)
        max_base_scan_subquery = max_base_scan_subquery.group_by(model.DBscan.repository_id).subquery()
        # Subquery: ids of repositories with at least one finding at/after their latest base scan.
        sub_query = db_connection.query(model.DBrepository.id_)
        sub_query = sub_query.join(max_base_scan_subquery,
                                   model.DBrepository.id_ == max_base_scan_subquery.c.repository_id)
        sub_query = sub_query.join(model.DBscan, and_(model.DBrepository.id_ == model.DBscan.repository_id,
                                                      model.DBscan.id_ >= max_base_scan_subquery.c.latest_base_scan_id))
        sub_query = sub_query.join(model.DBscanFinding, model.DBscan.id_ == model.DBscanFinding.scan_id)
        sub_query = sub_query.distinct()
        # Filter on repositories that are in the selection
        query = query.filter(model.DBrepository.id_.in_(sub_query))
    # Simplified from `vcs_providers and vcs_providers is not None`: the second
    # clause was redundant, a plain truthiness check covers None and empty list.
    if vcs_providers:
        query = query.join(model.DBVcsInstance,
                           model.vcs_instance.DBVcsInstance.id_ == model.repository.DBrepository.vcs_instance)
        query = query.filter(model.DBVcsInstance.provider_type.in_(vcs_providers))
    if project_filter:
        query = query.filter(model.DBrepository.project_key == project_filter)
    if repository_filter:
        query = query.filter(model.DBrepository.repository_name == repository_filter)
    total_count = query.scalar()
    return total_count
def get_repository(db_connection: Session, repository_id: int):
    """Fetch a single repository row by its primary key; returns None when absent."""
    query = db_connection.query(model.DBrepository)
    query = query.filter(model.repository.DBrepository.id_ == repository_id)
    return query.first()
def update_repository(
        db_connection: Session, repository_id: int, repository: repository_schema.RepositoryCreate):
    """Overwrite the mutable fields of an existing repository row and persist the change.

    Returns the refreshed ORM object.
    """
    db_repository = db_connection.query(model.DBrepository).filter_by(id_=repository_id).first()
    # NOTE(review): assumes the id exists — a missing row raises AttributeError below; confirm callers guarantee this.
    for field in ("repository_name", "repository_url", "vcs_instance"):
        setattr(db_repository, field, getattr(repository, field))
    db_connection.commit()
    db_connection.refresh(db_repository)
    return db_repository
def create_repository(db_connection: Session, repository: repository_schema.RepositoryCreate):
    """Insert a new repository row built from the schema object and return it refreshed."""
    field_names = ("project_key", "repository_id", "repository_name", "repository_url", "vcs_instance")
    db_repository = model.repository.DBrepository(
        **{name: getattr(repository, name) for name in field_names})
    db_connection.add(db_repository)
    db_connection.commit()
    db_connection.refresh(db_repository)
    return db_repository
def create_repository_if_not_exists(db_connection: Session,
                                    repository: repository_schema.RepositoryCreate):
    """Return the repository matching the unique constraint, creating it first when missing."""
    # The unique constraint is (project_key, repository_id, vcs_instance).
    existing = db_connection.query(model.DBrepository) \
        .filter(model.repository.DBrepository.project_key == repository.project_key,
                model.repository.DBrepository.repository_id == repository.repository_id,
                model.repository.DBrepository.vcs_instance == repository.vcs_instance).first()
    if existing is None:
        return create_repository(db_connection, repository)
    return existing
def get_distinct_projects(db_connection: Session, vcs_providers: [VCSProviders] = None, repository_filter: str = "",
                          only_if_has_findings: bool = False):
    """
    Retrieve all unique project names
    :param db_connection:
        Session of the database connection
    :param vcs_providers:
        optional, filter of supported vcs provider types
    :param repository_filter:
        optional, filter on repository name. Is used as a full string match filter
    :param only_if_has_findings:
        optional, filter on repositories that have findings
    :return: distinct_projects
        The output will contain a list of unique projects
    """
    query = db_connection.query(model.DBrepository.project_key)
    if only_if_has_findings:
        # Subquery: latest BASE scan per repository; only findings from that scan
        # onwards make a repository count as "has findings".
        max_base_scan_subquery = db_connection.query(model.DBscan.repository_id,
                                                     func.max(model.DBscan.id_).label("latest_base_scan_id"))
        max_base_scan_subquery = max_base_scan_subquery.filter(model.DBscan.scan_type == ScanType.BASE)
        max_base_scan_subquery = max_base_scan_subquery.group_by(model.DBscan.repository_id).subquery()
        query = query.join(max_base_scan_subquery, model.DBrepository.id_ == max_base_scan_subquery.c.repository_id)
        query = query.join(model.DBscan, and_(model.DBrepository.id_ == model.DBscan.repository_id,
                                              model.DBscan.id_ >= max_base_scan_subquery.c.latest_base_scan_id))
        query = query.join(model.DBscanFinding, model.DBscan.id_ == model.DBscanFinding.scan_id)
    # Simplified from the redundant `vcs_providers and vcs_providers is not None`.
    if vcs_providers:
        query = query.join(model.DBVcsInstance,
                           model.vcs_instance.DBVcsInstance.id_ == model.repository.DBrepository.vcs_instance)
        query = query.filter(model.DBVcsInstance.provider_type.in_(vcs_providers))
    if repository_filter:
        # Exact match on repository name (the docstring previously claimed a
        # "contains" filter, which the code never implemented).
        query = query.filter(model.DBrepository.repository_name == repository_filter)
    distinct_projects = query.distinct().all()
    return distinct_projects
def get_distinct_repositories(db_connection: Session, vcs_providers: [VCSProviders] = None, project_name: str = "",
                              only_if_has_findings: bool = False):
    """
    Retrieve all unique repository names
    :param db_connection:
        Session of the database connection
    :param vcs_providers:
        optional, filter of supported vcs provider types
    :param project_name:
        optional, filter on project name. Is used as a full string match filter
    :param only_if_has_findings:
        optional, filter on repositories that have findings
    :return: distinct_repositories
        The output will contain a list of unique repositories
    """
    query = db_connection.query(model.DBrepository.repository_name)
    if only_if_has_findings:
        # Subquery: latest BASE scan per repository; only findings from that scan
        # onwards make a repository count as "has findings".
        max_base_scan_subquery = db_connection.query(model.DBscan.repository_id,
                                                     func.max(model.DBscan.id_).label("latest_base_scan_id"))
        max_base_scan_subquery = max_base_scan_subquery.filter(model.DBscan.scan_type == ScanType.BASE)
        max_base_scan_subquery = max_base_scan_subquery.group_by(model.DBscan.repository_id).subquery()
        query = query.join(max_base_scan_subquery, model.DBrepository.id_ == max_base_scan_subquery.c.repository_id)
        query = query.join(model.DBscan, and_(model.DBrepository.id_ == model.DBscan.repository_id,
                                              model.DBscan.id_ >= max_base_scan_subquery.c.latest_base_scan_id))
        query = query.join(model.DBscanFinding, model.DBscan.id_ == model.DBscanFinding.scan_id)
    # Simplified from the redundant `vcs_providers and vcs_providers is not None`.
    if vcs_providers:
        query = query.join(model.DBVcsInstance,
                           model.vcs_instance.DBVcsInstance.id_ == model.repository.DBrepository.vcs_instance)
        query = query.filter(model.DBVcsInstance.provider_type.in_(vcs_providers))
    if project_name:
        query = query.filter(model.DBrepository.project_key == project_name)
    distinct_repositories = query.distinct().all()
    return distinct_repositories
def get_findings_metadata_by_repository_id(db_connection: Session, repository_ids: list[int]):
    """
    Retrieves the finding metadata for a repository id from the database with most recent scan information
    :param db_connection:
        Session of the database connection
    :param repository_ids:
        ids of the repository for which findings metadata to be retrieved
    :return: findings_metadata
        dict keyed by repository id with the finding count for each audit status
    """
    # Count findings per (repository, latest-audit status), considering only scans
    # at or after each repository's latest BASE scan.
    query = db_connection.query(model.DBrepository.id_,
                                model.DBaudit.status,
                                func.count(model.DBscanFinding.finding_id))
    max_base_scan_subquery = db_connection.query(model.DBscan.repository_id,
                                                 func.max(model.DBscan.id_).label("latest_base_scan_id"))
    max_base_scan_subquery = max_base_scan_subquery.filter(model.DBscan.scan_type == ScanType.BASE)
    max_base_scan_subquery = max_base_scan_subquery.group_by(model.DBscan.repository_id).subquery()
    # Subquery: latest audit id per finding, so only the newest audit status counts.
    max_audit_subquery = db_connection.query(model.DBaudit.finding_id,
                                             func.max(model.DBaudit.id_).label("audit_id")) \
        .group_by(model.DBaudit.finding_id).subquery()
    query = query.join(max_base_scan_subquery, model.DBrepository.id_ == max_base_scan_subquery.c.repository_id)
    query = query.join(model.DBscan, and_(model.DBrepository.id_ == model.DBscan.repository_id,
                                          model.DBscan.id_ >= max_base_scan_subquery.c.latest_base_scan_id))
    query = query.join(model.DBscanFinding, model.DBscan.id_ == model.DBscanFinding.scan_id)
    query = query.join(max_audit_subquery, max_audit_subquery.c.finding_id == model.DBscanFinding.finding_id,
                       isouter=True)
    query = query.join(model.DBaudit, and_(model.audit.DBaudit.finding_id == model.DBscanFinding.finding_id,
                                           model.audit.DBaudit.id_ == max_audit_subquery.c.audit_id), isouter=True)
    query = query.filter(model.DBrepository.id_.in_(repository_ids))
    query = query.group_by(model.DBrepository.id_, model.DBaudit.status)
    status_counts = query.all()
    # Initialise every requested repository at zero, so repositories without
    # findings still appear in the result.
    repo_count_dict = {
        repository_id: {
            "true_positive": 0,
            "false_positive": 0,
            "not_analyzed": 0,
            "under_review": 0,
            "clarification_required": 0,
            "total_findings_count": 0
        }
        for repository_id in repository_ids
    }
    # Mapping replaces the previous if/elif chain; behavior is unchanged.
    status_key_map = {
        FindingStatus.NOT_ANALYZED: "not_analyzed",
        FindingStatus.FALSE_POSITIVE: "false_positive",
        FindingStatus.TRUE_POSITIVE: "true_positive",
        FindingStatus.UNDER_REVIEW: "under_review",
        FindingStatus.CLARIFICATION_REQUIRED: "clarification_required",
    }
    for repository_id, status, findings_count in status_counts:
        counters = repo_count_dict[repository_id]
        counters["total_findings_count"] += findings_count
        # Findings without any audit row (status is None) count as NOT_ANALYZED.
        status_key = "not_analyzed" if status is None else status_key_map.get(status)
        if status_key:
            counters[status_key] += findings_count
    return repo_count_dict
def delete_repository(db_connection: Session, repository_id: int, delete_related: bool = False):
    """
    Delete a repository object
    :param db_connection:
        Session of the database connection
    :param repository_id:
        id of the repository to be deleted
    :param delete_related:
        if related records need to be deleted
    """
    if delete_related:
        # Remove dependent rows first, in dependency order, to avoid orphans.
        for delete_dependents in (scan_finding_crud.delete_scan_finding_by_repository_id,
                                  finding_crud.delete_findings_by_repository_id,
                                  scan_crud.delete_scans_by_repository_id):
            delete_dependents(db_connection, repository_id=repository_id)
    db_connection.query(model.DBrepository) \
        .filter(model.repository.DBrepository.id_ == repository_id) \
        .delete(synchronize_session=False)
    db_connection.commit()
def delete_repositories_by_vcs_instance_id(db_connection: Session, vcs_instance_id: int):
    """
    Delete repositories for a given vcs instance
    :param db_connection:
        Session of the database connection
    :param vcs_instance_id:
        id of the vcs instance
    """
    # NOTE(review): this bulk DELETE carries criteria spanning two tables
    # (DBrepository joined to DBVcsInstance); multi-table delete criteria are
    # dialect-dependent in SQLAlchemy. The condition appears to reduce to
    # DBrepository.vcs_instance == vcs_instance_id — confirm before simplifying.
    db_connection.query(model.DBrepository) \
        .filter(model.repository.DBrepository.vcs_instance == model.vcs_instance.DBVcsInstance.id_,
                model.vcs_instance.DBVcsInstance.id_ == vcs_instance_id) \
        .delete(synchronize_session=False)
    db_connection.commit()
from typing import List
# First Party
from resc_backend.db import model
from resc_backend.db.connection import Session
from resc_backend.db.model import DBscanFinding
def create_scan_findings(db_connection: Session, scan_findings: List[DBscanFinding]) -> int:
    """
    Persist scan-finding link rows, skipping duplicates already stored for the scan.
    :param db_connection:
        Session of the database connection
    :param scan_findings:
        list of DBscanFinding objects to store; all entries are expected to share
        the scan_id of the first element
    :return: int
        number of scan findings passed in (including pre-existing duplicates)
    """
    # Idiomatic emptiness check (was `len(scan_findings) < 1`).
    if not scan_findings:
        # Function is called with an empty list of findings
        return 0

    # Load the existing scan findings for this scan into the session so merge()
    # below recognises duplicates instead of inserting them again.
    scan_id = scan_findings[0].scan_id
    _ = db_connection.query(model.DBscanFinding).filter(DBscanFinding.scan_id == scan_id).all()

    # merge the new scan findings into the session, ignoring duplicates
    for scan_finding in scan_findings:
        db_connection.merge(scan_finding)

    db_connection.commit()
    return len(scan_findings)
def get_scan_findings(db_connection: Session, finding_id: int) -> List[DBscanFinding]:
    """Return every scan-finding link row recorded for the given finding id."""
    query = db_connection.query(model.DBscanFinding) \
        .filter(model.scan_finding.DBscanFinding.finding_id == finding_id)
    return query.all()
def delete_scan_finding(db_connection: Session, finding_id: int = None, scan_id: int = None):
    """
    Delete scan findings when finding id or scan id provided
    :param db_connection:
        Session of the database connection
    :param finding_id:
        optional, id of the finding
    :param scan_id:
        optional, id of the scan
    """
    # Collect filter conditions for whichever identifiers were supplied.
    conditions = []
    if finding_id:
        conditions.append(model.scan_finding.DBscanFinding.finding_id == finding_id)
    if scan_id:
        conditions.append(model.scan_finding.DBscanFinding.scan_id == scan_id)
    # With no identifier at all, do nothing rather than wiping the whole table.
    if conditions:
        db_connection.query(model.DBscanFinding) \
            .filter(*conditions) \
            .delete(synchronize_session=False)
        db_connection.commit()
def delete_scan_finding_by_repository_id(db_connection: Session, repository_id: int):
    """
    Delete scan findings for a given repository
    :param db_connection:
        Session of the database connection
    :param repository_id:
        id of the repository
    """
    # NOTE(review): bulk DELETE whose criteria correlate DBscanFinding with DBscan
    # and DBfinding; multi-table delete criteria are dialect-dependent in
    # SQLAlchemy — verify against the production backend before refactoring.
    db_connection.query(model.DBscanFinding) \
        .filter(model.scan_finding.DBscanFinding.scan_id == model.scan.DBscan.id_,
                model.scan_finding.DBscanFinding.finding_id == model.finding.DBfinding.id_,
                model.scan.DBscan.repository_id == model.finding.DBfinding.repository_id,
                model.scan.DBscan.repository_id == repository_id) \
        .delete(synchronize_session=False)
    db_connection.commit()
def delete_scan_finding_by_vcs_instance_id(db_connection: Session, vcs_instance_id: int):
    """
    Delete scan findings for a given vcs instance
    :param db_connection:
        Session of the database connection
    :param vcs_instance_id:
        id of the vcs instance
    """
    # NOTE(review): bulk DELETE whose criteria correlate four tables
    # (scan_finding -> scan -> repository -> vcs_instance); multi-table delete
    # criteria are dialect-dependent in SQLAlchemy — verify before refactoring.
    db_connection.query(model.DBscanFinding) \
        .filter(model.scan_finding.DBscanFinding.scan_id == model.scan.DBscan.id_,
                model.scan_finding.DBscanFinding.finding_id == model.finding.DBfinding.id_,
                model.scan.DBscan.repository_id == model.repository.DBrepository.id_,
                model.repository.DBrepository.vcs_instance == model.vcs_instance.DBVcsInstance.id_,
                model.vcs_instance.DBVcsInstance.id_ == vcs_instance_id) \
        .delete(synchronize_session=False)
    db_connection.commit()
from typing import List
# Third Party
from sqlalchemy import and_, func, or_
from sqlalchemy.orm import Session
# First Party
from resc_backend.constants import DEFAULT_RECORDS_PER_PAGE_LIMIT, MAX_RECORDS_PER_PAGE_LIMIT
from resc_backend.db import model
from resc_backend.resc_web_service.filters import FindingsFilter
from resc_backend.resc_web_service.schema import detailed_finding as detailed_finding_schema
from resc_backend.resc_web_service.schema.finding_status import FindingStatus
from resc_backend.resc_web_service.schema.scan_type import ScanType
def get_detailed_findings(db_connection: Session, findings_filter: FindingsFilter, skip: int = 0,
                          limit: int = DEFAULT_RECORDS_PER_PAGE_LIMIT
                          ) -> List[detailed_finding_schema.DetailedFindingRead]:
    """
    Retrieve all detailed findings objects matching the provided FindingsFilter
    :param findings_filter:
        Object of type FindingsFilter, only DetailedFindingRead objects matching the attributes in this filter will be
        fetched
    :param db_connection:
        Session of the database connection
    :param skip:
        integer amount of records to skip to support pagination
    :param limit:
        integer amount of records to return, to support pagination
    :return: [DetailedFindingRead]
        The output will contain a list of DetailedFindingRead objects,
        or an empty list if no finding was found for the given findings_filter
    """
    # Subquery: latest BASE scan id per repository (optionally restricted to the
    # requested rule pack versions); findings are reported from that scan onwards.
    max_base_scan_subquery = db_connection.query(model.DBscan.repository_id,
                                                 func.max(model.DBscan.id_).label("latest_base_scan_id"))
    max_base_scan_subquery = max_base_scan_subquery.filter(model.DBscan.scan_type == ScanType.BASE)
    if findings_filter.rule_pack_versions:
        max_base_scan_subquery = max_base_scan_subquery.filter(
            model.DBscan.rule_pack.in_(findings_filter.rule_pack_versions))
    max_base_scan_subquery = max_base_scan_subquery.group_by(model.DBscan.repository_id).subquery()
    # subquery to select latest audit ids of findings
    max_audit_subquery = db_connection.query(model.DBaudit.finding_id,
                                             func.max(model.DBaudit.id_).label("audit_id")) \
        .group_by(model.DBaudit.finding_id).subquery()
    # Subquery: rule ids that carry the requested tags, narrowed further by rule
    # pack version and rule name filters when those are supplied.
    rule_tag_subquery = db_connection.query(model.DBruleTag.rule_id) \
        .join(model.DBtag, model.DBruleTag.tag_id == model.DBtag.id_)
    if findings_filter.rule_tags:
        rule_tag_subquery = rule_tag_subquery.filter(model.DBtag.name.in_(findings_filter.rule_tags))
    if findings_filter.rule_pack_versions or findings_filter.rule_names:
        rule_tag_subquery = rule_tag_subquery.join(model.DBrule, model.DBrule.id_ == model.DBruleTag.rule_id)
        if findings_filter.rule_pack_versions:
            rule_tag_subquery = rule_tag_subquery.filter(model.DBrule.rule_pack.in_(findings_filter.rule_pack_versions))
        if findings_filter.rule_names:
            rule_tag_subquery = rule_tag_subquery.filter(model.DBrule.rule_name.in_(findings_filter.rule_names))
    rule_tag_subquery = rule_tag_subquery.group_by(model.DBruleTag.rule_id).subquery()
    # Cap the page size at the API-wide maximum.
    limit_val = MAX_RECORDS_PER_PAGE_LIMIT if limit > MAX_RECORDS_PER_PAGE_LIMIT else limit
    query = db_connection.query(
        model.DBfinding.id_,
        model.DBfinding.file_path,
        model.DBfinding.line_number,
        model.DBfinding.column_start,
        model.DBfinding.column_end,
        model.DBfinding.commit_id,
        model.DBfinding.commit_message,
        model.DBfinding.commit_timestamp,
        model.DBfinding.author,
        model.DBfinding.email,
        model.DBaudit.status,
        model.DBaudit.comment,
        model.DBfinding.rule_name,
        model.DBscan.rule_pack,
        model.DBfinding.event_sent_on,
        model.DBscan.timestamp,
        model.DBscan.id_.label("scan_id"),
        model.DBscan.last_scanned_commit,
        model.DBVcsInstance.provider_type.label("vcs_provider"),
        model.DBrepository.project_key,
        model.DBrepository.repository_name,
        model.DBrepository.repository_url,
    )
    query = query.join(model.DBscanFinding, model.DBfinding.id_ == model.DBscanFinding.finding_id)
    # With explicit scan ids: report findings from exactly those scans; otherwise
    # report from each repository's latest base scan and every scan after it.
    if findings_filter.scan_ids:
        query = query.join(model.DBscan, and_(model.DBscanFinding.scan_id == model.DBscan.id_,
                                              model.DBscan.id_.in_(findings_filter.scan_ids)))
    else:
        query = query.join(max_base_scan_subquery,
                           model.DBfinding.repository_id == max_base_scan_subquery.c.repository_id)
        query = query.join(model.DBscan, and_(model.DBscanFinding.scan_id == model.DBscan.id_,
                                              model.DBscan.id_ >= max_base_scan_subquery.c.latest_base_scan_id))
    query = query.join(model.DBrepository,
                       model.repository.DBrepository.id_ == model.finding.DBfinding.repository_id) \
        .join(model.DBVcsInstance,
              model.vcs_instance.DBVcsInstance.id_ == model.repository.DBrepository.vcs_instance)
    # Outer-join each finding's latest audit (if any) for its status and comment.
    query = query.join(max_audit_subquery, max_audit_subquery.c.finding_id == model.finding.DBfinding.id_,
                       isouter=True)
    query = query.join(model.DBaudit, and_(model.audit.DBaudit.finding_id == model.finding.DBfinding.id_,
                                           model.audit.DBaudit.id_ == max_audit_subquery.c.audit_id),
                       isouter=True)
    if findings_filter.rule_tags:
        query = query.join(model.DBrule, and_(model.DBrule.rule_name == model.DBfinding.rule_name,
                                              model.DBrule.rule_pack == model.DBscan.rule_pack))
        query = query.join(rule_tag_subquery, model.DBrule.id_ == rule_tag_subquery.c.rule_id)
    if findings_filter.rule_pack_versions:
        query = query.filter(model.DBscan.rule_pack.in_(findings_filter.rule_pack_versions))
    if findings_filter.start_date_time:
        query = query.filter(model.scan.DBscan.timestamp >= findings_filter.start_date_time)
    if findings_filter.end_date_time:
        query = query.filter(model.scan.DBscan.timestamp <= findings_filter.end_date_time)
    if findings_filter.event_sent is not None:
        if findings_filter.event_sent:
            query = query.filter(model.finding.DBfinding.event_sent_on.is_not(None))
        else:
            query = query.filter(model.finding.DBfinding.event_sent_on.is_(None))
    if findings_filter.repository_name:
        query = query.filter(model.DBrepository.repository_name == findings_filter.repository_name)
    if findings_filter.vcs_providers and findings_filter.vcs_providers is not None:
        query = query.filter(model.vcs_instance.DBVcsInstance.provider_type.in_(findings_filter.vcs_providers))
    if findings_filter.project_name:
        query = query.filter(model.repository.DBrepository.project_key == findings_filter.project_name)
    if findings_filter.rule_names:
        query = query.filter(model.DBfinding.rule_name.in_(findings_filter.rule_names))
    if findings_filter.finding_statuses:
        # NOT_ANALYZED findings may have no audit row at all, hence the NULL branch.
        if FindingStatus.NOT_ANALYZED in findings_filter.finding_statuses:
            query = query.filter(or_(model.DBaudit.status.in_(findings_filter.finding_statuses),
                                     model.DBaudit.status == None))  # noqa: E711
        else:
            query = query.filter(model.DBaudit.status.in_(findings_filter.finding_statuses))
    query = query.order_by(model.finding.DBfinding.id_)
    findings: List[detailed_finding_schema.DetailedFindingRead] = query.offset(skip).limit(limit_val).all()
    return findings
def get_detailed_findings_count(db_connection: Session, findings_filter: FindingsFilter) -> int:
    """
    Retrieve count of detailed findings objects matching the provided FindingsFilter
    :param findings_filter:
        Object of type FindingsFilter, only DetailedFindingRead objects matching the attributes in this filter will be
        fetched
    :param db_connection:
        Session of the database connection
    :return: total_count
        count of findings
    """
    # subquery to select latest audit ids of findings
    max_audit_subquery = db_connection.query(model.DBaudit.finding_id,
                                             func.max(model.DBaudit.id_).label("audit_id")) \
        .group_by(model.DBaudit.finding_id).subquery()
    # Subquery: latest BASE scan id per repository (optionally restricted to the
    # requested rule pack versions); mirrors the joins in get_detailed_findings.
    max_base_scan_subquery = db_connection.query(model.DBscan.repository_id,
                                                 func.max(model.DBscan.id_).label("latest_base_scan_id"))
    max_base_scan_subquery = max_base_scan_subquery.filter(model.DBscan.scan_type == ScanType.BASE)
    if findings_filter.rule_pack_versions:
        max_base_scan_subquery = max_base_scan_subquery.filter(
            model.DBscan.rule_pack.in_(findings_filter.rule_pack_versions))
    max_base_scan_subquery = max_base_scan_subquery.group_by(model.DBscan.repository_id).subquery()
    # Subquery: rule ids that carry the requested tags, narrowed further by rule
    # pack version and rule name filters when those are supplied.
    rule_tag_subquery = db_connection.query(model.DBruleTag.rule_id) \
        .join(model.DBtag, model.DBruleTag.tag_id == model.DBtag.id_)
    if findings_filter.rule_tags:
        rule_tag_subquery = rule_tag_subquery.filter(model.DBtag.name.in_(findings_filter.rule_tags))
    if findings_filter.rule_pack_versions or findings_filter.rule_names:
        rule_tag_subquery = rule_tag_subquery.join(model.DBrule, model.DBrule.id_ == model.DBruleTag.rule_id)
        if findings_filter.rule_pack_versions:
            rule_tag_subquery = rule_tag_subquery.filter(model.DBrule.rule_pack.in_(findings_filter.rule_pack_versions))
        if findings_filter.rule_names:
            rule_tag_subquery = rule_tag_subquery.filter(model.DBrule.rule_name.in_(findings_filter.rule_names))
    rule_tag_subquery = rule_tag_subquery.group_by(model.DBruleTag.rule_id).subquery()
    query = db_connection.query(func.count(model.DBfinding.id_))
    query = query.join(model.DBscanFinding, model.DBfinding.id_ == model.DBscanFinding.finding_id)
    # With explicit scan ids: count findings from exactly those scans; otherwise
    # count from each repository's latest base scan and every scan after it.
    if findings_filter.scan_ids:
        query = query.join(model.DBscan, and_(model.DBscanFinding.scan_id == model.DBscan.id_,
                                              model.DBscan.id_.in_(findings_filter.scan_ids)))
    else:
        query = query.join(max_base_scan_subquery,
                           model.DBfinding.repository_id == max_base_scan_subquery.c.repository_id)
        query = query.join(model.DBscan, and_(model.DBscanFinding.scan_id == model.DBscan.id_,
                                              model.DBscan.id_ >= max_base_scan_subquery.c.latest_base_scan_id))
    query = query.join(model.DBrepository,
                       model.repository.DBrepository.id_ == model.finding.DBfinding.repository_id) \
        .join(model.DBVcsInstance,
              model.vcs_instance.DBVcsInstance.id_ == model.repository.DBrepository.vcs_instance)
    # Outer-join each finding's latest audit (if any) for status filtering below.
    query = query.join(max_audit_subquery, max_audit_subquery.c.finding_id == model.finding.DBfinding.id_,
                       isouter=True)
    query = query.join(model.DBaudit, and_(model.audit.DBaudit.finding_id == model.finding.DBfinding.id_,
                                           model.audit.DBaudit.id_ == max_audit_subquery.c.audit_id),
                       isouter=True)
    if findings_filter.rule_tags:
        query = query.join(model.DBrule, and_(model.DBrule.rule_name == model.DBfinding.rule_name,
                                              model.DBrule.rule_pack == model.DBscan.rule_pack))
        query = query.join(rule_tag_subquery, model.DBrule.id_ == rule_tag_subquery.c.rule_id)
    if findings_filter.rule_pack_versions:
        query = query.filter(model.DBscan.rule_pack.in_(findings_filter.rule_pack_versions))
    if findings_filter.start_date_time:
        query = query.filter(model.scan.DBscan.timestamp >= findings_filter.start_date_time)
    if findings_filter.end_date_time:
        query = query.filter(model.scan.DBscan.timestamp <= findings_filter.end_date_time)
    if findings_filter.event_sent is not None:
        if findings_filter.event_sent:
            query = query.filter(model.finding.DBfinding.event_sent_on.is_not(None))
        else:
            query = query.filter(model.finding.DBfinding.event_sent_on.is_(None))
    if findings_filter.repository_name:
        query = query.filter(model.DBrepository.repository_name == findings_filter.repository_name)
    if findings_filter.vcs_providers and findings_filter.vcs_providers is not None:
        query = query.filter(model.vcs_instance.DBVcsInstance.provider_type.in_(findings_filter.vcs_providers))
    if findings_filter.project_name:
        query = query.filter(model.repository.DBrepository.project_key == findings_filter.project_name)
    if findings_filter.rule_names:
        query = query.filter(model.DBfinding.rule_name.in_(findings_filter.rule_names))
    if findings_filter.finding_statuses:
        # NOT_ANALYZED findings may have no audit row at all, hence the NULL branch.
        if FindingStatus.NOT_ANALYZED in findings_filter.finding_statuses:
            query = query.filter(or_(model.DBaudit.status.in_(findings_filter.finding_statuses),
                                     model.DBaudit.status == None))  # noqa: E711
        else:
            query = query.filter(model.DBaudit.status.in_(findings_filter.finding_statuses))
    findings_count = query.scalar()
    return findings_count
def get_detailed_finding(db_connection: Session, finding_id: int) -> detailed_finding_schema.DetailedFindingRead:
    """
    Retrieve a detailed finding objects matching the provided finding_id
    :param db_connection:
        Session of the database connection
    :param finding_id:
        ID of the finding object for which a DetailedFinding is to be fetched
    :return: DetailedFindingRead
        The output will contain an object of type DetailedFindingRead,
        or a null object finding was found for the given finding_id
    """
    # Subquery: the most recent scan in which each finding occurred.
    max_scan_subquery = db_connection.query(model.DBscanFinding.finding_id,
                                            func.max(model.DBscanFinding.scan_id).label("scan_id"))
    max_scan_subquery = max_scan_subquery.group_by(model.DBscanFinding.finding_id).subquery()
    # subquery to select latest audit ids of findings
    max_audit_subquery = db_connection.query(model.DBaudit.finding_id,
                                             func.max(model.DBaudit.id_).label("audit_id")) \
        .group_by(model.DBaudit.finding_id).subquery()
    scan_id = model.DBscan.id_.label("scan_id")
    # Join the finding to its latest scan, that scan's repository and VCS instance,
    # and (outer) its latest audit for status/comment.
    query = db_connection.query(
        model.DBfinding.id_,
        model.DBfinding.file_path,
        model.DBfinding.line_number,
        model.DBfinding.column_start,
        model.DBfinding.column_end,
        model.DBfinding.commit_id,
        model.DBfinding.commit_message,
        model.DBfinding.commit_timestamp,
        model.DBfinding.author,
        model.DBfinding.email,
        model.DBaudit.status,
        model.DBaudit.comment,
        model.DBfinding.rule_name,
        model.DBscan.rule_pack,
        model.DBscan.timestamp,
        scan_id,
        model.DBscan.last_scanned_commit,
        model.DBVcsInstance.provider_type.label("vcs_provider"),
        model.DBrepository.project_key,
        model.DBrepository.repository_name,
        model.DBrepository.repository_url,
    ).join(max_scan_subquery, model.finding.DBfinding.id_ == max_scan_subquery.c.finding_id) \
        .join(model.DBscan,
              model.scan.DBscan.id_ == max_scan_subquery.c.scan_id) \
        .join(model.DBrepository,
              model.repository.DBrepository.id_ == model.scan.DBscan.repository_id) \
        .join(model.DBVcsInstance,
              model.vcs_instance.DBVcsInstance.id_ == model.repository.DBrepository.vcs_instance) \
        .join(max_audit_subquery, max_audit_subquery.c.finding_id == model.finding.DBfinding.id_,
              isouter=True) \
        .join(model.DBaudit, and_(model.audit.DBaudit.finding_id == model.finding.DBfinding.id_,
                                  model.audit.DBaudit.id_ == max_audit_subquery.c.audit_id),
              isouter=True) \
        .filter(model.finding.DBfinding.id_ == finding_id)
    finding = query.first()
    return finding
from typing import List
# Third Party
from sqlalchemy.orm import Session
# First Party
from resc_backend.db import model
def create_rule_tag(db_connection: Session, rule_id: int, tags: List[str]) -> List[model.DBruleTag]:
    """
    Create rule tag entries, linking / creating tag names to a rule
    :param db_connection:
        Session of the database connection
    :param rule_id:
        ID of the rule to link the tags to
    :param tags:
        List of string type containing tags to link to the rule
    :return: [DBruleTag]
        The output will contain a list of DBruleTag objects
    """
    # Ensure every tag name exists as a DBtag row, then link each to the rule.
    tag_rows = create_tags_if_not_exists(db_connection, tags)
    rule_tag_rows = [model.DBruleTag(rule_id=rule_id, tag_id=tag_row.id_)
                     for tag_row in tag_rows]
    if rule_tag_rows:
        db_connection.add_all(rule_tag_rows)
        db_connection.flush()
        db_connection.commit()
    return rule_tag_rows
def create_tags_if_not_exists(db_connection: Session, tags: List[str]) -> List[model.DBtag]:
    """
    Create tags if they don't exist or select existing
    :param db_connection:
        Session of the database connection
    :param tags:
        List of string type containing tags to create if they don't exist
    :return: [DBtag]
        The output will contain a list of tag objects
    """
    # Query.all() always returns a list (possibly empty), never None, so the
    # previous `if db_tags is not None` branch was dead code and has been removed.
    db_tags = db_connection.query(model.DBtag).filter(model.DBtag.name.in_(tags)).all()
    known_names = {db_tag.name for db_tag in db_tags}
    # Preserve order (and duplicates) of the input when deciding what to create.
    tags_to_create = [tag for tag in tags if tag not in known_names]
    if tags_to_create:
        db_tags.extend(create_tags(db_connection, tags_to_create))
    return db_tags
def create_tags(db_connection: Session, tags: List[str]) -> List[model.DBtag]:
    """
    Create tags
    :param db_connection:
        Session of the database connection
    :param tags:
        List of string type containing tags to create
    :return: [DBtag]
        The output will contain a list of tag objects
    """
    new_tag_rows = [model.DBtag(name=tag_name) for tag_name in tags]
    if new_tag_rows:
        db_connection.add_all(new_tag_rows)
        db_connection.flush()
        db_connection.commit()
    return new_tag_rows
def get_rule_tag_names_by_rule_pack_version(db_connection: Session, rule_pack_version: str):
    """
    Get rule names and their tags based on the rule pack version
    :param db_connection:
        Session of the database connection
    :param rule_pack_version:
        Version of the rule pack for which to retrieve the rule tags
    :return: [rule.rule_name, tag.name]
        The output will contain a list of each rule and tag occurrence in the rule_pack
    """
    # One row per (rule, tag) pair; a rule with several tags appears multiple times.
    query = db_connection.query(model.DBrule.rule_name, model.DBtag.name)\
        .join(model.DBruleTag, model.DBruleTag.tag_id == model.DBtag.id_)\
        .join(model.DBrule, model.DBrule.id_ == model.DBruleTag.rule_id)\
        .filter(model.DBrule.rule_pack == rule_pack_version)
    return query.all()
import logging
from datetime import datetime, timedelta
# Third Party
from sqlalchemy import and_, extract, func, or_
from sqlalchemy.engine import Row
from sqlalchemy.orm import Session
# First Party
from resc_backend.constants import DEFAULT_RECORDS_PER_PAGE_LIMIT, MAX_RECORDS_PER_PAGE_LIMIT
from resc_backend.db import model
from resc_backend.resc_web_service.schema.finding_status import FindingStatus
from resc_backend.resc_web_service.schema.time_period import TimePeriod
logger = logging.getLogger(__name__)
def create_audit(db_connection: Session, finding_id: int, auditor: str,
                 status: FindingStatus, comment: str = "") -> model.DBaudit:
    """
    Audit finding, updating the status and comment
    :param db_connection:
        Session of the database connection
    :param finding_id:
        id of the finding to audit
    :param auditor:
        identifier of the person performing the audit action
    :param status:
        audit status to set, type FindingStatus
    :param comment:
        audit comment to set
    :return: DBaudit
        The output will contain the audit that was created
    """
    # Use the model.DBaudit alias for consistency with the rest of this module
    db_audit = model.DBaudit(
        finding_id=finding_id,
        auditor=auditor,
        status=status,
        comment=comment,
        timestamp=datetime.utcnow()
    )
    db_connection.add(db_audit)
    db_connection.commit()
    # refresh so generated columns (e.g. the primary key) are populated
    db_connection.refresh(db_audit)
    return db_audit
def get_finding_audits(db_connection: Session, finding_id: int, skip: int = 0,
                       limit: int = DEFAULT_RECORDS_PER_PAGE_LIMIT) -> list[model.DBaudit]:
    """
    Get Audit entries for finding
    :param db_connection:
        Session of the database connection
    :param finding_id:
        id of the finding to audit
    :param skip:
        integer amount of records to skip to support pagination
    :param limit:
        integer amount of records to return, to support pagination
    :return: list[DBaudit]
        The output will contain the list of audit items for the given finding
    """
    # Cap the page size at the configured maximum
    limit_val = MAX_RECORDS_PER_PAGE_LIMIT if limit > MAX_RECORDS_PER_PAGE_LIMIT else limit
    query = db_connection.query(model.DBaudit).filter(model.DBaudit.finding_id == finding_id)
    # Newest audits first
    query = query.order_by(model.DBaudit.id_.desc()).offset(skip).limit(limit_val)
    finding_audits = query.all()
    return finding_audits
def get_finding_audits_count(db_connection: Session, finding_id: int) -> int:
    """
    Get count of Audit entries for finding
    :param db_connection:
        Session of the database connection
    :param finding_id:
        id of the finding to audit
    :return: total_count
        count of audit entries
    """
    count_query = db_connection.query(func.count(model.DBaudit.id_)) \
        .filter(model.DBaudit.finding_id == finding_id)
    return count_query.scalar()
def get_audit_count_by_auditor_over_time(db_connection: Session, weeks: int = 13) -> list[Row]:
    """
    Retrieve count audits by auditor over time for given weeks
    :param db_connection:
        Session of the database connection
    :param weeks:
        optional, filter on last n weeks, default 13
    :return: count_over_time
        list of rows containing audit count over time per week
    """
    cutoff_date_time = datetime.utcnow() - timedelta(weeks=weeks)
    # Hoist the repeated year/week expressions so the query reads in one pass
    year_col = extract('year', model.DBaudit.timestamp).label("year")
    week_col = extract('week', model.DBaudit.timestamp).label("week")
    cutoff_year = extract('year', cutoff_date_time)
    cutoff_week = extract('week', cutoff_date_time)
    # Rows are in range when they are in a later year, or in the same year at/after the cutoff week
    in_range = or_(extract('year', model.DBaudit.timestamp) > cutoff_year,
                   and_(extract('year', model.DBaudit.timestamp) == cutoff_year,
                        extract('week', model.DBaudit.timestamp) >= cutoff_week))
    query = db_connection.query(year_col,
                                week_col,
                                model.DBaudit.auditor,
                                func.count(model.DBaudit.id_).label("audit_count")) \
        .filter(in_range) \
        .group_by(year_col, week_col, model.DBaudit.auditor) \
        .order_by(year_col, week_col, model.DBaudit.auditor)
    return query.all()
def get_personal_audit_count(db_connection: Session, auditor: str, time_period: TimePeriod) -> int:
    """
    Get count of Audit entries for an auditor within the given time period
    :param db_connection:
        Session of the database connection
    :param auditor:
        id of the auditor
    :param time_period:
        period for which to retrieve the audit counts
    :return: total_count
        count of audit entries
    """
    date_today = datetime.utcnow()
    total_count = db_connection.query(func.count(model.DBaudit.id_))
    # Reference enum members via the TimePeriod class rather than the passed-in instance
    if time_period in (TimePeriod.DAY, TimePeriod.MONTH, TimePeriod.YEAR):
        total_count = total_count.filter(extract('year', model.DBaudit.timestamp) == extract('year', date_today))
    if time_period in (TimePeriod.DAY, TimePeriod.MONTH):
        total_count = total_count.filter(extract('month', model.DBaudit.timestamp) == extract('month', date_today))
    if time_period == TimePeriod.DAY:
        total_count = total_count.filter(extract('day', model.DBaudit.timestamp) == extract('day', date_today))
    if time_period in (TimePeriod.WEEK, TimePeriod.LAST_WEEK):
        date_last_week = datetime.utcnow() - timedelta(weeks=1)
        # LAST_WEEK shifts the reference date back one week; WEEK uses today
        date_week = date_last_week if time_period == TimePeriod.LAST_WEEK else date_today
        total_count = total_count.filter(extract('year', model.DBaudit.timestamp) == extract('year', date_week))
        total_count = total_count.filter(extract('week', model.DBaudit.timestamp) == extract('week', date_week))
    total_count = total_count.filter(model.DBaudit.auditor == auditor).scalar()
    return total_count
from datetime import datetime
from typing import List
# Third Party
from sqlalchemy import and_, func
from sqlalchemy.orm import Session
# First Party
from resc_backend.constants import DEFAULT_RECORDS_PER_PAGE_LIMIT, MAX_RECORDS_PER_PAGE_LIMIT
from resc_backend.db import model
from resc_backend.resc_web_service.crud import finding as finding_crud
from resc_backend.resc_web_service.crud import scan_finding as scan_finding_crud
from resc_backend.resc_web_service.schema import scan as scan_schema
from resc_backend.resc_web_service.schema.finding_status import FindingStatus
from resc_backend.resc_web_service.schema.scan_type import ScanType
def get_scan(db_connection: Session, scan_id: int) -> model.DBscan:
    """
    Retrieve a scan by its id
    :param db_connection:
        Session of the database connection
    :param scan_id:
        id of the scan to retrieve
    :return: DBscan
        The scan object, or None when no scan with the given id exists
    """
    # Use the model.DBscan alias consistently (the original mixed it with model.scan.DBscan)
    scan = db_connection.query(model.DBscan).filter(model.DBscan.id_ == scan_id).first()
    return scan
def get_latest_scan_for_repository(db_connection: Session, repository_id: int) -> model.DBscan:
    """
    Retrieve the most recent scan of a given repository object
    :param db_connection:
        Session of the database connection
    :param repository_id:
        id of the repository object for which to retrieve the most recent scan
    :return: scan
        scan object having the most recent timestamp for a given repository object
    """
    # Subquery selecting the newest scan timestamp for this repository
    max_time_subquery = (db_connection.query(func.max(model.DBscan.timestamp).label("max_time"))
                         .filter(model.scan.DBscan.repository_id == repository_id)).subquery()
    latest_scan = db_connection.query(model.DBscan) \
        .join(max_time_subquery,
              and_(model.DBscan.timestamp == max_time_subquery.c.max_time)) \
        .filter(model.scan.DBscan.repository_id == repository_id).first()
    return latest_scan
def get_scans(db_connection: Session, skip: int = 0,
              limit: int = DEFAULT_RECORDS_PER_PAGE_LIMIT, repository_id: int = -1) -> List[model.DBscan]:
    """
    Retrieve the scan records, ordered by scan_id and optionally filtered by repository_id
    :param db_connection:
        Session of the database connection
    :param repository_id:
        optional int filtering the repository for which to retrieve scans
    :param skip:
        integer amount of records to skip to support pagination
    :param limit:
        integer amount of records to return, to support pagination
    :return: [DBscan]
        List of DBScan objects
    """
    # Clamp the page size to the configured maximum
    limit_val = min(limit, MAX_RECORDS_PER_PAGE_LIMIT)
    scan_query = db_connection.query(model.DBscan)
    if repository_id > 0:
        scan_query = scan_query.filter(model.DBscan.repository_id == repository_id)
    return scan_query.order_by(model.scan.DBscan.id_).offset(skip).limit(limit_val).all()
def get_scans_count(db_connection: Session, repository_id: int = -1) -> int:
    """
    Retrieve count of scan records optionally filtered by VCS provider
    :param db_connection:
        Session of the database connection
    :param repository_id:
        optional int filtering the repository for which to retrieve scans
    :return: total_count
        count of scans
    """
    count_query = db_connection.query(func.count(model.DBscan.id_))
    if repository_id > 0:
        count_query = count_query.filter(model.DBscan.repository_id == repository_id)
    return count_query.scalar()
def update_scan(db_connection: Session, scan_id: int, scan: scan_schema.ScanCreate) -> model.DBscan:
    """
    Update an existing scan record with the values from the provided scan object
    :param db_connection:
        Session of the database connection
    :param scan_id:
        id of the scan record to update
    :param scan:
        ScanCreate object holding the new values for the scan record
    :return: DBscan
        The output will contain the updated scan object
    """
    # NOTE(review): assumes a scan with scan_id exists; .first() returning None
    # would raise AttributeError below -- TODO confirm callers guarantee existence
    db_scan = db_connection.query(model.DBscan).filter_by(id_=scan_id).first()
    db_scan.scan_type = scan.scan_type
    db_scan.last_scanned_commit = scan.last_scanned_commit
    db_scan.timestamp = scan.timestamp
    db_scan.increment_number = scan.increment_number
    db_scan.rule_pack = scan.rule_pack
    db_connection.commit()
    # refresh so the returned object reflects the committed row
    db_connection.refresh(db_scan)
    return db_scan
def create_scan(db_connection: Session, scan: scan_schema.ScanCreate) -> model.DBscan:
    """
    Create a new scan record from the provided scan object
    :param db_connection:
        Session of the database connection
    :param scan:
        ScanCreate object holding the values for the new scan record
    :return: DBscan
        The output will contain the created scan object
    """
    new_scan = model.scan.DBscan(
        scan_type=scan.scan_type,
        last_scanned_commit=scan.last_scanned_commit,
        repository_id=scan.repository_id,
        timestamp=scan.timestamp,
        increment_number=scan.increment_number,
        rule_pack=scan.rule_pack
    )
    db_connection.add(new_scan)
    db_connection.commit()
    # refresh so generated columns (e.g. the primary key) are populated
    db_connection.refresh(new_scan)
    return new_scan
def get_repository_findings_metadata_for_latest_scan(db_connection: Session, repository_id: int,
                                                     scan_timestamp: datetime):
    """
    Retrieves the finding metadata for latest scan of a repository from the database
    :param db_connection:
        Session of the database connection
    :param repository_id:
        repository id of the latest scan
    :param scan_timestamp:
        timestamp of the latest scan
    :return: findings_metadata
        findings_metadata containing the count for each status
    """
    # Collect scan ids from the given timestamp down to (and including) the most recent BASE scan
    scan_ids_latest_to_base = []
    scans = get_scans(db_connection=db_connection,
                      repository_id=repository_id, limit=1000000)
    scans.sort(key=lambda x: x.timestamp, reverse=True)
    for scan in scans:
        if scan.timestamp <= scan_timestamp:
            scan_ids_latest_to_base.append(scan.id_)
            if scan.scan_type == ScanType.BASE:
                break
    # One counter per audit status, replacing the previous five separate variables
    status_counts = {
        FindingStatus.TRUE_POSITIVE: 0,
        FindingStatus.FALSE_POSITIVE: 0,
        FindingStatus.NOT_ANALYZED: 0,
        FindingStatus.UNDER_REVIEW: 0,
        FindingStatus.CLARIFICATION_REQUIRED: 0,
    }
    if len(scan_ids_latest_to_base) > 0:
        findings_count_by_status = finding_crud.get_findings_count_by_status(
            db_connection, scan_ids=scan_ids_latest_to_base, finding_statuses=FindingStatus)
        for finding in findings_count_by_status:
            finding_status = finding[1]
            count = finding[0]
            # Findings without an audit status count as NOT_ANALYZED
            key = FindingStatus.NOT_ANALYZED if finding_status is None else finding_status
            if key in status_counts:
                status_counts[key] += count
    total_findings_count = sum(status_counts.values())
    findings_metadata = {
        "true_positive": status_counts[FindingStatus.TRUE_POSITIVE],
        "false_positive": status_counts[FindingStatus.FALSE_POSITIVE],
        "not_analyzed": status_counts[FindingStatus.NOT_ANALYZED],
        "under_review": status_counts[FindingStatus.UNDER_REVIEW],
        "clarification_required": status_counts[FindingStatus.CLARIFICATION_REQUIRED],
        "total_findings_count": total_findings_count
    }
    return findings_metadata
def delete_repository_findings_not_linked_to_any_scan(db_connection: Session, repository_id: int):
    """
    Delete findings for a given repository which are not linked to any scans
    :param db_connection:
        Session of the database connection
    :param repository_id:
        id of the repository
    """
    # Finding ids that are still referenced by at least one scan
    linked_finding_ids = db_connection.query(model.DBscanFinding.finding_id).distinct()
    db_connection.query(model.DBfinding) \
        .filter(model.finding.DBfinding.id_.not_in(linked_finding_ids),
                model.finding.DBfinding.repository_id == repository_id) \
        .delete(synchronize_session=False)
    db_connection.commit()
def delete_scan(db_connection: Session, repository_id: int, scan_id: int, delete_related: bool = False):
    """
    Delete a scan object
    :param db_connection:
        Session of the database connection
    :param repository_id:
        repository_id for which findings will be deleted which are not linked to any scans
    :param scan_id:
        id of the scan to be deleted
    :param delete_related:
        if related records need to be deleted
    """
    if delete_related:
        # Remove the scan-finding link records first
        scan_finding_crud.delete_scan_finding(db_connection, scan_id=scan_id)
    scan_query = db_connection.query(model.DBscan).filter(model.scan.DBscan.id_ == scan_id)
    scan_query.delete(synchronize_session=False)
    db_connection.commit()
    # Clean up findings that are no longer referenced by any scan
    delete_repository_findings_not_linked_to_any_scan(db_connection, repository_id=repository_id)
def delete_scans_by_repository_id(db_connection: Session, repository_id: int):
    """
    Delete scans for a given repository
    :param db_connection:
        Session of the database connection
    :param repository_id:
        id of the repository
    """
    repository_scans = db_connection.query(model.DBscan) \
        .filter(model.scan.DBscan.repository_id == repository_id)
    repository_scans.delete(synchronize_session=False)
    db_connection.commit()
def delete_scans_by_vcs_instance_id(db_connection: Session, vcs_instance_id: int):
    """
    Delete scans for a given vcs instance
    :param db_connection:
        Session of the database connection
    :param vcs_instance_id:
        id of the vcs instance
    """
    # NOTE(review): this bulk delete uses multi-table criteria (scan -> repository -> vcs
    # instance) in the filter; support for join-style filtering in DELETE depends on the
    # database backend -- confirm against the target database
    db_connection.query(model.DBscan) \
        .filter(model.scan.DBscan.repository_id == model.repository.DBrepository.id_,
                model.repository.DBrepository.vcs_instance == model.vcs_instance.DBVcsInstance.id_,
                model.vcs_instance.DBVcsInstance.id_ == vcs_instance_id) \
        .delete(synchronize_session=False)
    db_connection.commit()
from typing import List
# Third Party
from fastapi import APIRouter, status
from fastapi_cache.decorator import cache
# First Party
from resc_backend.constants import (
CACHE_NAMESPACE_VCS_INSTANCE,
COMMON_TAG,
ERROR_MESSAGE_500,
ERROR_MESSAGE_503,
REDIS_CACHE_EXPIRE,
RWS_ROUTE_AUTH_CHECK,
RWS_ROUTE_SUPPORTED_VCS_PROVIDERS
)
from resc_backend.resc_web_service.schema.vcs_provider import VCSProviders
router = APIRouter(tags=[COMMON_TAG])
@router.get(f"{RWS_ROUTE_SUPPORTED_VCS_PROVIDERS}",
            response_model=List[str],
            summary="Get supported vcs-providers",
            description="Retrieve the supported vcs-providers, example: Bitbucket, AzureDevOps, Github etc",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Retrieve the supported vcs-providers"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
@cache(namespace=CACHE_NAMESPACE_VCS_INSTANCE, expire=REDIS_CACHE_EXPIRE)
def get_supported_vcs_providers() -> List[str]:
    """
    Retrieve all supported vcs providers
    :return: List[str]
        The output will contain a list of strings of unique vcs providers
    """
    # filter(None, ...) keeps only truthy members, mirroring the original guard
    return list(filter(None, VCSProviders))
@router.get(f"{RWS_ROUTE_AUTH_CHECK}",
            summary="Authorization check",
            description="The output returns 200 OK if auth check is successful else returns 403 Forbidden",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Validate authorization check from the access-token"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
def auth_check():
    """
    Validates authorization check from the access token
    :return: str
        Returns 200 OK when the auth check succeeds, 403 Forbidden otherwise
    """
    response = {"message": "OK"}
    return response
import logging
from datetime import datetime, timedelta
from typing import Optional
# Third Party
from fastapi import APIRouter, Depends, Query, Request, status
from fastapi_cache.decorator import cache
# First Party
from resc_backend.constants import (
CACHE_NAMESPACE_FINDING,
ERROR_MESSAGE_500,
ERROR_MESSAGE_503,
METRICS_TAG,
REDIS_CACHE_EXPIRE,
RWS_ROUTE_AUDIT_COUNT_BY_AUDITOR_OVER_TIME,
RWS_ROUTE_AUDITED_COUNT_OVER_TIME,
RWS_ROUTE_COUNT_PER_VCS_PROVIDER_BY_WEEK,
RWS_ROUTE_METRICS,
RWS_ROUTE_PERSONAL_AUDITS,
RWS_ROUTE_UN_TRIAGED_COUNT_OVER_TIME
)
from resc_backend.db.connection import Session
from resc_backend.resc_web_service.cache_manager import CacheManager
from resc_backend.resc_web_service.crud import audit as audit_crud
from resc_backend.resc_web_service.crud import finding as finding_crud
from resc_backend.resc_web_service.dependencies import get_db_connection
from resc_backend.resc_web_service.schema.audit_count_over_time import AuditCountOverTime
from resc_backend.resc_web_service.schema.finding_count_over_time import FindingCountOverTime
from resc_backend.resc_web_service.schema.finding_status import FindingStatus
from resc_backend.resc_web_service.schema.personal_audit_metrics import PersonalAuditMetrics
from resc_backend.resc_web_service.schema.time_period import TimePeriod
from resc_backend.resc_web_service.schema.vcs_provider import VCSProviders
# Router for the metric endpoints, mounted under the metrics route prefix
router = APIRouter(prefix=f"{RWS_ROUTE_METRICS}", tags=[METRICS_TAG])
# Module-level logger named after this module
logger = logging.getLogger(__name__)
@router.get(f"{RWS_ROUTE_AUDITED_COUNT_OVER_TIME}",
            response_model=list[FindingCountOverTime],
            summary="Get count of audit status over time for given weeks per vcs provider",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Retrieve count of audit status over time for given weeks per vcs provider"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE)
def get_finding_audit_count_over_time(db_connection: Session = Depends(get_db_connection),
                                      weeks: Optional[int] = Query(default=13, ge=1),
                                      audit_status: Optional[FindingStatus] = Query(default=FindingStatus.TRUE_POSITIVE)
                                      ) -> list[FindingCountOverTime]:
    """
    Retrieve count of audited findings over time for given weeks per vcs provider
    - **db_connection**: Session of the database connection
    - **weeks**: Nr of weeks for which to retrieve the audit status count
    - **audit_status**: audit status for which to retrieve the counts, defaults to True positive
    - **return**: [FindingCountOverTime]
        The output will contain a list of FindingCountOverTime type objects
    """
    count_rows = finding_crud.get_finding_audit_status_count_over_time(db_connection=db_connection,
                                                                       status=audit_status,
                                                                       weeks=weeks)
    return convert_rows_to_finding_count_over_time(count_over_time=count_rows, weeks=weeks)
@router.get(f"{RWS_ROUTE_COUNT_PER_VCS_PROVIDER_BY_WEEK}",
            response_model=list[FindingCountOverTime],
            summary="Get count of findings over time for given weeks per vcs provider",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Retrieve count of findings over time for given weeks per vcs provider"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE)
def get_finding_total_count_over_time(db_connection: Session = Depends(get_db_connection),
                                      weeks: Optional[int] = Query(default=13, ge=1)) -> list[FindingCountOverTime]:
    """
    Retrieve count of findings over time for given weeks per vcs provider
    - **db_connection**: Session of the database connection
    - **weeks**: Nr of weeks for which to retrieve the finding counts
    - **return**: [FindingCountOverTime]
        The output will contain a list of FindingCountOverTime type objects
    """
    audit_counts = finding_crud.get_finding_count_by_vcs_provider_over_time(db_connection=db_connection, weeks=weeks)
    output = convert_rows_to_finding_count_over_time(count_over_time=audit_counts, weeks=weeks)
    return output
@router.get(f"{RWS_ROUTE_UN_TRIAGED_COUNT_OVER_TIME}",
            response_model=list[FindingCountOverTime],
            summary="Get count of UnTriaged findings over time for given weeks per vcs provider",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Retrieve count of UnTriaged findings over time for given weeks per vcs provider"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE)
def get_finding_un_triaged_count_over_time(db_connection: Session = Depends(get_db_connection),
                                           weeks: Optional[int] = Query(default=13, ge=1)) \
        -> list[FindingCountOverTime]:
    """
    Retrieve count of UnTriaged findings over time for given weeks per vcs provider
    - **db_connection**: Session of the database connection
    - **weeks**: Nr of weeks for which to retrieve the un-triaged finding counts
    - **return**: [FindingCountOverTime]
        The output will contain a list of FindingCountOverTime type objects
    """
    audit_counts = finding_crud.get_un_triaged_finding_count_by_vcs_provider_over_time(db_connection=db_connection,
                                                                                       weeks=weeks)
    output = convert_rows_to_finding_count_over_time(count_over_time=audit_counts, weeks=weeks)
    return output
def convert_rows_to_finding_count_over_time(count_over_time: dict, weeks: int) -> list[FindingCountOverTime]:
    """
    Convert the rows from the database to the format of list[FindingCountOverTime]
    :param count_over_time:
        rows from the database
    :param weeks:
        number of weeks that are in the data
    :return: output
        list[FindingCountOverTime]
    """
    # Define the vcs provider types
    vcs_provider_types = list(VCSProviders)
    # create defaults with 0 value per provider (plus a running total) for every week in range;
    # distinct names avoid reusing `week` as both the loop index and the label
    week_groups = {}
    for week_offset in range(0, weeks):
        nth_week = datetime.utcnow() - timedelta(weeks=week_offset)
        week_label = f"{nth_week.isocalendar().year} W{nth_week.isocalendar().week:02d}"
        week_groups[week_label] = {provider: 0 for provider in vcs_provider_types + ["total"]}
    # loop over the counts from the database
    for data in count_over_time:
        week_label = f"{data['year']} W{data['week']:02d}"
        finding_count = data["finding_count"]
        week_groups[week_label][data["provider_type"]] += finding_count
        week_groups[week_label]["total"] += finding_count
    # Convert to the output format, sorted chronologically by week label
    output = []
    for week_label in sorted(week_groups.keys()):
        week_data = FindingCountOverTime(time_period=week_label, total=week_groups[week_label]["total"])
        for provider in vcs_provider_types:
            setattr(week_data.vcs_provider_finding_count, provider, week_groups[week_label][provider])
        output.append(week_data)
    return output
@router.get(f"{RWS_ROUTE_AUDIT_COUNT_BY_AUDITOR_OVER_TIME}",
            response_model=list[AuditCountOverTime],
            summary="Get count of Audits by Auditor over time for given weeks",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Retrieve count of Audits by Auditor over time for given weeks"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE)
def get_audit_count_by_auditor_over_time(db_connection: Session = Depends(get_db_connection),
                                         weeks: Optional[int] = Query(default=13, ge=1)) \
        -> list[AuditCountOverTime]:
    """
    Retrieve count of Audits by Auditor over time for given weeks
    - **db_connection**: Session of the database connection
    - **weeks**: Nr of weeks for which to retrieve the audit counts
    - **return**: [AuditCountOverTime]
        The output will contain a list of AuditCountOverTime type objects
    """
    audit_counts = audit_crud.get_audit_count_by_auditor_over_time(db_connection=db_connection, weeks=weeks)
    # get the unique auditors from the data, each defaulting to a count of 0
    auditors_default = {audit['auditor']: 0 for audit in audit_counts}
    # default to 0 per auditor for all weeks in range
    weekly_audit_counts = {}
    for week_offset in range(0, weeks):
        nth_week = datetime.utcnow() - timedelta(weeks=week_offset)
        week_label = f"{nth_week.isocalendar().year} W{nth_week.isocalendar().week:02d}"
        weekly_audit_counts[week_label] = AuditCountOverTime(time_period=week_label,
                                                             audit_by_auditor_count=dict(auditors_default))
    # set the counts based on the data from the database
    for audit in audit_counts:
        audit_week = f"{audit['year']} W{audit['week']:02d}"
        if audit_week in weekly_audit_counts:
            weekly_audit_counts.get(audit_week).audit_by_auditor_count[audit['auditor']] = audit['audit_count']
            weekly_audit_counts.get(audit_week).total += audit['audit_count']
    # Sort once by week label (the previous implementation sorted the dict twice)
    sorted_weekly_audit_counts = dict(sorted(weekly_audit_counts.items()))
    return list(sorted_weekly_audit_counts.values())
@router.get(f"{RWS_ROUTE_PERSONAL_AUDITS}",
            response_model=PersonalAuditMetrics,
            summary="Get personal audit metrics",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Get personal audit metrics"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE, key_builder=CacheManager.personalized_key_builder)
def get_personal_audit_metrics(request: Request, db_connection: Session = Depends(get_db_connection)) \
        -> PersonalAuditMetrics:
    """
    Retrieve personal audit metrics
    - **db_connection**: Session of the database connection
    - **return**: [PersonalAuditMetrics]
        The output will contain a PersonalAuditMetrics type object
    """
    # Map each metrics attribute to the time period it covers, replacing six
    # copy-pasted call sites with a single loop
    period_by_attribute = {
        "today": TimePeriod.DAY,
        "current_week": TimePeriod.WEEK,
        "last_week": TimePeriod.LAST_WEEK,
        "current_month": TimePeriod.MONTH,
        "current_year": TimePeriod.YEAR,
        "forever": TimePeriod.FOREVER,
    }
    audit_counts = PersonalAuditMetrics()
    for attribute, time_period in period_by_attribute.items():
        setattr(audit_counts, attribute,
                audit_crud.get_personal_audit_count(db_connection=db_connection,
                                                    auditor=request.user, time_period=time_period))
    audit_counts.rank_current_week = determine_audit_rank_current_week(auditor=request.user,
                                                                       db_connection=db_connection)
    return audit_counts
def determine_audit_rank_current_week(auditor: str, db_connection: Session) -> int:
    """
    Retrieve personal audit ranking this week, compared to other auditors
    - **db_connection**: Session of the database connection
    - **auditor**: id of the auditor
    - **return**: int
        The output will be an integer nr of the ranking this week, defaulting to 0 if no audit was done by the auditor
    """
    audit_counts_db = audit_crud.get_audit_count_by_auditor_over_time(db_connection=db_connection, weeks=0)
    # last-wins on duplicate auditors, matching the previous assignment loop
    auditor_counts = {audit['auditor']: audit['audit_count'] for audit in audit_counts_db}
    # Rank auditors by audit count, highest first (stable sort keeps insertion order on ties)
    ranked_auditors = sorted(auditor_counts, key=auditor_counts.get, reverse=True)
    for rank, ranked_auditor in enumerate(ranked_auditors, start=1):
        if ranked_auditor == auditor:
            return rank
    return 0
import json
import logging
import urllib.parse
# Third Party
from fastapi import APIRouter, Depends, HTTPException, Query, status
from fastapi_cache.decorator import cache
# First Party
from resc_backend.constants import (
CACHE_NAMESPACE_FINDING,
DEFAULT_RECORDS_PER_PAGE_LIMIT,
ERROR_MESSAGE_500,
ERROR_MESSAGE_503,
FINDINGS_TAG,
REDIS_CACHE_EXPIRE,
RWS_ROUTE_DETAILED_FINDINGS
)
from resc_backend.db.connection import Session
from resc_backend.resc_web_service.crud import detailed_finding as detailed_finding_crud
from resc_backend.resc_web_service.dependencies import get_db_connection
from resc_backend.resc_web_service.filters import FindingsFilter
from resc_backend.resc_web_service.helpers.resc_swagger_models import Model404
from resc_backend.resc_web_service.schema import detailed_finding as detailed_finding_schema
from resc_backend.resc_web_service.schema.pagination_model import PaginationModel
# Router for the detailed findings endpoints, mounted under the detailed findings route prefix
router = APIRouter(prefix=f"{RWS_ROUTE_DETAILED_FINDINGS}", tags=[FINDINGS_TAG])
# Module-level logger named after this module
logger = logging.getLogger(__name__)
@router.get("",
            response_model=PaginationModel[detailed_finding_schema.DetailedFindingRead],
            summary="Get all detailed findings",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Retrieve all the findings"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE)
def get_all_detailed_findings(skip: int = Query(default=0, ge=0),
                              limit: int = Query(default=DEFAULT_RECORDS_PER_PAGE_LIMIT, ge=1),
                              db_connection: Session = Depends(get_db_connection),
                              query_string: str = None
                              ) \
        -> PaginationModel[detailed_finding_schema.DetailedFindingRead]:
    """
    Retrieve all findings objects paginated
    - **query_string**
        A query string with the following format:
            param1=value1&param2=value2&param3=value3
        Where the possible parameters are:
            - vcs_providers [enum] of type VCSProviders, possible values are: BITBUCKET, AZURE_DEVOPS.
            Will default to all if non-specified.
            - finding_statuses [enum of type FindingStatus], possible values are:NOT_ANALYZED,FALSE_POSITIVE,
            TRUE_POSITIVE. Will default to all if non-specified.
            - rule_pack_versions of type [String]
            - rule_names of type [String]
            - rule_tags of type [String] findings in the result will have at least one of the specified tags
            for the rules
            - project_name of type String
            - repository_names of type [String]
            - scan_ids of type list Integer
            - start_date_time of type datetime with the following format: 1970-01-31T00:00:00
            - end_date_time of type datetime with the following format: 1970-01-31T00:00:00
    - **db_connection**
        Session of the database connection
    - **skip**
        Integer amount of records to skip to support pagination
    - **limit**
        Integer amount of records to return, to support pagination
    - **return** [FindingRead]
        The output will contain a PaginationModel containing the list of DetailedFinding type objects,
        or an empty list if no finding was found
    """
    parsed_query_string_params = dict(urllib.parse.parse_qsl(query_string))
    # scan_ids is plain JSON (a list of integers), no quote normalization needed
    if parsed_query_string_params.get('scan_ids'):
        parsed_query_string_params['scan_ids'] = json.loads(parsed_query_string_params['scan_ids'])
    # These parameters arrive as single-quoted pseudo-JSON lists; normalize the quotes
    # before parsing (previously five copy-pasted branches)
    for json_list_param in ('vcs_providers', 'finding_statuses', 'rule_names', 'rule_tags', 'rule_pack_versions'):
        if parsed_query_string_params.get(json_list_param):
            parsed_query_string_params[json_list_param] = \
                json.loads(parsed_query_string_params[json_list_param].replace('\'', '"'))
    findings_filter = FindingsFilter(**parsed_query_string_params)
    findings = detailed_finding_crud.get_detailed_findings(
        db_connection, findings_filter=findings_filter, skip=skip, limit=limit)
    total_findings = detailed_finding_crud.get_detailed_findings_count(
        db_connection, findings_filter=findings_filter)
    return PaginationModel[detailed_finding_schema.DetailedFindingRead](
        data=findings, total=total_findings, limit=limit, skip=skip)
@router.get("/{finding_id}",
            response_model=detailed_finding_schema.DetailedFindingRead,
            summary="Fetch detailed finding by ID",
            status_code=status.HTTP_200_OK,
            responses={
                200: {"description": "Retrieve detailed finding <finding_id>"},
                404: {"model": Model404, "description": "Finding <finding_id> not found"},
                500: {"description": ERROR_MESSAGE_500},
                503: {"description": ERROR_MESSAGE_503}
            })
@cache(namespace=CACHE_NAMESPACE_FINDING, expire=REDIS_CACHE_EXPIRE)
def read_finding(finding_id: int, db_connection: Session = Depends(get_db_connection)) \
        -> detailed_finding_schema.DetailedFindingRead:
    """
    Retrieve detailed finding by its ID
    - **db_connection**: Session of the database connection
    - **finding_id**: ID of the finding for which details need to be fetched
    - **return**: [DetailedFindingRead]
        The output will contain the details of a finding
    """
    detailed_finding = detailed_finding_crud.get_detailed_finding(db_connection, finding_id=finding_id)
    if detailed_finding is None:
        raise HTTPException(status_code=404, detail="Finding not found")
    return detailed_finding
import logging
import os
import sys
from typing import List
from urllib.parse import urlparse
# Third Party
import pkg_resources
import requests
import yaml
# First Party
from resc_helm_wizard import constants, questions
from resc_helm_wizard.helm_utilities import (
add_helm_repository,
check_helm_release_exists,
install_or_upgrade_helm_release,
update_helm_repository,
validate_helm_deployment_status
)
from resc_helm_wizard.helm_value import HelmValue
from resc_helm_wizard.kubernetes_utilities import create_namespace_if_not_exists
from resc_helm_wizard.vcs_instance import VcsInstance
logging.basicConfig(level=logging.INFO)
def get_operating_system(user_input: str) -> str:
    """
    Map the user's operating system selection to the internal identifier
    :param user_input:
        answer from the operating system prompt
    :return: str
        Returns "windows" when Microsoft Windows was selected, "linux" otherwise
    """
    return "windows" if user_input == "Microsoft Windows" else "linux"
def create_storage_for_db_and_rabbitmq(operating_system: str) -> dict:
    """
    Create (or reuse) the local volume storage for the database and rabbitmq
    :param operating_system:
        operating system of the target environment
    :return: dict
        Returns a dictionary with the database and rabbitmq storage paths
    """
    local_storage = questions.ask_local_storage_path()
    # Decide once whether the directories should be created; the prompts below
    # mirror the original flow exactly.
    if os.path.exists(local_storage):
        create_dir = True
    elif questions.ask_user_confirmation(msg=f"Do you want to create the directory {local_storage}?") is True:
        create_dir = True
    else:
        logging.warning(
            "Warning! Please ensure the provided directory exists on the system where you are running the "
            "deployment")
        if questions.ask_user_confirmation(msg="Are you sure you want to proceed?") is True:
            create_dir = False
        else:
            logging.info("Aborting the program!!")
            sys.exit(1)
    db_storage_path = generate_pvc_path(operating_system=operating_system, path=local_storage,
                                        tool_type="database", create_dir=create_dir)
    rabbitmq_storage_path = generate_pvc_path(operating_system=operating_system, path=local_storage,
                                              tool_type="rabbitmq", create_dir=create_dir)
    return {"db_storage_path": db_storage_path, "rabbitmq_storage_path": rabbitmq_storage_path}
def generate_pvc_path(operating_system: str, path: str, tool_type: str, create_dir: bool) -> str:
    """
    Generate the volume claim path for database or rabbitmq storage
    :param operating_system:
        operating system ("windows" or anything else for linux-style paths)
    :param path:
        base path provided by the user
    :param tool_type:
        tool type, either "database" or "rabbitmq"
    :param create_dir:
        whether the storage directory should be created when missing
    :return: str
        Returns the volume claim path
    """
    storage_sub_dirs = {"database": "resc-db-storage", "rabbitmq": "resc-rabbitmq-storage"}
    if tool_type in storage_sub_dirs:
        path = os.path.join(path, storage_sub_dirs[tool_type])
    if create_dir:
        if os.path.exists(path):
            logging.info(f"Path already exists. Going to use {path} for {tool_type} storage")
        else:
            os.makedirs(path)
            logging.info(f"Storage created for {tool_type} at {path}")
    if operating_system != "windows":
        return path
    # Docker Desktop on Windows mounts host drives under /run/desktop/mnt/host:
    # lower-case the drive letter, flip backslashes to slashes and drop the colon.
    drive = path.split(':')[0]
    pvc_path = path.replace(drive, drive.lower())
    pvc_path = pvc_path.replace('\\', '/').replace(':', '')
    return f"/run/desktop/mnt/host/{pvc_path}"
def prepare_vcs_instances_for_helm_values(helm_values: HelmValue) -> List[VcsInstance]:
    """
    Prepare the vcs instances list in the shape expected by the helm chart
    :param helm_values:
        object of HelmValue holding the user-provided VCS instances
    :return: List[VcsInstance]
        Returns the list of VCS instance dicts for the helm values file
    :raises ValueError: if a VCS instance has an unsupported provider type
    """
    # Environment variable names that the helm chart uses to look up credentials,
    # keyed by provider type.
    credential_env_vars = {
        "GITHUB_PUBLIC": ("GITHUB_PUBLIC_USERNAME", "GITHUB_PUBLIC_TOKEN"),
        "AZURE_DEVOPS": ("AZURE_DEVOPS_USERNAME", "AZURE_DEVOPS_TOKEN"),
        "BITBUCKET": ("BITBUCKET_USERNAME", "BITBUCKET_TOKEN"),
    }
    vcs_instances: List[VcsInstance] = []
    for vcs in helm_values.vcs_instances:
        if vcs.provider_type not in credential_env_vars:
            # Previously an unknown provider type silently reused the env-var names
            # from the preceding iteration (or raised NameError on the first one).
            raise ValueError(f"Unsupported VCS provider type: {vcs.provider_type}")
        user_name, token = credential_env_vars[vcs.provider_type]
        vcs_instance_obj = {"name": vcs.provider_type, "scope": vcs.scope, "exceptions": [],
                            "providerType": vcs.provider_type, "hostname": vcs.host, "port": vcs.port,
                            "scheme": vcs.scheme, "username": user_name, "usernameValue": vcs.username,
                            "organization": vcs.organization, "token": token, "tokenValue": vcs.password}
        vcs_instances.append(vcs_instance_obj)
    return vcs_instances
def create_helm_values_yaml(helm_values: HelmValue, input_values_yaml_file: str) -> bool:
    """
    Generate the values yaml file used for the helm deployment of resc
    :param helm_values:
        object of HelmValue
    :param input_values_yaml_file:
        input values.yaml_file path
    :return: bool
        Returns True if the file was created, False otherwise
    :raises FileNotFoundError: if the example-values.yaml file was not found
    :raises KeyError: if any expected key was not found in the values dictionary
    """
    output_file_generated = False
    output_values_yaml_file = constants.VALUES_FILE
    helm_deployment_help_link = "https://github.com/abnamro/repository-scanner/" \
                                "blob/main/deployment/kubernetes/README.md"
    try:
        values_dict = read_yaml_file(input_values_yaml_file)
        # Each entry is (path-into-values_dict, value-to-set); applied in order so a
        # missing intermediate key raises KeyError exactly as direct assignment would.
        overrides = [
            (("resc-database", "hostOS"), helm_values.operating_system),
            (("resc-database", "database", "pvc_path"), helm_values.db_storage_path),
            (("resc-rabbitmq", "filemountType"), helm_values.operating_system),
            (("resc-rabbitmq", "rabbitMQ", "pvc_path"), helm_values.rabbitmq_storage_path),
            (("resc-database", "database", "config", "password"), helm_values.db_password),
            (("resc-database-init", "resc", "config", "dbPass"), helm_values.db_password),
            (("resc-web-service", "resc", "config", "dbPass"), helm_values.db_password),
            (("resc-web-service-no-auth", "resc", "config", "dbPass"), helm_values.db_password),
            (("resc-vcs-instances", "vcsInstances"),
             prepare_vcs_instances_for_helm_values(helm_values=helm_values)),
        ]
        for key_path, value in overrides:
            target = values_dict
            for key in key_path[:-1]:
                target = target[key]
            target[key_path[-1]] = value
        with open(output_values_yaml_file, "w", encoding="utf-8") as file_out:
            yaml.dump(values_dict, file_out)
        output_values_yaml_file_path = os.path.abspath(output_values_yaml_file)
        if os.path.exists(output_values_yaml_file_path):
            logging.info(f"Helm values yaml file has been successfully generated at {output_values_yaml_file_path}")
            logging.info(f"You can proceed with deployment or you can refer to this link "
                         f"to make any customizations in helm values yaml file: {helm_deployment_help_link}")
            output_file_generated = True
    except FileNotFoundError:
        logging.error(f"Aborting the program! {input_values_yaml_file} file was not found")
        sys.exit(1)
    except KeyError as error:
        logging.error(f"Aborting the program! {error} was missing in {input_values_yaml_file}")
        sys.exit(1)
    return output_file_generated
def read_yaml_file(file_path):
    """
    Read and parse a yaml file bundled with this package
    :param file_path:
        path of the yaml file, relative to this package
    :return: stream
        Returns the parsed yaml content
    """
    with pkg_resources.resource_stream(__name__, file_path) as file_in:
        return yaml.safe_load(file_in)
def get_scheme_host_port_from_url(url: str):
    """
    Get scheme, host and port from a url
    :param url:
        url of a VCS instance
    :return: str, str, str
        Returns scheme, host and port; when the url carries no explicit port the
        default is derived from the scheme (80 for http, 443 otherwise)
    """
    parsed = urlparse(url)
    if parsed.port:
        port = str(parsed.port)
    elif parsed.scheme == "http":
        # Previously plain-http urls without a port also defaulted to 443.
        port = "80"
    else:
        port = "443"
    return parsed.scheme, parsed.hostname, port
def get_vcs_instance_question_answers() -> List[VcsInstance]:
    """
    Collect the VCS instance details from the user prompts
    :return: List[VcsInstance]
        Returns the list of configured VCS instances
    """
    selected_vcs_types = questions.ask_user_to_select_vcs_instance()
    if not selected_vcs_types:
        logging.error("Aborting the program! No VCS instance was selected")
        sys.exit(1)
    vcs_instances: List[VcsInstance] = []
    for vcs_type in selected_vcs_types:
        details = questions.ask_vcs_instance_details(vcs_type=vcs_type)
        scheme, host, port = get_scheme_host_port_from_url(details["url"])
        # Determine provider type and scan scope per VCS flavour; only GitHub
        # prompts for an explicit list of accounts to scan.
        if vcs_type == "GitHub":
            provider_type = "GITHUB_PUBLIC"
            default_github_accounts = f"{details['username']}, kubernetes, docker"
            accounts_answer = questions.ask_which_github_accounts_to_scan(
                default_github_accounts=default_github_accounts)
            scope = [account.strip() for account in accounts_answer.split(",")]
        elif vcs_type == "Azure Devops":
            provider_type = "AZURE_DEVOPS"
            scope = []
        elif vcs_type == "Bitbucket":
            provider_type = "BITBUCKET"
            scope = []
        vcs_instances.append(VcsInstance(
            provider_type=provider_type,
            scheme=scheme,
            host=host,
            port=port,
            username=details["username"],
            password=details["token"],
            organization=details["organization"],
            scope=scope
        ))
    return vcs_instances
def download_rule_toml_file(url: str, file: str) -> bool:
    """
    Download the rule toml file
    :param url:
        url of the file to download
    :param file:
        path where the downloaded file will be written
    :return: bool
        Returns True if the rule file was downloaded successfully, False otherwise
    """
    downloaded = False
    verify_ssl = questions.ask_ssl_verification(msg="Do you want to verify SSL certificates for HTTPS requests?")
    response = requests.get(url, timeout=100, verify=verify_ssl)
    # Only persist the payload for successful responses; previously an HTTP error
    # body (e.g. a 404 page) was written to disk and could be reported as success.
    if response.ok:
        with open(file, "wb") as output:
            output.write(response.content)
        if os.path.exists(file) and os.path.getsize(file) > 0:
            downloaded = True
            logging.debug(f"{file} successfully downloaded")
    if not downloaded:
        logging.error("Unable to download the rule file")
    return downloaded
def run_deployment_as_per_user_confirmation():
    """
    Ask the user for confirmation and run the deployment only when confirmed
    """
    if questions.ask_user_confirmation(msg="Do you want to run deployment?") is True:
        run_deployment()
    else:
        logging.info("Skipping deployment...")
def run_deployment():
    """
    Run the helm deployment of resc
    :return: bool
        Returns True if the deployment succeeded, False otherwise
    """
    deployment_status = False
    rule_file_downloaded = download_rule_toml_file(url=constants.RULE_FILE_URL, file=constants.RULE_FILE)
    add_helm_repository()
    update_helm_repository()
    # Guard clauses: bail out early when a prerequisite step failed.
    if not rule_file_downloaded:
        return deployment_status
    if not create_namespace_if_not_exists(namespace_name=constants.NAMESPACE):
        return deployment_status
    if not check_helm_release_exists():
        # Fresh install, no confirmation needed.
        deployment_status = install_or_upgrade_helm_release(action="install")
        validate_helm_deployment_status()
        return deployment_status
    run_upgrade_confirm_msg = f"Release {constants.RELEASE_NAME} is already installed in " \
                              f"{constants.NAMESPACE} namespace. Do you want to upgrade the release?"
    if questions.ask_user_confirmation(msg=run_upgrade_confirm_msg) is True:
        deployment_status = install_or_upgrade_helm_release(action="upgrade")
        validate_helm_deployment_status()
    else:
        logging.info("Skipping deployment...")
    return deployment_status
import re
def password_validator(password: str):
    """
    Validate the database password strength
    :param password:
        password which needs to be validated
    :return: str or bool.
        Returns True when the password is valid, otherwise an error message
    """
    pattern = re.compile(r"^(?=.*[a-z])(?=.*[A-Z])(?=.*\d)(?=.*[@$!%*#?&])[A-Za-z\d@$!#%*?&]{8,128}$")
    if pattern.fullmatch(password):
        return True
    return "Password must contain at least one upper case, one lower case, one number, " \
           "one special character and the length of the password to be between 8 and 128"
def github_token_validator(token: str):
    """
    Validate a GitHub personal access token (classic or fine-grained format)
    :param token:
        token which needs to be validated
    :return: str or bool.
        Returns True when the token matches either format, otherwise an error message
    """
    patterns = (
        re.compile(r"^ghp_[a-zA-Z0-9]{36}$"),               # classic PAT
        re.compile(r"^github_pat_[a-zA-Z0-9]{22}_[a-zA-Z0-9]{59}$"),  # fine-grained PAT
    )
    if any(pattern.fullmatch(token) for pattern in patterns):
        return True
    return "Validation failed for provided GitHub token"
def azure_devops_token_validator(token: str):
    """
    Validate an Azure Devops personal access token
    :param token:
        token which needs to be validated
    :return: str or bool.
        Returns True when the token is 52 lower-case alphanumerics, otherwise an error message
    """
    token_pattern = re.compile(r"^[a-z0-9]{52}$")
    if token_pattern.fullmatch(token):
        return True
    return "Validation failed for provided Azure DevOps token"
def bitbucket_token_validator(token):
    """
    Validate a Bitbucket personal access token
    :param token:
        token which needs to be validated
    :return: str or bool.
        Returns True when the token is 40-50 chars with a digit, a lower- and an
        upper-case letter, otherwise an error message
    """
    token_pattern = re.compile(r"^(?=.*[0-9])(?=.*[a-z])(?=.*[A-Z]).{40,50}$")
    if token_pattern.fullmatch(token):
        return True
    return "Validation failed for provided Bitbucket token"
def github_account_name_validator(github_accounts):
    """
    Validate a comma separated list of GitHub account names
    :param github_accounts:
        comma separated list of GitHub accounts
    :return: str or bool.
        Returns True when every account is valid, otherwise an error message for
        the first invalid account
    """
    account_pattern = re.compile(r"^[a-zA-Z\d](?:[a-zA-Z\d]|-(?=[a-zA-Z\d])){0,38}$")
    for account in (elem.strip() for elem in github_accounts.split(",")):
        if account_pattern.fullmatch(account):
            continue
        if not account:
            # Empty entries come from stray commas in the input.
            return "Please enter a valid comma separated list of GitHub accounts you want to scan"
        return f"{account} is not a valid GitHub account. " \
               f"GitHub account must contain alphanumeric characters or single hyphens, " \
               f"can't begin or end with a hyphen and maximum 39 characters allowed."
    return True
def github_username_validator(username):
    """
    Validate a GitHub username
    :param username:
        username of the GitHub account
    :return: str or bool.
        Returns True when the username is valid, otherwise an error message
    """
    username_pattern = re.compile(r"^[a-zA-Z\d](?:[a-zA-Z\d]|-(?=[a-zA-Z\d])){0,38}$")
    if username_pattern.fullmatch(username):
        return True
    return f"{username} is not a valid GitHub username. " \
           f"GitHub username must contain alphanumeric characters or single hyphens, " \
           f"can't begin or end with a hyphen and maximum 39 characters allowed."
def vcs_url_validator(url):
    """
    Validate a VCS provider url
    :param url:
        url which needs to be validated
    :return: str or bool.
        Returns True when the url is a valid http(s) url, otherwise an error message
    """
    url_pattern = re.compile(
        r'^(?:http)s?://'  # Scheme
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # Domain
        r'localhost|'  # Localhost
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # IP address
        r'(?::\d+)?'  # Port (optional)
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)  # Path and query (optional)
    if url_pattern.fullmatch(url):
        return True
    return "Please provide a valid URL"
import os
# Third Party
import questionary
# First Party
from resc_helm_wizard import constants
from resc_helm_wizard.validator import (
azure_devops_token_validator,
bitbucket_token_validator,
github_account_name_validator,
github_token_validator,
github_username_validator,
password_validator,
vcs_url_validator
)
def ask_operating_system() -> str:
    """
    Prompt the user to pick the target environment's operating system
    :return: str
        Returns the selected operating system label
    """
    return questionary.select(
        message="Which operating system are you running on the target environment",
        choices=["Microsoft Windows", "macOS", "Linux"],
    ).unsafe_ask()
def ask_local_storage_path() -> str:
    """
    Prompt the user for the directory where the RESC local storage should live
    :return: str
        Returns the user provided local storage path (defaults to the home directory)
    """
    home_directory = os.path.expanduser('~')
    return questionary.path(
        message="Where would you like to create the local storage for RESC. default is ",
        default=home_directory,
        only_directories=True,
    ).unsafe_ask()
def ask_password_for_database() -> str:
    """
    Prompt the user for the database password (validated by password_validator)
    :return: str
        Returns the password entered by the user
    """
    return questionary.password(
        "Please enter the password you want to set for database",
        validate=password_validator,
    ).unsafe_ask()
def ask_user_confirmation(msg: str) -> bool:
    """
    Show a yes/no confirmation prompt
    :param msg:
        confirmation message to display
    :return: bool
        Returns True or False based on the user's choice
    """
    return questionary.confirm(msg).unsafe_ask()
def ask_user_to_select_vcs_instance() -> [str]:
    """
    Prompt the user to tick the VCS instances to scan
    :return: [str]
        Returns the list of selected VCS instance labels
    """
    vcs_choices = ["GitHub", "Azure Devops", "Bitbucket"]
    return questionary.checkbox(
        'Select VCS instance for which you want to run the scan',
        choices=vcs_choices,
        default="GitHub",
    ).unsafe_ask()
def ask_vcs_instance_details(vcs_type: str) -> dict:
    """
    Prompt the user for the connection details of a single VCS instance
    :param vcs_type:
        one of "GitHub", "Bitbucket" or "Azure Devops"
    :return: dict
        Returns the vcs instance info (url, organization, username, token)
    """
    username = "NA"
    organization = ""
    if vcs_type == "GitHub":
        url = questionary.text(f"Please enter {vcs_type} url",
                               default=constants.DEFAULT_GITHUB_URL,
                               validate=vcs_url_validator).unsafe_ask()
        username = questionary.text(f"What's your {vcs_type} username",
                                    validate=github_username_validator).unsafe_ask()
        token = questionary.password(f"Please enter your {vcs_type} personal access token",
                                     validate=github_token_validator).unsafe_ask()
    elif vcs_type == "Bitbucket":
        url = questionary.text(f"Please enter {vcs_type} url",
                               validate=vcs_url_validator).unsafe_ask()
        username = questionary.text(f"What's your {vcs_type} username").unsafe_ask()
        token = questionary.password(f"Please enter your {vcs_type} personal access token",
                                     validate=bitbucket_token_validator).unsafe_ask()
    elif vcs_type == "Azure Devops":
        url = questionary.text(f"Please enter {vcs_type} url",
                               default=constants.DEFAULT_AZURE_DEVOPS_URL,
                               validate=vcs_url_validator).unsafe_ask()
        organization = questionary.text(f"What's your organization name in {vcs_type}").unsafe_ask()
        token = questionary.password(f"Please enter your {vcs_type} personal access token",
                                     validate=azure_devops_token_validator).unsafe_ask()
    return {"url": url, "organization": organization, "username": username, "token": token}
def ask_which_github_accounts_to_scan(default_github_accounts: str) -> [str]:
    """
    Prompt the user for the GitHub account names to scan
    :param default_github_accounts:
        default comma separated list of account names shown in the prompt
    :return: [str]
        Returns the comma separated list of GitHub accounts entered by the user
    """
    return questionary.text(
        "Enter a comma separated list of GitHub accounts you want to scan",
        default=default_github_accounts,
        validate=github_account_name_validator,
    ).unsafe_ask()
def ask_ssl_verification(msg: str) -> bool:
    """
    Ask whether SSL certificates should be verified (defaults to yes)
    :param msg:
        confirmation message to display
    :return: bool
        Returns True or False based on the user's choice
    """
    return questionary.confirm(msg, default=True).unsafe_ask()
import logging.config
import sys
from distutils.sysconfig import get_python_lib
from os import path
from typing import Dict, List, Optional
# Third Party
import tomlkit
# First Party
from vcs_scanner.input_parser import parse_vcs_instances_file
from vcs_scanner.model import VCSInstanceRuntime
logger = logging.getLogger(__name__)
def get_logging_settings_path():
    """Return the path of the packaged logging.ini configuration file.

    Prefers the installed site-packages copy of the package when present,
    otherwise falls back to the directory containing this module.
    NOTE(review): distutils is deprecated (removed in Python 3.12) — consider
    sysconfig/importlib.resources as a follow-up.
    """
    site_packages_dir = get_python_lib() + "/vcs_scanner"
    # The original used path.isfile() here; a package directory is never a file,
    # so the site-packages branch could never be taken. isdir() restores the intent.
    if path.isdir(site_packages_dir):
        base_dir = site_packages_dir
    else:
        base_dir = path.dirname(__file__)
    return base_dir + "/static/logging.ini"
def generate_logger_config(log_file_path, debug=True):
    """Build the dictConfig-style logger configuration.

    Arguments:
        log_file_path {string} -- Path where the logs are to be stored

    Keyword Arguments:
        debug {bool} -- Whether the logging level should be set to DEBUG or INFO (default: {True})

    Returns:
        Dict -- A dictionary containing the logger configuration
    """
    level = "DEBUG" if debug else "INFO"
    formatter_name = "generic-log-formatter"
    console_handler = {
        "level": level,
        "class": "logging.StreamHandler",
        "formatter": formatter_name,
    }
    # Rotating file handler: 100 MiB per file, keeping 5 backups.
    file_handler = {
        "level": level,
        "class": "logging.handlers.RotatingFileHandler",
        "formatter": formatter_name,
        "filename": log_file_path,
        "maxBytes": 100 * 1024 * 1024,
        "backupCount": 5,
    }
    return {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            formatter_name: {
                "format": "[%(levelname)s] [%(name)s] [%(asctime)s] %(message)s"
            },
        },
        "handlers": {
            "console": console_handler,
            "file": file_handler,
        },
        "loggers": {
            "": {
                "handlers": ["console", "file"],
                "level": level,
                "propagate": True,
            },
        },
    }
def initialise_logs(log_file_path: str, debug=True):
    """Configure logging from the packaged ini file and return the root logger.

    :param log_file_path: path of the log file injected into the ini config
    :param debug: truthy flag; the root level is DEBUG only when int(debug) == 1
    :return: the configured root logger
    """
    logging.config.fileConfig(get_logging_settings_path(),
                              defaults={'log_file_path': log_file_path},
                              disable_existing_loggers=False)
    root_logger = logging.getLogger('root')
    # Preserve the original int() comparison: only the exact value 1 enables DEBUG.
    root_logger.setLevel(logging.DEBUG if int(debug) == 1 else logging.INFO)
    return root_logger
def load_vcs_instances(file_path: str) -> Dict[str, VCSInstanceRuntime]:
    """Parse the VCS instance definition file and index the instances by name.

    Exits the process when the file cannot be parsed into any instances.
    """
    instances: List[VCSInstanceRuntime] = parse_vcs_instances_file(file_path)
    if not instances:
        logger.critical(f"Exiting due to issues in VCS Instances definition in file {file_path}")
        sys.exit(-1)
    return {instance.name: instance for instance in instances}
def get_rule_pack_version_from_file(file_content: str) -> Optional[str]:
    """Extract the 'version' value from a TOML rule pack document, or None when absent."""
    parsed_toml = tomlkit.loads(file_content)
    return parsed_toml.get("version")
from graphene import Schema
from rescape_python_helpers import ramda as R
from rescape_graphene.schema_models.token_schema import RescapeTokenMutation, RescapeTokenQuery
from rescape_graphene.schema_models.user_schema import UserQuery, UserMutation
def create_query_mutation_schema(class_config):
    """
    Build the combined Query/Mutation classes and the graphene Schema for them.
    Each entry of class_config must be a dict with 'query' and 'mutation' keys
    pointing to graphene.ObjectType classes to mix in.
    :param class_config: dict of {name: dict(query=..., mutation=...)} overrides,
        e.g. user_group, user_group_state, region, project, location handlers
    :return: dict(query=QueryClass, mutation=MutationClass, schema=Schema)
    """
    query_and_mutation = create_query_and_mutation_classes(class_config)
    query = R.prop('query', query_and_mutation)
    mutation = R.prop('mutation', query_and_mutation)
    schema = Schema(query=query, mutation=mutation)
    return dict(query=query, mutation=mutation, schema=schema)
def create_schema(class_config):
    """Build only the graphene Schema object for the given class config."""
    query_mutation_schema = create_query_mutation_schema(class_config)
    return R.prop('schema', query_mutation_schema)
def create_query_and_mutation_classes(query_and_mutation_class_lookups):
    """
    Creates a Query class and Mutation class from defaults or allows overrides of any of these schemas
    Each value of query_and_mutation_class_lookups must provide a dict with a query and mutation key,
    each pointing to the override query and mutation graphene.ObjectType
    :param query_and_mutation_class_lookups: dict keyed by schema name (e.g. region, project, location),
    each value a dict(query=..., mutation=...) of graphene.ObjectType classes to mix in alongside the
    built-in User and RescapeToken schemas
    :return: A dict with query and mutation for the two dynamic classes
    """
    # NOTE(review): graphene presumably derives GraphQL type names from these class
    # names — confirm before renaming Query/Mutation.
    class Query(
        UserQuery,
        RescapeTokenQuery,
        *R.map_with_obj_to_values(
            lambda k, v: R.prop('query', v), query_and_mutation_class_lookups
        )
    ):
        pass

    class Mutation(
        UserMutation,
        RescapeTokenMutation,
        *R.map_with_obj_to_values(
            lambda k, v: R.prop('mutation', v), query_and_mutation_class_lookups
        )
    ):
        pass

    return dict(query=Query, mutation=Mutation)
from json import dumps
from django.db.models import ForeignKey
from django.utils.timezone import now
from rescape_python_helpers import ramda as R, compact
def find_scope_instances_by_id(model, scope_ids):
    """Return all instances of model (including soft-deleted ones) whose id is in scope_ids."""
    all_instances = model.objects.all_with_deleted()
    return all_instances.filter(id__in=scope_ids)
def find_scope_instances(user_state_scope, new_data):
    """
    Retrieve the scope instances to verify the Ids.
    Scope instances must have ids unless they are allowed to be created/updated
    during the userState mutation (such as searchLocations)
    :param new_data: The data to search
    :param user_state_scope: Dict with 'pick' in the shape of the instances we are looking for in new_data,
    e.g. dict(userRegions={region: True}) to search new_data.userRegions[] for all occurrences of {region:...}
    and 'key' which indicates the actual key of the instance (e.g. 'region' for regions),
    plus 'model' giving the Django model class used to look the ids up
    :return: dict(
        instances=Instances actually in the database,
    ) along with scope_objs, scope_ids and user_scope_path
    """
    # Stop flattening once we reach the configured scope key (e.g. 'region').
    def until(key, value):
        return key != R.prop('key', user_state_scope)

    # NOTE: R.compose applies right-to-left — read the steps from the bottom up.
    return R.compose(
        lambda scope_dict: dict(
            # See which instances with ids are actually in the database
            # If any are missing we have an invalid update or need to create those instances if permitted
            instances=list(
                find_scope_instances_by_id(R.prop('model', user_state_scope), scope_dict['scope_ids'])
            ),
            # The path from userRegions or userProjects to the scope instances, used to replace
            # a null update value with the existing values
            user_scope_path=list(R.keys(R.flatten_dct(user_state_scope, '.')))[0],
            **scope_dict
        ),
        lambda scope_objs: dict(
            # Unique by id or accept if there is no id, this loses data, but it's just for validation
            scope_objs=R.unique_by(lambda obj: R.prop_or(str(now()), 'id', obj['value']), scope_objs),
            scope_ids=R.unique_by(
                R.identity,
                compact(
                    R.map(
                        lambda scope_obj: R.prop_or(None, 'id', scope_obj['value']), scope_objs
                    )
                )
            )
        ),
        # Use the pick key property to find the scope instances in the data
        # If we don't match anything we can get null or an empty item. Filter/compact these out
        R.filter(
            lambda obj: obj['value'] and (not isinstance(obj['value'], list) or R.length(obj['value']) != 0)
        ),
        # Turn each (path, value) pair into a dict so downstream steps can address both parts by name
        R.map(
            lambda pair: dict(key=pair[0], value=pair[1])
        ),
        lambda flattened_data: R.to_pairs(flattened_data),
        # Pick the configured shape out of new_data, then flatten to dotted paths down to the scope key
        lambda data: R.flatten_dct_until(
            R.pick_deep_all_array_items(R.prop('pick', user_state_scope), data),
            until,
            '.'
        )
    )(new_data)
def validate_and_mutate_scope_instances(scope_instances_config, data):
    """
    Inspect the data and find all scope instances within data
    For UserState, for instance, this includes userRegions[*].region, userProject[*].project and within
    userRegions and userProjects userSearch.userSearchLocations[*].search_location and whatever the implementing
    libraries define in addition
    :param scope_instances_config: See user_state_schema.user_state_scope_instances_config for an example
    :param data: The instance data field containing the scope instances
    :return: The updated data with scope instances possibly created/updated if allowed. If creates occur
    then the scope instance will now have an id. Otherwise no changes are visible
    :raises Exception: if any scope instance referenced by id does not exist in the database
    """
    # For each configured scope, gather the matching instances referenced in data
    validated_scope_objs_instances_and_ids_sets = R.map(
        lambda scope_instance_config: find_scope_instances(scope_instance_config, data),
        scope_instances_config
    )

    # Some scope instances can be created or modified when embedded in the data. This helps
    # make mutation of the instance, such as UserState,
    # a one step process, so that new Projects, SearchLocations, etc. can
    # be created without having to call mutation for them separately ahead of times, which would create
    # a series of mutations that weren't failure-protected as a single transaction
    for i, validated_scope_objs_instances_and_ids in enumerate(validated_scope_objs_instances_and_ids_sets):
        # Only used for the error message below; model is replaced by its name for JSON serializability
        scope = R.merge(
            scope_instances_config[i],
            dict(model=scope_instances_config[i]['model'].__name__)
        )

        # If any scope instances with an id specified in new_data don't exist, throw an error
        if R.length(validated_scope_objs_instances_and_ids['scope_ids']) != R.length(
                validated_scope_objs_instances_and_ids['instances']):
            ids = R.join(', ', validated_scope_objs_instances_and_ids['scope_ids'])
            instances_string = R.join(', ', R.map(lambda instance: str(instance),
                                                  validated_scope_objs_instances_and_ids['instances']))
            raise Exception(
                f"For scope {dumps(scope)} Some scope ids among ids:[{ids}] being saved in user state do not exist. Found the following instances in the database: {instances_string or 'None'}. UserState.data is {dumps(data)}"
            )

        # Create/Update any scope instances that permit it
        model = scope_instances_config[i]['model']
        data = handle_can_mutate_related(
            model,
            scope,
            data,
            validated_scope_objs_instances_and_ids
        )
    return data
def handle_can_mutate_related(model, related_model_scope_config, data, validated_scope_objs_instances_and_ids):
    """
    Mutates the given related models of an instance if permitted
    See rescape-region's UserState for a working usage
    :param model: The related model
    :param related_model_scope_config: Configuration of the related model relative to the referencing instance
    :param data: The data containing the related models dicts to possibly mutate with
    :param validated_scope_objs_instances_and_ids: Config of the related objects that have been validated as
    existing in the database for objects not being created
    :return: Possibly mutates instances, returns data with newly created ids set
    """
    # Apply any configured unique_with functions so fields like keys become unique before saving
    def make_fields_unique_if_needed(scope_obj):
        # If a field needs to be unique, like a key, call it's unique_with method
        def x(key, value):
            func = R.item_str_path_or(None, f'field_config.{key}.unique_with', related_model_scope_config)
            if func:
                return func(scope_obj)[key]
            return value

        return R.map_with_obj(
            lambda key, value: x(key, value),
            scope_obj
        )

    # Rewrite nested foreign-key dicts (e.g. region={id: x}) as the raw id column (region_id=x)
    def convert_foreign_key_to_id(scope_obj):
        # Find ForeignKey attributes and map the class field name to the foreign key id field
        # E.g. region to region_id, user to user_id, etc
        converters = R.compose(
            R.from_pairs,
            R.map(
                lambda field: [field.name, field.attname]
            ),
            R.filter(
                lambda field: R.isinstance(ForeignKey, field)
            )
        )(model._meta.fields)
        # Convert scope_obj[related_field] = {id: x} to scope_obj[related_field_id] = x
        return R.from_pairs(
            R.map_with_obj_to_values(
                lambda key, value: [converters[key], R.prop('id', value)] if
                R.has(key, converters) else [key, value],
                scope_obj
            )
        )

    # Drop many-to-many keys from the save payload; they are set after save via .set() below
    def omit_to_many(scope_obj):
        return R.omit(R.map(R.prop('attname'), model._meta.many_to_many), scope_obj)

    # This indicates that scope_objs were submitted that didn't have ids
    # This is allowed if those scope_objs can be created/updated when the userState is mutated
    if R.prop_or(False, 'can_mutate_related', related_model_scope_config):
        for scope_obj_key_value in validated_scope_objs_instances_and_ids['scope_objs']:
            scope_obj = scope_obj_key_value['value']
            scope_obj_path = scope_obj_key_value['key']
            # Skip objects that carry nothing but an id: there is nothing to create or update
            if R.length(R.keys(R.omit(['id'], scope_obj))):
                modified_scope_obj = R.compose(
                    convert_foreign_key_to_id,
                    omit_to_many,
                    make_fields_unique_if_needed
                )(scope_obj)
                if R.prop_or(False, 'id', scope_obj):
                    # Update, we don't need the result since it's already in user_state.data
                    instance, created = model.objects.update_or_create(
                        defaults=R.omit(['id'], modified_scope_obj),
                        **R.pick(['id'], scope_obj)
                    )
                else:
                    # Create
                    instance = model(**modified_scope_obj)
                    instance.save()
                    # We need to replace the object
                    # passed in with an object containing the id of the instance
                    data = R.fake_lens_path_set(
                        scope_obj_path.split('.'),
                        R.pick(['id'], instance),
                        data
                    )
                for to_many in model._meta.many_to_many:
                    if to_many.attname in R.keys(scope_obj):
                        # Set existing related values to the created/updated instances
                        getattr(instance, to_many.attname).set(R.map(R.prop('id'), scope_obj[to_many.attname]))
    return data
from inflection import underscore
from rescape_python_helpers import ramda as R
from rescape_python_helpers.functional.ramda import pick_deep
import re
from rescape_graphene.graphql_helpers.schema_helpers import process_filter_kwargs
from graphql import format_error
def quiz_model_query(client, model_query_function, result_name, variables, expect_length=1):
    """
    Tests a query for a model, asserting that the filtered query returns exactly
    expect_length results and no graphql errors
    :param client: Apollo client
    :param model_query_function: Query function expecting the client and variables
    :param result_name: The name of the result object in the data object
    :param variables: key value variables for the query
    :param expect_length: Default 1. Optional number of items to expect
    :return: returns the result for further assertions
    """

    def _formatted_errors(query_result):
        # Render the graphql errors for a readable assertion message
        return R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', query_result)))

    # First make sure the unfiltered query runs cleanly
    unfiltered_result = model_query_function(client)
    assert not R.has('errors', unfiltered_result), _formatted_errors(unfiltered_result)
    # Now run with the given variables
    filtered_result = model_query_function(
        client,
        variables=variables
    )
    # Check against errors
    assert not R.has('errors', filtered_result), _formatted_errors(filtered_result)
    # Simple assertion that the query looks good
    assert R.length(R.item_path(['data', result_name], filtered_result)) == expect_length
    return filtered_result
def quiz_model_paginated_query(client, model_class, paginated_query, result_name, page_count_expected, props,
                               omit_props, order_by=None, page_size=1):
    """
    Tests a pagination query for a model with variables
    :param client: Apollo client
    :param model_class: Model class
    :param paginated_query: Model's pagination query
    :param page_count_expected: The number of pages expected when the page_size is 1, in other words the
    number of items in the database that match props
    :param result_name: The name of the results in data.[result_name].objects
    :param props: The props to query, not including pagination
    :param omit_props: Props to omit from assertions because they are nondeterminate
    :param order_by: Order by page-level prop
    :param page_size: Default 1
    :return the first result (first page) and final result (last page) for further testing:
    """
    # Query the first page
    result = paginated_query(
        client,
        variables=dict(
            page=1,
            page_size=page_size,
            order_by=order_by,
            objects=R.to_array_if_not(props)
        )
    )
    # Check against errors
    assert not R.has('errors', result), R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', result)))
    first_page_objects = R.item_path(['data', result_name, 'objects'], result)
    # Assert the first page holds exactly page_size objects
    assert page_size == R.compose(
        R.length,
        R.map(R.omit(omit_props)),
    )(first_page_objects)
    # Compute the ids of matching instances NOT served on the first page,
    # so we can later check the last page serves one of them.
    # NOTE(review): order_by.split(',') assumes order_by was passed; order_by=None would raise here — confirm callers
    remaining_ids = list(
        set(
            R.map(
                R.prop('id'),
                model_class.objects.filter(
                    *process_filter_kwargs(model_class, **R.map_keys(underscore, props))
                ).order_by(*order_by.split(','))
            )
        ) -
        # graphql ids arrive as strings, so coerce to int before the set difference
        set(R.map(R.compose(int, R.prop('id')), first_page_objects))
    )
    page_info = R.item_path(['data', result_name], result)
    # We have page_size pages so there should be a total number of pages
    # of what we specified for page_count_expected
    assert page_info['pages'] == page_count_expected
    assert page_info['hasNext'] == True
    assert page_info['hasPrev'] == False
    # Get the final page
    new_result = paginated_query(
        client,
        variables=dict(
            page=page_count_expected,
            page_size=page_info['pageSize'],
            order_by=order_by,
            objects=R.to_array_if_not(props)
        )
    )
    # Make sure the new_result matches one of the remaining ids
    assert R.contains(
        R.item_path(['data', result_name, 'objects', 0, 'id'], new_result),
        remaining_ids
    )
    new_page_info = R.item_path(['data', result_name], new_result)
    # Still expect the same page count
    assert new_page_info['pages'] == page_count_expected
    # Make sure it's the last page
    assert new_page_info['hasNext'] == False
    assert new_page_info['hasPrev'] == True
    return [result, new_result]
def quiz_model_versioned_query(client, model_class, model_query, result_name, version_count_expected, props,
                               omit_props):
    """
    Tests a versioned query for a model with variables
    :param client: Apollo client
    :param model_class: Model class
    :param model_query: Model's query that should return one result (as a filter)
    number of items in the database that match props
    :param result_name: The name of the results in data.[result_name].objects
    :param version_count_expected: The number of versions of the instance we expect
    :param props: The props to query to find a single instance. Should just be {id:...}
    :param omit_props: Props to omit from assertions because they are nondeterminate
    :return:
    """
    # Versioned queries wrap the filter props in objects=[{instance: props}]
    result = model_query(
        client,
        variables=dict(
            objects=R.to_array_if_not(dict(
                instance=props
            ))
        )
    )
    # Check against errors
    assert not R.has('errors', result), R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', result)))
    # Assert the expected number of versions came back for the single instance
    assert R.compose(
        R.length,
        R.item_str_path_or([], f'data.{result_name}.objects')
    )(result) == version_count_expected
def quiz_model_mutation_create(client, graphql_update_or_create_function, result_path, values,
                               second_create_results=None, second_create_does_update=False):
    """
    Tests a create mutation for a model
    :param client: The Apollo Client
    :param graphql_update_or_create_function: The update or create mutation function for the model. Expects client and input values
    :param result_path: The path to the result of the create in the data object (e.g. createRegion.region)
    :param values: The input values to use for the create
    :param second_create_results: Object, tests a second create if specified. Use to make sure that create with the same values
    creates a new instance or updates, depending on what you expect it to do.
    The values of this should be regexes that match the created instance
    :param second_create_does_update: Default False. If True expects a second create with the same value to update rather than create a new instance
    :return: Tuple with two return values. The second is null if second_create_results is False
    """
    result = graphql_update_or_create_function(client, values=values)
    result_path_partial = R.item_str_path(f'data.{result_path}')
    assert not R.has('errors', result), R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', result)))
    # Get the created value, using underscore to make the camelcase keys match python keys
    created = R.map_keys(
        lambda key: underscore(key),
        result_path_partial(result)
    )
    # get all the keys in values that are in created. This should match values if created has everything we expect
    assert values == pick_deep(created, values)
    # Try creating with the same values again, unique constraints will apply to force a create or an update will occur
    if second_create_results:
        # NOTE(review): values is passed positionally here but by keyword above — confirm the mutation function's
        # second positional parameter is values
        new_result = graphql_update_or_create_function(client, values)
        assert not R.has('errors', new_result), R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', new_result)))
        created_too = result_path_partial(new_result)
        if second_create_does_update:
            # Same instance means the second create updated
            assert created['id'] == created_too['id']
        if not second_create_does_update:
            # A distinct instance means the second create made a new one
            assert created['id'] != created_too['id']
        # Each flattened path in second_create_results is a regex that must match the second instance's value.
        # NOTE(review): re.match raises TypeError if the path resolves to None — confirm paths always resolve
        for path, value in R.flatten_dct(second_create_results, '.').items():
            assert re.match(value, R.item_str_path_or(None, path, created_too))
    else:
        new_result = None
    return result, new_result
def quiz_model_mutation_update(client, graphql_update_or_create_function, create_path, update_path, values,
                               update_values):
    """
    Tests an update mutation for a model by calling a create with the given values then an update
    with the given update_values (plus the create id)
    :param client: The Apollo Client
    :param graphql_update_or_create_function: The update or create mutation function for the model. Expects client and input values
    :param create_path: The path to the result of the create in the data object (e.g. createRegion.region)
    :param update_path: The path to the result of the update in the data object (e.g. updateRegion.region)
    :param values: The input values to use for the create
    :param update_values: The input values to use for the update. This can be as little as one key value
    :return:
    """
    result = graphql_update_or_create_function(client, values=values)
    assert not R.has('errors', result), R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', result)))
    # Extract the result and map the graphql keys to match the python keys
    created = R.compose(
        lambda r: R.map_keys(lambda key: underscore(key), r),
        lambda r: R.item_str_path(f'data.{create_path}', r)
    )(result)
    # Assert the created instance contains everything we submitted.
    # NOTE(review): argument order here (created, values) differs from the (update_values, updated) call
    # below — confirm pick_deep's signature; one of the two may be inverted
    assert values == pick_deep(created, values)
    # Update with the id and optionally key if there is one + update_values
    update_result = graphql_update_or_create_function(
        client,
        R.merge_all([
            dict(
                id=created['id']
            ),
            # Models with a key identify by it, so pass it along when present
            dict(
                key=created['key']
            ) if R.prop_or(False, 'key', created) else {},
            update_values
        ])
    )
    assert not R.has('errors', update_result), R.dump_json(R.map(lambda e: format_error(e), R.prop('errors', update_result)))
    updated = R.item_str_path(f'data.{update_path}', update_result)
    # The update must target the same instance that was created
    assert created['id'] == updated['id']
    # The updated instance must reflect every update value
    assert update_values == pick_deep(
        update_values,
        updated
    )
    return result, update_result
from inflection import camelize
from graphene import ObjectType, Scalar
import inspect
from rescape_python_helpers import ramda as R, map_keys_deep
import numbers
import json
def call_if_lambda(maybe_lambda):
    """
    Lazily evaluates the argument: when fields_dict or graphene_type is a lambda it
    needs lazy evaluation to prevent circular dependencies
    :param maybe_lambda: A zero-arg function to call, or any other value to return as-is
    :return: The called result when a function, else the value unchanged
    """
    if R.isfunction(maybe_lambda):
        return maybe_lambda()
    return maybe_lambda
def handleGrapheneTypes(key, value):
    """
    Handle related Graphene types by rendering the field as a graphql selection block.
    This is recursive since it calls dump_graphql_keys on the field's subfields
    :param key: The field name; camelized for the graphql document
    :param value: The field config dict; its 'fields' entry (possibly a lambda for lazy evaluation) is dumped
    :return: The graphql string for the field and its subfields
    """
    return '''%s {
        %s
    }''' % (camelize(key, False), dump_graphql_keys(call_if_lambda(R.prop('fields', value))))
def dump_graphql_keys(dct):
    """
    Convert a dict to a graphql input parameter keys in the form
    Also camelizes keys if the are slugs and handles complex types.
    If a value has read=IGNORE or read=DENY it is omitted
    key1
    key2
    key3
    key4 {
        subkey1
        ...
    }
    ...
    :param dct: keyed by field
    :return: The newline-joined graphql key strings
    """
    # Imported here rather than at module level to avoid a circular import
    from rescape_graphene.graphql_helpers.schema_helpers import IGNORE, DENY
    return R.join('\n', R.values(R.map_with_obj(
        dump_graphene_type,
        # Drop fields whose read permission is IGNORE or DENY
        R.filter_dict(
            lambda key_value: not R.compose(
                lambda v: R.contains(v, [IGNORE, DENY]),
                lambda v: R.prop_or(None, 'read', v)
            )(key_value[1]),
            dct
        )
    )))
def dump_graphene_type(key, value):
    """
    Dumps the graphql query representation of a scalar Graphene type or a complex type, in the latter case
    recursing
    :param key: The field name
    :param value: The field config dict
    :return: The graphql string for this field
    """
    typ = resolve_field_type(value)
    # Lambdas (lazily evaluated types) and ObjectType subclasses are complex and get a
    # recursive subfield block; anything else is a scalar rendered as a bare camelized key
    return handleGrapheneTypes(key, value) if \
        R.isfunction(typ) or (inspect.isclass(typ) and issubclass(typ, (ObjectType))) else \
        camelize(key, False)
def camelize_graphql_data_object(dct):
    """
    Recursively camelize all string keys of a dict for use as graphql input parameters
    :param dct: The dict whose keys are camelized at every depth
    :return: The dict with camelized keys
    """
    return map_keys_deep(lambda key, _: R.when(
        # Only camelize string keys; skip array indices
        R.isinstance(str),
        lambda k: camelize(k, False)
    )(key), dct)
def dump_graphql_data_object(dct):
    """
    Stringify a dict to a graphql input parameter key values in the form
    Also camelizes keys if the are slugs
    {"key1": "string value1", "key2": "number2", ...}
    :param dct: A dict, list, scalar, or None
    :return: The graphql string representation
    """
    # Use an identity check: None is a singleton and == can be overridden by operand types
    if dct is None:
        return 'null'
    elif isinstance(dct, dict):
        # Render each entry as camelizedKey: value, recursing on the value
        return '{%s}' % R.join(
            ', ',
            R.map(
                lambda key_value: R.join(
                    ': ',
                    [
                        camelize(quote(key_value[0]), False),
                        dump_graphql_data_object(key_value[1])
                    ]
                ),
                dct.items()
            )
        )
    elif isinstance(dct, list):
        # Render lists as [item, item, ...], recursing on each item
        return f"[{R.join(', ', R.map(lambda item: dump_graphql_data_object(item), dct))}]"
    else:
        # Scalars: quote strings, leave numbers/booleans as graphql literals
        return quote(dct)
def full_stack():
    """
    Return the current call stack as a formatted string, including the active
    exception (when called inside an except block) rendered the way the
    interpreter prints it.

    :return: A 'Traceback (most recent call last):' header followed by the
        formatted stack frames and, if an exception is being handled, the
        formatted exception text.
    """
    import traceback
    import sys
    exc = sys.exc_info()[0]
    # Drop the frame for full_stack() itself
    stack = traceback.extract_stack()[:-1]
    if exc is not None:
        # An exception is present: also remove the caller of full_stack, since
        # format_exc() below already reports the caught exception's caller
        del stack[-1]
    trc = 'Traceback (most recent call last):\n'
    stackstr = trc + ''.join(traceback.format_list(stack))
    if exc is not None:
        exc_text = traceback.format_exc()
        # Remove the duplicate header as an exact prefix. The previous code used
        # str.lstrip(trc), which strips *characters from the set* trc rather than
        # the prefix string, and could eat the start of the exception text.
        if exc_text.startswith(trc):
            exc_text = exc_text[len(trc):]
        stackstr += ' ' + exc_text
    return stackstr
def quote(value, tab=-1):
    """
    Quotes strings but not numbers or booleans.
    Dicts are rendered as
    key: value,
    key: value
    lines and lists as bracketed blocks, with each contained value recursively processed
    :param value: The value to render
    :param tab: Current indentation depth; incremented when recursing into containers
    :return: The rendered value (a string, except bare numbers which pass through)
    """
    # bool is a subclass of int, so it must be tested before Number
    if isinstance(value, bool):
        return str(value).lower()
    if isinstance(value, numbers.Number):
        return value
    if isinstance(value, dict):
        return quote_dict(value, tab + 1)
    if isinstance(value, (list, tuple)):
        return quote_list(value, tab + 1)
    return quote_str(value)
def quote_dict(dct, tab):
    """
    Recursively quotes dict values, rendering one key per indented line
    :param dct: The dict to render
    :param tab: Indentation depth (number of tabs)
    :return: The formatted block string
    """
    indent = '\t' * tab
    newline_indent = '\n%s' % indent

    def render_entry(key, value):
        # Nested dicts begin on a new indented line; every other value stays on the same line
        separator = newline_indent if isinstance(value, dict) else ' '
        return f'{camelize(key, False)}:{separator}{quote(value, tab)}'

    entries = newline_indent.join(render_entry(key, value) for key, value in dct.items())
    return f'{indent}{{\n{indent}{entries}\n{indent}}}'
def quote_list(lst, tab):
    """
    Recursively quotes list values, rendering one item per indented line
    :param lst: The list to render
    :param tab: Indentation depth (number of tabs)
    :return: The formatted bracketed string
    """
    indent = '\t' * tab
    rendered_items = [str(quote(item, tab)) for item in lst]
    joined = f'\n{indent}'.join(rendered_items)
    return f'[\n{indent}{joined}\n]'
def quote_str(str):
    """
    Wrap the given value in double quotes
    :param str: The value to wrap (note: parameter shadows the builtin str)
    :return: The double-quoted string
    """
    return f'"{str}"'
@R.curry
def resolve_field_type(field_config):
    """
    Resolve the graphene type from a field_config, preferring its 'type' key and
    falling back to 'graphene_type'
    :param field_config: A field config dict containing 'type' and/or 'graphene_type'
    :return: The resolved type
    :raise Exception: If the config has neither key (or both are falsy)
    """
    # Prefer 'type'; fall back to 'graphene_type'
    field_type = R.prop_or(R.prop_or(None, 'graphene_type', field_config), 'type', field_config)
    if not field_type:
        raise Exception(f'field_config {json.dumps(field_config)} lacks a type or graphene_type')
    return field_type
from operator import itemgetter
import graphene
from graphene import Int, ObjectType, List, Field
from graphene_django import DjangoObjectType
from rescape_python_helpers import ramda as R
from reversion.models import Version, Revision
from rescape_graphene.graphql_helpers.schema_helpers import DENY, merge_with_django_properties, \
top_level_allowed_filter_arguments
from rescape_graphene.schema_models.user_schema import UserType, user_fields
def get_versioner(single_object_qs, versions_type, **kwargs):
    """
    Creates a versions_type instance holding all the django-reversion Versions of the
    single model instance returned by the given queryset
    :param single_object_qs: The queryset that must return exactly one instance
    :param versions_type: Class created by create_version_container_type to hold all the versions of one model instance
    :param kwargs: Additional kwargs to versions_type, usually not needed
    :return: The versions_type instance with objects set to the list of Versions
    """
    # Take the single instance from the queryset
    instance = R.head(single_object_qs)
    # All archived versions of the instance, newest first per django-reversion's default ordering
    versions = Version.objects.get_for_object(instance)
    return versions_type(
        objects=list(versions),  # R.map(lambda version: version._object_version.object, list(versions)),
        **kwargs
    )
class RevisionType(DjangoObjectType):
    """
    Graphene type for django-reversion's Revision model
    """
    # Expose the primary key as an Int id field
    id = graphene.Int(source='pk')

    class Meta:
        model = Revision
# Merge the Revision Django properties with our field config
# Revision is managed by django-reversion and can never be updated from the API,
# hence every field denies create and update
revision_fields = merge_with_django_properties(RevisionType, dict(
    id=dict(create=DENY, update=DENY),
    date_created=dict(create=DENY, update=DENY),
    # This is a Foreign Key. Graphene generates these relationships for us, but we need it here to
    # support our Mutation subclasses and query_argument generation
    user=dict(graphene_type=UserType, fields=user_fields, create=DENY, update=DENY),
    comment=dict(create=DENY, update=DENY)
))
class VersionType(DjangoObjectType):
    """
    Graphene type for django-reversion's Version model
    """
    # Expose the primary key as an Int id field
    id = graphene.Int(source='pk')

    class Meta:
        model = Version
def create_version_type(model_object_type, model_object_type_fields):
    """
    Creates a Graphene type and field config representing one Version of the given model
    type: the revision metadata plus the archived instance itself
    :param model_object_type: The Graphene type of the versioned model
    :param model_object_type_fields: The field config of the versioned model
    :return: dict(type=the generated ObjectType subclass, fields=its field config)
    """
    # We can't assign Version as the Meta model because multiple classes would point at the same model,
    # which probably isn't allowed
    def resolve_instance(parent, info, **kwargs):
        # Deserialize the archived instance stored on the Version
        instance = parent._object_version.object
        # Inject the version so RevisionModelMixin knows how to handle
        instance._version = parent
        return instance

    version_type_model = type(
        f'VersionTypeModelFor{model_object_type.__name__}',
        (ObjectType,),
        dict(
            id=Int(),
            revision=Field(RevisionType),
            instance=Field(model_object_type, resolver=resolve_instance)
        )
    )
    # Merge the Revision Django properties with our field config
    versioned_fields = merge_with_django_properties(VersionType, dict(
        # Revision
        revision=dict(
            type=RevisionType,
            graphene_type=RevisionType,
            fields=revision_fields,
            type_modifier=lambda *type_and_args: Field(*type_and_args)
        ),
        instance=dict(
            type=model_object_type,
            graphene_type=model_object_type,
            fields=model_object_type_fields,
            type_modifier=lambda *type_and_args: Field(*type_and_args)
        )
    ))
    return dict(type=version_type_model, fields=versioned_fields)
def create_version_container_type(model_object_type, model_object_type_fields):
    """
    Creates the ObjectType and fields that hold all the versions of one instance
    :param model_object_type: The Graphene type of the versioned model
    :param model_object_type_fields: The field config of the versioned model
    :return: dict(type=the generated container ObjectType subclass, fields=its field config)
    """
    # Build the per-Version type first; the container simply lists it
    (version_type, version_type_fields) = itemgetter('type', 'fields')(
        create_version_type(model_object_type, model_object_type_fields)
    )
    versions_type_model = type(
        f'VersionContainerTypeModelFor{model_object_type.__name__}',
        (ObjectType,),
        dict(
            objects=List(version_type)
        )
    )
    # Merge the Revision Django properties with our field config
    versions_fields = merge_with_django_properties(VersionType, dict(
        # Versions
        objects=dict(
            type=version_type,
            graphene_type=version_type,
            fields=version_type_fields,
            type_modifier=lambda *type_and_args: List(*type_and_args)
        )
    ))
    return dict(type=versions_type_model, fields=versions_fields)
def resolve_version_instance(model_versioned_type, resolver, **kwargs):
    """
    Queries for the version instance for the given model type using the given resolver
    The kwargs must contain objects: [{id: the id}]
    :param model_versioned_type: Graphene model class created by create_version_container_type
    :param resolver: Resolver for the model
    :param kwargs: Must contain objects: [{id: the id}] to resolve the versions of the instance given by id
    :return: The model_versioned_type instance holding all versions of the single matching instance
    """
    # We technically receive an array but never accept more than the first item
    obj = R.head(R.prop('objects', kwargs))
    if not R.item_str_path_or(None, 'instance.id', obj):
        raise Exception(
            f"id required in kwargs.objects.instance for revisions query, but got: {kwargs}")
    # Create the filter that only returns 1 location
    objs = resolver('filter', **R.prop_or({}, 'instance', obj)).order_by('id')
    return get_versioner(
        objs,
        model_versioned_type,
    )
def versioning_allowed_filter_arguments(fields, graphene_type):
    """
    TODO no longer needed. version props filter props are filtered out in schema_helpers
    top_level_allowed_filter_arguments for versioned types so we don't add filters to the top-level
    props like revisionContains. We don't (currently) want a filter like revisionContains
    :param fields: The versioned type's field config
    :param graphene_type: The versioned Graphene type
    :return: The allowed filter arguments for the versioned type
    """
    return top_level_allowed_filter_arguments(fields, graphene_type)
import inspect
import uuid
from django.db.models import Q
from rescape_python_helpers import ramda as R
def default_strategy(matches, prop_value, i):
    """
    Fallback uniqueness strategy: append the first 10 characters of a uuid1 to the value
    :param matches: The existing matching values (unused by this strategy)
    :param prop_value: The value to make unique
    :param i: The attempt index (unused by this strategy)
    :return: prop_value with a 10-character uuid suffix appended
    """
    suffix = str(uuid.uuid1())[0:10]
    return f'{prop_value}{suffix}'
@R.curry
def increment_prop_until_unique(django_class, strategy, prop, additional_filter_props, django_instance_data):
    """
    Increments the given prop of the given django as given by data['prop'] until it matches nothing in
    the database. Note that this includes checks against soft deleted instances where the deleted prop is non-null
    (assumes the use of SafeDeleteModel on the model class)
    :param django_class: Django class to query
    :param prop: The prop to ensure uniqueness
    :param additional_filter_props: Other props, such as user id, to filter by. This allows incrementing a name
    dependent on the current user, for instance. This can be a dict or a function expecting the django_instance_data
    and returning a dict
    :param strategy: function to try to make a value unique. Expects all potential matching values--all values
    that begin with the value of the property--the prop value, and the current index. It's called for each matching
    value to guarentee the strategy will eventually get a unique value. For instance, if prop is key and it equals
    'foo', and 'foo', 'foo1', 'foo2', and 'foo3' are in the db, strategy will be called with an array of 4 values 4
    times, with index 0 through 3. If strategy is None the default strategy is to append index+1 to the duplicate name
    :param django_instance_data: The data containing the prop
    :return: The data merged with the uniquely named prop
    """
    prop_value = R.prop(prop, django_instance_data)
    pk = R.prop_or(None, 'id', django_instance_data)
    strategy = strategy or default_strategy
    # Include deleted objects here. It's up to additional_filter_props to deal with the deleted=date|None property
    all_objects = django_class.all_objects if R.has('all_objects', django_class) else django_class.objects
    matching_values = all_objects.filter(
        # Ignore value matching the pk if this is an update operation.
        # In other words we can update the key to what it already is, aka do nothing
        *R.compact([
            ~Q(id=pk) if pk else None,
        ]),
        **R.merge(
            {'%s__startswith' % prop: prop_value},
            # Give the filter props the instance if they are a function
            R.when(
                lambda f: inspect.isfunction(f),
                lambda f: f(django_instance_data)
            )(additional_filter_props or {})
        )
    ).values_list(prop, flat=True).order_by(prop)
    # If nothing matched, the loop below never runs and the original value survives.
    # NOTE(review): any startswith match (even a mere prefix-sibling like 'foo2' for 'foo')
    # triggers a rename via the strategy — confirm this is intended
    success = prop_value
    for i, matching_key in enumerate(matching_values):
        success = None
        attempt = strategy(matching_values, prop_value, i)
        if attempt not in matching_values:
            success = attempt
            break
    if not success:
        raise Exception("Could not generate unique prop value %s. The following matching ones exist %s" % (
            prop_value, matching_values))
    return R.merge(django_instance_data, {prop: success})
def enforce_unique_props(property_fields, django_instance_data):
    """
    Called in the mutate function of the Graphene Type class. Ensures that all properties marked
    as unique_with are made unique
    :param property_fields: The Graphene Type property fields dict. This is checked for unique_with,
    which when present points at a function that expects the django_instance_data and returns the django_instance_data
    modified so that the property in question has a unique value.
    :param django_instance_data: dict of an instance to be created or updated
    :return: The modified django_instance_data for any property that needs to have a unique value
    """
    # If any prop needs to be unique then run its unique_with function, which updates it to a unique value
    # by querying the database for duplicates. This is mainly for non-pk fields like a key.
    # Each reduction step only applies when the prop is present in the data AND configured with unique_with
    return R.reduce(
        lambda reduced, prop_field_tup: prop_field_tup[1]['unique_with'](reduced) if
        R.has(prop_field_tup[0], reduced) and R.prop_or(False, 'unique_with', prop_field_tup[1]) else
        reduced,
        django_instance_data,
        property_fields.items()
    )
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from rescape_python_helpers import ramda as R
from graphene import Int, Boolean, ObjectType, List, String
from rescape_graphene.graphql_helpers.schema_helpers import DENY, top_level_allowed_filter_arguments
def get_paginator(qs, page_size, page, paginated_type, order_by, **kwargs):
    """
    Adapted from https://gist.github.com/mbrochh/f92594ab8188393bd83c892ef2af25e6
    Creates an instance of paginated_type for one page of the given queryset
    :param qs: The queryset to paginate
    :param page_size: Number of objects per page
    :param page: The 1-based page number to return
    :param paginated_type: The type built by create_paginated_type_mixin
    :param order_by: default id. Optional kwarg to order by in django format as a string, e.g. '-key,+name'
    :param kwargs: Additional kwargs to pass paginated_type function, usually unneeded
    :return: The paginated_type instance for the requested page
    """
    # Order deterministically (default by id) so page boundaries are stable between calls
    p = Paginator(qs.order_by(*(order_by or 'id').split(',')), page_size)
    try:
        page_obj = p.page(page)
    except PageNotAnInteger:
        # Non-integer page argument: fall back to the first page
        page_obj = p.page(1)
    except EmptyPage:
        # Out-of-range page argument: fall back to the last page
        page_obj = p.page(p.num_pages)
    return paginated_type(
        page=page_obj.number,
        pages=p.num_pages,
        page_size=page_size,
        has_next=page_obj.has_next(),
        has_prev=page_obj.has_previous(),
        objects=page_obj.object_list,
        # order_by is not a field of the paginated type, so drop it from passthrough kwargs
        **R.omit(['order_by'], kwargs)
    )
def create_paginated_type_mixin(model_object_type, model_object_type_fields):
    """
    Constructs a PaginatedTypeMixin class and the fields object (for use in allowed filtering).
    The pagination is for the given model_object_type
    :param model_object_type: E.g. LocationType
    :param model_object_type_fields: The fields of the model_object_type, e.g. location_fields
    :return: An object containing {type: The class, fields: The field}
    """
    """
    Mixin for adding pagination to any Graphene Type
    """
    # Dynamically subclass ObjectType with the pagination props plus an objects list of the model type
    paginated_type_mixin = type(
        f'PaginatedTypeMixinFor{model_object_type.__name__}',
        (ObjectType,),
        dict(
            # order_by is extracted for ordering in django style, like '+key,-name'
            order_by=String(required=False),
            page_size=Int(),
            page=Int(),
            pages=Int(),
            has_next=Boolean(),
            has_prev=Boolean(),
            objects=List(model_object_type),
        )
    )
    # Pagination metadata can never be written via the API, so deny create/update throughout
    paginated_fields = dict(
        page_size=dict(type=Int, graphene_type=Int, create=DENY, update=DENY),
        order_by=dict(type=String, graphene_type=String, create=DENY, update=DENY),
        page=dict(type=Int, graphene_type=Int, create=DENY, update=DENY),
        pages=dict(type=Int, graphene_type=Int, create=DENY, update=DENY),
        has_next=dict(type=Boolean, graphene_type=Boolean, create=DENY, update=DENY),
        has_prev=dict(type=Boolean, graphene_type=Boolean, create=DENY, update=DENY),
        objects=dict(
            type=model_object_type,
            graphene_type=model_object_type,
            fields=model_object_type_fields,
            type_modifier=lambda *type_and_args: List(*type_and_args)
        )
    )
    return dict(type=paginated_type_mixin, fields=paginated_fields)
def resolve_paginated_for_type(paginated_type, type_resolver, **kwargs):
    """
    Resolver for paginated types
    :param paginated_type: The paginated Type, e.g. LocationPaginationType
    :param type_resolver: The resolver for the non-paginated type, e.g. location_resolver
    :param kwargs: The kwargs Array of prop sets for the non-paginated objects in 'objects'.
    Normally it's just a 1-item array.
    Other required kwargs are for pagination are page_size and page and optional order_by
    :return: The paginated query
    """

    def reduce_or(q_expressions):
        # Combine the querysets with | so instances matching ANY prop set are included
        return R.reduce(
            lambda qs, q: qs | q if qs else q,
            None,
            q_expressions
        )

    objects = R.prop_or({}, 'objects', kwargs)
    # One filtered queryset per prop set, OR'd together
    instances = reduce_or(R.map(
        lambda obj: type_resolver('filter', **obj),
        objects
    ))
    # NOTE(review): order_by is documented optional but fetched with R.prop — confirm R.prop
    # returns None (rather than raising) for a missing key
    return get_paginator(
        instances,
        R.prop('page_size', kwargs),
        R.prop('page', kwargs),
        paginated_type,
        R.prop('order_by', kwargs)
    )
def pagination_allowed_filter_arguments(fields, graphene_type):
    """
    # TODO Filtering in schema_helpers keeps page variables from having filters, so this function isn't
    # really needed now
    top_level_allowed_filter_arguments for paginated types so we don't add filters to the top-level
    props like page. We don't want a filter like pageContains
    :param fields: The paginated type's field config
    :param graphene_type: The paginated Graphene type
    :return: The allowed filter arguments for the paginated type
    """
    return top_level_allowed_filter_arguments(fields, graphene_type)
import graphene
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import AnonymousUser
from graphene import InputObjectType, ObjectType
from graphene_django.types import DjangoObjectType
from graphql_jwt.decorators import login_required, staff_member_required
from rescape_python_helpers import ramda as R
from .django_object_type_revisioned_mixin import reversion_types, DjangoObjectTypeRevisionedMixin
from ..django_helpers.write_helpers import increment_prop_until_unique
from ..graphql_helpers.schema_helpers import input_type_fields, REQUIRE, DENY, CREATE, \
merge_with_django_properties, input_type_parameters_for_update_or_create, UPDATE, \
guess_update_or_create, graphql_update_or_create, graphql_query, update_or_create_with_revision, \
top_level_allowed_filter_arguments, query_with_filter_and_order_kwargs
class UserType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
    """
    Graphene type for the configured Django auth user model, with revision support
    """
    # Expose the primary key as an Int id field
    id = graphene.Int(source='pk')

    class Meta:
        model = get_user_model()
# Merge the User Django model properties with our API field config.
# password is write-only (read=DENY); date_joined is server-managed and never writable
user_fields = merge_with_django_properties(UserType, dict(
    id=dict(create=DENY, update=[REQUIRE]),
    # usernames must be unique, so increment until no collision exists
    username=dict(create=[REQUIRE], unique_with=increment_prop_until_unique(get_user_model(), None, 'username', {})),
    password=dict(create=[REQUIRE], read=DENY),
    email=dict(create=[REQUIRE]),
    is_superuser=dict(),
    # NOTE(review): first_name/last_name use bare REQUIRE while other fields use [REQUIRE] — confirm
    # merge_with_django_properties accepts both forms
    first_name=dict(create=REQUIRE),
    last_name=dict(create=REQUIRE),
    is_staff=dict(),
    is_active=dict(),
    date_joined=dict(create=DENY, update=DENY),
    **reversion_types
))
class UserQuery(ObjectType):
    """
    Query endpoints for users: the current logged-in user, and an admin-only
    filterable users list
    """
    current_user = graphene.Field(
        UserType,
        **top_level_allowed_filter_arguments(user_fields, UserType)
    )
    users = graphene.List(
        UserType,
        **top_level_allowed_filter_arguments(user_fields, UserType)
    )

    @staff_member_required
    def resolve_users(self, info, **kwargs):
        """
        Admin API method to filter by users. Restricted to staff members
        :param self:
        :param info:
        :param kwargs: Filter and ordering arguments
        :return: The matching users
        """
        return query_with_filter_and_order_kwargs(get_user_model(), **kwargs)

    def resolve_current_user(self, info):
        """
        Resolve the current user or return None if there isn't one
        :param self:
        :param info:
        :return: The current user or None
        """
        context = info.context
        user = R.prop_or(None, 'user', context)
        # AnonymousUser means nobody is logged in, so report None instead
        return user if not isinstance(user, AnonymousUser) else None
# Configuration used to generate the graphql mutation documents for users
user_mutation_config = dict(
    class_name='User',
    crud={
        CREATE: 'createUser',
        UPDATE: 'updateUser'
    },
    # Picks create vs update based on the submitted values (e.g. presence of an id)
    resolve=guess_update_or_create
)
class UpsertUser(graphene.Mutation):
    """
    Abstract base class for the create/update User mutations.
    Hashes any submitted password and creates or updates the user under a revision.
    """
    user = graphene.Field(UserType)

    @login_required
    def mutate(self, info, user_data=None):
        """
        Create or update the user from user_data
        :param info: Graphene resolve info (provides the request context)
        :param user_data: The input values; if a password is present it is hashed before storage
        :return: An UpsertUser holding the created/updated user
        """
        user_model = get_user_model()
        # Hash the password if one was submitted. make_password generates a random
        # salt per call; the previous fixed salt ('not_random') made hashes
        # deterministic and vulnerable to precomputed-hash attacks. Verification is
        # unaffected because check_password reads the salt from the stored hash.
        data = R.merge(
            user_data,
            dict(password=make_password(R.prop('password', user_data))) if
            R.prop_or(False, 'password', user_data) else
            {}
        )
        update_or_create_values = input_type_parameters_for_update_or_create(user_fields, data)
        # Save within a django-reversion revision so the change is versioned
        user, created = update_or_create_with_revision(user_model, update_or_create_values)
        return UpsertUser(user=user)
class CreateUser(UpsertUser):
    """
    Create User mutation class
    """
    class Arguments:
        # Dynamically-built InputObjectType exposing the fields allowed for CREATE
        user_data = type('CreateUserInputType', (InputObjectType,), input_type_fields(user_fields, CREATE, UserType))(
            required=True)
class UpdateUser(UpsertUser):
    """
    Update User mutation class
    """
    class Arguments:
        # Dynamically-built InputObjectType exposing the fields allowed for UPDATE
        user_data = type('UpdateUserInputType', (InputObjectType,), input_type_fields(user_fields, UPDATE, UserType))(
            required=True)
class UserMutation(ObjectType):
    """Mutation root exposing the User create/update mutations."""
    create_user = CreateUser.Field()
    update_user = UpdateUser.Field()
# Pre-built graphql mutation/query document strings for the User type.
# (Fix: stray dataset-extraction residue was fused onto the last line,
# breaking the module's syntax.)
graphql_update_or_create_user = graphql_update_or_create(user_mutation_config, user_fields)
graphql_query_users = graphql_query(UserType, user_fields, 'users')
graphql_query_current_user = graphql_query(UserType, user_fields, 'currentUser')
import graphene
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import Group
from graphene import InputObjectType
from graphene_django.types import DjangoObjectType
from rescape_python_helpers import ramda as R
from rescape_graphene.django_helpers.write_helpers import increment_prop_until_unique
from rescape_graphene.graphql_helpers.schema_helpers import input_type_fields, REQUIRE, DENY, CREATE, \
merge_with_django_properties, input_type_parameters_for_update_or_create, UPDATE, \
guess_update_or_create, graphql_update_or_create, graphql_query, update_or_create_with_revision
from rescape_graphene.schema_models.django_object_type_revisioned_mixin import reversion_types, \
DjangoObjectTypeRevisionedMixin
class GroupType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
    """Graphene type exposing django.contrib.auth Group."""
    class Meta:
        model = Group
# Field-level create/update config for GroupType, merged with the Django
# model's own properties
group_fields = merge_with_django_properties(GroupType, dict(
    id=dict(create=DENY, update=[REQUIRE]),
    name=dict(create=[REQUIRE], unique_with=increment_prop_until_unique(Group, None, 'name', {})),
    **reversion_types
))
# Config consumed by graphql_update_or_create to build the createGroup and
# updateGroup mutation documents for the Group type
group_mutation_config = dict(
    class_name='Group',
    crud={
        CREATE: 'createGroup',
        UPDATE: 'updateGroup'
    },
    resolve=guess_update_or_create
)
class UpsertGroup(graphene.Mutation):
    """
    Abstract base class for the create/update Group mutations. Subclasses
    supply the Arguments input type; this class performs the actual upsert.
    """
    # The mutated group, returned to the GraphQL client
    group = graphene.Field(GroupType)
    def mutate(self, info, group_data=None):
        # Fix: pass the model class (not an instance) to
        # update_or_create_with_revision, matching the analogous
        # UpsertUser.mutate which passes get_user_model()
        group_model = Group
        # NOTE(review): this password branch appears copy-pasted from the User
        # mutation; Group has no password field, so it is normally a no-op —
        # confirm and consider removing. NOTE(review): unlike UpsertUser,
        # this mutation is not @login_required — confirm that is intentional.
        data = R.merge(
            group_data,
            dict(password=make_password(R.prop('password', group_data), salt='not_random'))
            if R.prop_or(False, 'password', group_data)
            else {}
        )
        update_or_create_values = input_type_parameters_for_update_or_create(group_fields, data)
        group, created = update_or_create_with_revision(group_model, update_or_create_values)
        return UpsertGroup(group=group)
class CreateGroup(UpsertGroup):
    """
    Create Group mutation class
    """
    class Arguments:
        # Dynamically-built InputObjectType exposing the fields allowed for CREATE
        group_data = type('CreateGroupInputType', (InputObjectType,),
                          input_type_fields(group_fields, CREATE, GroupType))(
            required=True)
class UpdateGroup(UpsertGroup):
    """
    Update Group mutation class
    """
    class Arguments:
        # Dynamically-built InputObjectType exposing the fields allowed for UPDATE
        group_data = type('UpdateGroupInputType', (InputObjectType,),
                          input_type_fields(group_fields, UPDATE, GroupType))(
            required=True)
# Pre-built graphql mutation/query document strings for the Group type.
# (Fix: stray dataset-extraction residue was fused onto the last line,
# breaking the module's syntax.)
graphql_update_or_create_group = graphql_update_or_create(group_mutation_config, group_fields)
graphql_query_groups = graphql_query(GroupType, group_fields, 'groups')
import json
import graphene
from graphene import String, List
from graphql.language import ast
from rescape_python_helpers import ramda as R
from rescape_python_helpers import geometrycollection_from_feature_collection
from rescape_graphene.graphql_helpers.json_field_helpers import resolver_for_dict_list
from rescape_graphene.graphql_helpers.schema_helpers import type_modify_fields
from rescape_graphene.schema_models.geojson.types.geojson_data_schema import FeatureDataType, feature_data_type_fields
# Public API of this module
__all__ = [
    'GrapheneFeatureCollection',
    'FeatureCollectionDataType',
]
class GrapheneFeatureCollection(graphene.Scalar):
    """
    Graphene representation for a GeoDjango FeatureCollection
    """
    class Meta:
        description = """
        `Geometry` scalar may be represented in a few ways:
        - Well-known text (WKT)
        - Hexadecimal (HEX)
        - GeoJSON
        """
    @classmethod
    def serialize(cls, value):
        # value exposes a .geojson property (a JSON string); decode to a dict
        return json.loads(value.geojson)
    @classmethod
    def parse_literal(cls, node):
        # Only string literals in the query document are accepted
        if isinstance(node, ast.StringValue):
            return cls.parse_value(node.value)
        return None
    @classmethod
    def parse_value(cls, value):
        # NOTE(review): reads value['geometries'] (a GeometryCollection-like
        # shape) and wraps each geometry as a Feature — confirm callers pass
        # that shape rather than a full FeatureCollection with 'features'
        return geometrycollection_from_feature_collection(
            dict(type='FeatureCollection', features=R.map(
                lambda geometry: dict(type='Feature', geometry=geometry),
                value['geometries'])
            )
        )
# Graphene field config for a geojson FeatureCollection blob
feature_collection_data_type_fields = dict(
    # type is always 'FeatureCollection'
    type=dict(type=String),
    # The list of Feature dicts, resolved from the raw dict data
    features=dict(
        type=FeatureDataType,
        graphene_type=FeatureDataType,
        fields=feature_data_type_fields,
        type_modifier=lambda *type_and_args: List(*type_and_args, resolver=resolver_for_dict_list)
    ),
    generator=dict(type=String),
    copyright=dict(type=String)
)
# Graphene ObjectType representing a geojson blob holding a feature collection.
# (Fix: stray dataset-extraction residue was fused onto the closing line,
# breaking the module's syntax.)
FeatureCollectionDataType = type(
    'FeatureCollectionDataType',
    (graphene.ObjectType,),
    type_modify_fields(feature_collection_data_type_fields)
)
from graphql.language.ast import ListValue
from rescape_python_helpers import ramda as R
import graphene
# Public API of this module
__all__ = [
    'GeometryCoordinates'
]
class GeometryCoordinates(graphene.Scalar):
    """
    Graphene representation for a GeoDjango GeometryField, which can contain the feature of a geojson blob
    """
    class Meta:
        description = """
        Coordinates respresent a Point, LineString, Polygon, Multipolygon, etc features of a blob. It thus supports
        and arbitrary number of embedded arrays with endpoints being Floats to represent coordinates
        """
    @classmethod
    def serialize(cls, value):
        # Do nothing, let the view serializer to the arrays to json
        return value
    @classmethod
    def parse_literal(cls, node):
        """
        Parses any array string
        :param node:
        :return:
        """
        def map_value(value):
            """
            value is either a ListValue with values that are ListValues or a ListValue with values that
            are FloatValues
            :param value:
            :return:
            """
            def handle_floats(v):
                if hasattr(v, 'values'):
                    # Multiple floats:w
                    return R.map(
                        lambda fv: float(fv.value),
                        v.values
                    )
                else:
                    # Single float
                    return float(v.value)
            return R.if_else(
                lambda v: R.isinstance(ListValue, R.head(v.values) if hasattr(v, 'values') else v),
                # ListValues
                lambda v: [reduce(v.values)],
                # FloatValues or single FloatValue
                lambda v: [handle_floats(v)]
            )(value)
        def reduce(values):
            return R.reduce(
                lambda accum, list_values: R.concat(accum, map_value(list_values)),
                [],
                values
            )
        # Create the coordinates by reducing node.values=[node.values=[node.floats], node.value, ...]
        # NOTE(review): the outer R.reduce's lambda ignores both the
        # accumulator and the iterated value (it closes over node.values), so
        # the result is just reduce(node.values) whenever the iterable is
        # non-empty — confirm this is intended. Code kept byte-identical; the
        # fix in this revision is removing dataset-extraction residue that was
        # fused onto the final line and broke the module's syntax.
        return R.reduce(
            lambda accum, list_values: reduce(node.values),
            [],
            reduce(node.values)
        )
    @classmethod
    def parse_value(cls, value):
        return value
import importlib
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from rescape_python_helpers import ewkt_from_feature
from rescape_python_helpers.geospatial.geometry_helpers import ewkt_from_feature_collection
from rescape_python_helpers import ramda as R
def geos_feature_geometry_default():
    """
    The default geometry is a polygon of the earth's extent
    :return: The ewkt string produced by ewkt_from_feature
    """
    # NOTE(review): GeoJSON coordinate order is [longitude, latitude]; these
    # pairs read as [latitude, longitude] (second values reach ±180) — confirm
    # downstream consumers expect this ordering before changing anything
    return ewkt_from_feature(
        {
            "type": "Feature",
            "geometry": {
                "type": "Polygon", "coordinates": [[[-85, -180], [85, -180], [85, 180], [-85, 180], [-85, -180]]]
            }
        }
    )
def geos_feature_collection_geometry_default():
    """
    Default FeatureCollection as ewkt representing the entire world
    :return: The ewkt string
    """
    default_collection = feature_collection_default()
    return ewkt_from_feature_collection(default_collection)
def feature_collection_default():
    """
    Default GeoJSON FeatureCollection: a single Polygon feature spanning the
    whole-world extent.
    :return: A GeoJSON-style dict of type 'FeatureCollection'
    """
    # NOTE(review): coordinate pairs read as [latitude, longitude]; GeoJSON
    # convention is [longitude, latitude] — values preserved as-is
    world_polygon = dict(
        type='Polygon',
        coordinates=[[[-85, -180], [85, -180], [85, 180], [-85, 180], [-85, -180]]]
    )
    return dict(
        type='FeatureCollection',
        features=[dict(type='Feature', geometry=world_polygon)]
    )
def region_data_default():
    """
    Default data blob for a Region: one placeholder locations/params entry
    prompting the editor to fill in or delete each key.
    :return: dict of shape {'locations': {'params': [ {...} ]}}
    """
    placeholder_params = dict(
        country="ENTER A COUNTRY OR REMOVE THIS KEY/VALUE",
        state="ENTER A STATE/PROVINCE ABBREVIATION OR REMOVE THIS KEY/VALUE",
        city="ENTER A CITY OR REMOVE THIS KEY/VALUE",
        neighborhood="ENTER A NEIGHBORHOOD OR REMOVE THIS KEY/VALUE",
        blockname="ENTER A BLOCKNAME OR REMOVE THIS KEY/VALUE"
    )
    return dict(locations=dict(params=[placeholder_params]))
def project_data_default():
    """Default (empty) data blob for a Project."""
    return {}
def user_state_data_default():
    """Default data blob for a UserState: no user regions yet."""
    return {'userRegions': []}
def settings_data_default():
    """Default (empty) data blob for a Settings instance."""
    return {}
def group_state_data_default():
    """Default (empty) data blob for a GroupState instance."""
    return {}
def get_region_model():
    """
    Uses the same technique as get_user_model() to get the current region model from settings
    :return: The Region model class configured by settings.REGION_MODEL
    :raise ImproperlyConfigured: if the setting is malformed or not installed
    """
    try:
        model_path = settings.REGION_MODEL
        return apps.get_model(model_path, require_ready=False)
    except ValueError:
        raise ImproperlyConfigured("REGION_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            f"REGION_MODEL refers to model '{settings.REGION_MODEL}' that has not been installed"
        )
def get_project_model():
    """
    Uses the same technique as get_user_model() to get the current project model from settings
    :return: The Project model class configured by settings.PROJECT_MODEL
    :raise ImproperlyConfigured: if the setting is malformed or not installed
    """
    try:
        return apps.get_model(settings.PROJECT_MODEL, require_ready=False)
    except ValueError:
        raise ImproperlyConfigured("PROJECT_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            # Fix: message previously said PROJECT_USER_MODEL; the setting is PROJECT_MODEL
            "PROJECT_MODEL refers to model '%s' that has not been installed" % settings.PROJECT_MODEL
        )
def get_location_schema():
    """
    Uses the same technique as get_user_model() to get the current location model from settings
    :return: The config object referenced by the dotted path in settings.LOCATION_SCHEMA_CONFIG
    :raise ImproperlyConfigured: if the setting is malformed or unresolvable
    """
    try:
        modules = settings.LOCATION_SCHEMA_CONFIG.split('.')
        # Import everything up to the last path segment, then pull the
        # attribute named by the last segment off the module
        containing_module = importlib.import_module(R.join('.', R.init(modules)))
        return getattr(containing_module, R.last(modules))
    except ValueError:
        raise ImproperlyConfigured('''settings.LOCATION_SCHEMA_CONFIG must point to the location schema config containing
        {
            model_class=Location,
            graphene_class=LocationType,
            graphene_fields=location_fields,
        }
        ''')
    except LookupError:
        raise ImproperlyConfigured(
            "settings.LOCATION_SCHEMA_CONFIG refers to model '%s' that has not been installed" % settings.LOCATION_SCHEMA_CONFIG
        )
def get_location_for_project_schema():
    """
    Like get_location_schema() but without reverse relationships that cause circular dependencies
    :return: The config object referenced by settings.LOCATION_SCHEMA_FOR_PROJECT_CONFIG
    :raise ImproperlyConfigured: if the setting is malformed or unresolvable
    """
    try:
        modules = settings.LOCATION_SCHEMA_FOR_PROJECT_CONFIG.split('.')
        return getattr(
            importlib.import_module(R.join('.', R.init(modules))),
            R.last(modules)
        )
    except ValueError:
        raise ImproperlyConfigured('''settings.LOCATION_SCHEMA_FOR_PROJECT_CONFIG must point to the location schema config containing
        {
            model_class=Location,
            graphene_class=LocationType,
            graphene_fields=location_fields,
        }
        ''')
    except LookupError:
        raise ImproperlyConfigured(
            # Fix: this branch previously referenced settings.LOCATION_SCHEMA_CONFIG —
            # the wrong setting for this getter (and an AttributeError risk if unset)
            "settings.LOCATION_SCHEMA_FOR_PROJECT_CONFIG refers to model '%s' that has not been installed" % settings.LOCATION_SCHEMA_FOR_PROJECT_CONFIG
        )
def get_user_search_data_schema():
    """
    Uses the same technique as get_user_model() to get the current user_search schema config from settings
    :return: The config object referenced by settings.USER_SEARCH_DATA_SCHEMA_CONFIG
    :raise ImproperlyConfigured: if the setting is malformed or unresolvable
    """
    try:
        modules = settings.USER_SEARCH_DATA_SCHEMA_CONFIG.split('.')
        # Import the module path (all but the last segment), then resolve the
        # attribute named by the last segment
        containing_module = importlib.import_module(R.join('.', R.init(modules)))
        return getattr(containing_module, R.last(modules))
    except ValueError:
        raise ImproperlyConfigured('''settings.USER_SEARCH_DATA_SCHEMA_CONFIG must point to the user_search schema config containing
        {
            graphene_class=UserSearchType,
            graphene_fields=user_search_fields,
        }
        ''')
    except LookupError:
        raise ImproperlyConfigured(
            "settings.USER_SEARCH_DATA_SCHEMA_CONFIG refers to model '%s' that has not been installed" % settings.USER_SEARCH_DATA_SCHEMA_CONFIG
        )
def get_search_location_schema():
    """
    Uses the same technique as get_user_model() to get the current search_location schema config from settings
    :return: The config object referenced by settings.SEARCH_LOCATION_SCHEMA_CONFIG
    :raise ImproperlyConfigured: if the setting is malformed or unresolvable
    """
    # (Fix in this revision: dataset-extraction residue fused onto the final
    # line broke the module's syntax; code otherwise unchanged.)
    try:
        modules = settings.SEARCH_LOCATION_SCHEMA_CONFIG.split('.')
        return getattr(
            importlib.import_module(R.join('.', R.init(modules))),
            R.last(modules)
        )
    except ValueError:
        raise ImproperlyConfigured('''settings.SEARCH_LOCATION_SCHEMA_CONFIG must point to the search_location schema config containing
        {
            model_class=SearchLocation,
            graphene_class=SearchLocationType,
            graphene_fields=search_location_fields,
        }
        ''')
    except LookupError:
        raise ImproperlyConfigured(
            "settings.SEARCH_LOCATION_SCHEMA_CONFIG refers to model '%s' that has not been installed" % settings.SEARCH_LOCATION_SCHEMA_CONFIG
        )
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import rescape_region.model_helpers
class Migration(migrations.Migration):
    """
    Auto-generated migration adding the Location and Project models.
    Applied migrations must not be edited; the only change in this revision is
    removing dataset-extraction residue fused onto the closing line, which
    broke the module's syntax.
    NOTE(review): both Location.data and Project.data default to
    rescape_region.model_helpers.region_data_default — presumably what the
    models declared when this migration was generated; confirm, do not "fix".
    """
    dependencies = [
        ('rescape_region', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.DateTimeField(editable=False, null=True)),
                ('key', models.CharField(max_length=20, unique=True)),
                ('name', models.CharField(max_length=50)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('geojson', django.contrib.postgres.fields.jsonb.JSONField(default=rescape_region.model_helpers.feature_collection_default)),
                ('data', django.contrib.postgres.fields.jsonb.JSONField(default=rescape_region.model_helpers.region_data_default)),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.DateTimeField(editable=False, null=True)),
                ('key', models.CharField(max_length=20, unique=True)),
                ('name', models.CharField(max_length=50)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('geojson', django.contrib.postgres.fields.jsonb.JSONField(default=rescape_region.model_helpers.feature_collection_default)),
                ('data', django.contrib.postgres.fields.jsonb.JSONField(default=rescape_region.model_helpers.region_data_default)),
                ('locations', models.ManyToManyField(blank=True, to='rescape_region.Location')),
            ],
        ),
    ]
import reversion
from django.contrib.gis.db import models
from django.db.models import JSONField
from django.db.models import (CharField, ForeignKey, UniqueConstraint, Q)
from safedelete.models import SafeDeleteModel
from rescape_region.models.revision_mixin import RevisionModelMixin
def default():
    """
    Default data blob for a Resource: sankey settings (required inputs),
    the generated graph (readonly), and the raw CSV-derived rows.
    :return: The default dict
    """
    return dict(
        # Settings used to generate the sankey. These are required
        settings=dict(
            # The default location for nodes whose location fields is 'NA'
            default_location=[],
            # The column names of the raw data. Used to key columns to meanings
            columns=[],
            # The column name that stores the Sankey stage of the node
            stageKey=None,
            # The column name that stores the value of the node
            # NOTE(review): 'valueVey' looks like a typo for 'valueKey', but it
            # is a runtime dict key that consumers may depend on — confirm
            # before renaming
            valueVey=None,
            # The column name that stores the name of the node
            nodeNameKey=None,
            # Optional color key of the individual node
            nodeColorKey=None,
            # Optional color key of the individual link
            linkColorKey=None,
            # A list of stages. Each stage is a dict with key name and targets array
            # The key is used to list targets in the targes array. The name is the readable name
            # Targets is a list of keys of other stages
            stages=[]
        ),
        # Processed sankey nodes and links. These are generated and readonly
        graph=dict(
            # Nodes are stored by the stage key that they represent
            nodes={},
            link=[]
        ),
        # CSV converted to dicts. Each dict contains column values as indicated in settings.columns
        raw_data=[]
    )
@reversion.register()
class Resource(SafeDeleteModel, RevisionModelMixin):
    """
    Models a resource, such as water.
    (Fix in this revision: dataset-extraction residue fused onto the final
    line broke the module's syntax; model definition otherwise unchanged.)
    """
    # Machine key; uniqueness rules are expressed in Meta.constraints below
    key = CharField(max_length=50, null=False)
    # Human-readable name
    name = CharField(max_length=50, null=False)
    # Sankey settings/graph/raw_data blob; see default() above
    data = JSONField(null=False, default=default)
    # TODO we should probably have models.CASCADE here to delete a resource if the region goes away
    region = ForeignKey('Region', related_name='resources', null=False, on_delete=models.DO_NOTHING)
    class Meta:
        app_label = "rescape_region"
        constraints = [
            # https://stackoverflow.com/questions/33307892/django-unique-together-with-nullable-foreignkey
            # This says that for deleted resources, key and deleted date must be unique
            UniqueConstraint(fields=['deleted', 'key'],
                             name='unique_resource_with_deleted'),
            # This says that for non-deleted resources, key must be unique
            UniqueConstraint(fields=['key'],
                             condition=Q(deleted=None),
                             name='unique_resource_without_deleted'),
        ]
    def __str__(self):
        return self.name
from django.contrib.auth import get_user_model
from rescape_python_helpers import ramda as R
from rescape_region.models import UserState
from rescape_region.schema_models.scope.location.location_sample import create_local_sample_locations, \
create_local_sample_search_locations
from rescape_region.schema_models.scope.project.project_sample import create_sample_projects
from rescape_region.schema_models.scope.region.region_sample import create_sample_regions
from rescape_region.schema_models.user_sample import create_sample_users
# Sample UserState specs used by the sample-data helpers below. 'username' and
# the region/project 'key' values are resolved to persisted instances by
# create_sample_user_state.
sample_user_states = [
    dict(
        username="lion",  # This is converted to a user=persisted User
        data=dict(
            userGlobal=dict(
                mapbox=dict(viewport=dict(
                    latitude=50.5915,
                    longitude=2.0165,
                    zoom=7
                )),
            ),
            userRegions=[
                dict(
                    region=dict(key='belgium'),  # key is converted to persisted Region's id
                    mapbox=dict(viewport=dict(
                        latitude=50.5915,
                        longitude=2.0165,
                        zoom=7
                    )),
                )
            ],
            userProjects=[
                dict(
                    project=dict(key='gare'),  # key is converted to persisted Project's id
                    mapbox=dict(viewport=dict(
                        latitude=50.846127,
                        longitude=4.358111,
                        zoom=10
                    )),
                )
            ]
        )
    ),
    dict(
        username="cat",  # This is converted to a user=persisted User
        data=dict(
            userGlobal=dict(
                mapbox=dict(viewport=dict(
                    latitude=50.5915,
                    longitude=2.0165,
                    zoom=7
                )),
            ),
            userRegions=[
                dict(
                    region=dict(key='belgium'),  # key is converted to persisted Region's id
                    mapbox=dict(viewport=dict(
                        latitude=50.5915,
                        longitude=2.0165,
                        zoom=7
                    )),
                )
            ],
            userProjects=[
                dict(
                    project=dict(key='gare'),  # key is converted to persisted Project's id
                    mapbox=dict(viewport=dict(
                        latitude=50.846127,
                        longitude=4.358111,
                        zoom=10
                    )),
                )
            ]
        )
    )
]
def delete_sample_user_states():
    """Remove every persisted UserState (resets the sample data)."""
    all_user_states = UserState.objects.all()
    all_user_states.delete()
@R.curry
def create_sample_user_state(cls, regions, projects, user_state_dict):
    """
    Persists sample user state data into a UserState
    :param cls: The UserState class
    :param {[Region]} regions: Persisted sample regions
    :param {[Projects]} projects: Persisted sample projects
    :param user_state_dict: Sample data in the form: dict(
        username="lion", # This will be mapped to the User id in create_sample_user_state
        data=dict(
            userRegions=[
                dict(
                    region=dict(key='belgium'), # key is converted to persisted Region's id
                    mapbox=dict(viewport=dict(
                        latitude=50.5915,
                        longitude=2.0165,
                        zoom=7
                    )),
                )
            ]
        )
    ),
    :return: The persisted UserState instance
    """
    # Resolve the persisted User by the sample username
    user = get_user_model().objects.get(username=user_state_dict['username'])
    user_state_values = R.merge_deep(
        # Skip username and data, they are handled above and below
        R.omit(['username', 'data'], user_state_dict),
        # Convert data.region_keys to data.user_region ids
        dict(
            user=user,
            data=form_sample_user_state_data(
                regions,
                projects,
                R.prop(
                    'data',
                    user_state_dict
                )
            )
        )
    )
    # Save the user_state with the complete data
    user_state = cls(**user_state_values)
    user_state.save()
    return user_state
def user_state_scope_instances(scope_key, user_scope_key, scope_instances, data):
    """
    Creates scope instance dicts for the given instances. New scope instances can be
    passed as well for Project, which instructs the server to create the Project when
    creating/updating the userProject
    :param scope_key: 'region', 'project', etc
    :param user_scope_key: 'userRegions', 'userProjects', etc
    :param scope_instances: regions or projects or ...
    :param data: The userState data to put the instances in. E.g. data.userRegions gets mapped to include
    the resolved regions
    :return: The list of user scope instance dicts with resolved ids (or full
    creation values when no persisted instance matches)
    """
    # Index the persisted instances by their unique key for fast lookup
    scope_instances_by_key = R.map_prop_value_as_index('key', scope_instances)
    def resolve_scope_instance(scope_key, user_scope_instance):
        # Replace key with id
        id = R.compose(
            # third get the id if it exists
            R.prop_or(None, 'id'),
            # second resolve the scope instance if it exists
            lambda k: R.prop_or(None, k, scope_instances_by_key),
            # first get the key
            R.item_str_path(f'{scope_key}.key')
        )(user_scope_instance)
        return {
            scope_key: R.compact_dict(
                dict(
                    # Resolve the persisted Scope instance by key
                    id=id
                ) if id else dict(
                    # Otherwise pass everything so the server can create the instance
                    # (Currently only supported for projects)
                    user_scope_instance[scope_key]
                )
            )
        }
    return R.map(
        # Find the id of th scope instance that matches,
        # returning dict(id=scope_instance_id). We can't return the whole scope instance
        # because we are saving within json data, not the Django ORM
        # If the scope instance is new and doesn't match anything, create the user scope instance
        # without an id so that the server saves it (Only implemented for Project, not Region thus far)
        lambda user_scope_instance: R.merge(
            # Other stuff like mapbox
            R.omit([scope_key], user_scope_instance),
            # The project or region
            resolve_scope_instance(scope_key, user_scope_instance)
        ),
        R.prop(user_scope_key, data)
    )
def form_sample_user_state_data(regions, projects, data):
    """
    Given data in the form dict(region_keys=[...], ...), converts region_keys to
    regions=[{id:x}, {id:y}, ...] by resolving the regions
    :param regions: Persisted regions
    :param projects: Persisted projects
    :param {dict} data: Sample data in the form:
    dict(
        userRegions=[
            dict(
                region=dict(key='belgium'), # key is converted to persisted Region's id
                mapbox=dict(viewport=dict(
                    latitude=50.5915,
                    longitude=2.0165,
                    zoom=7
                )),
            )
        ]
    ),
    :return: Data in the form dict(userRegions=[dict(region=dict(id=x), mapbox=..., ...), ...])
    """
    return R.merge(
        # Rest of data that's not regions
        R.omit(['userRegions', 'userProjects'], data),
        # Resolve each user scope entry against the persisted instances
        dict(
            userRegions=user_state_scope_instances('region', 'userRegions', regions, data),
            userProjects=user_state_scope_instances('project', 'userProjects', projects, data)
        )
    )
def create_sample_user_states(
        cls, region_cls, project_cls, location_cls, search_location_cls,
        create_sample_locations=create_local_sample_locations,
        create_sample_search_locations=create_local_sample_search_locations,
        create_additional_scope_instance_properties=lambda user_scope_instance: user_scope_instance
):
    """
    Creates and persists UserState instances for all sample users, wiring each
    userRegion/userProject to persisted regions, projects, locations and
    search locations.
    (Fixes in this revision: removed dataset-extraction residue fused onto the
    final line, which broke the module's syntax, and corrected the docstring
    name of the create_sample_search_locations parameter.)
    :param cls: The UserState class
    :param region_cls:
    :param project_cls:
    :param location_cls:
    :param search_location_cls:
    :param create_sample_locations: Defaults to create_local_sample_locations. Expects
    the location_cls as the only arg
    :param create_sample_search_locations: Defaults to create_local_sample_search_locations. Expects
    the search_location_cls and a list of sample locations. The locations can be ignored
    if creating samples independent of the locations
    :param create_additional_scope_instance_properties: Function that takes each user_scope_instance
    and adds properties to it if needed. This corresponds with schemas defined by users in
    additional_user_scope_schemas
    :return: The persisted UserState instances
    """
    users = create_sample_users()
    # Create regions for the users to associate with. A region also needs and owner so we pass users to the function
    regions = create_sample_regions(region_cls)
    projects = create_sample_projects(project_cls, users, regions)
    locations = create_sample_locations(location_cls)
    search_locations = create_sample_search_locations(search_location_cls, locations)
    # Assign all the locations to each project
    for project in projects:
        project.locations.add(*locations)
    # Merge search_locations into each userScope dict
    def sample_user_state_with_search_locations_and_additional_scope_instances(user_scope_name, sample_user_state):
        return R.fake_lens_path_set(
            f'data.{user_scope_name}'.split('.'),
            R.map(
                lambda user_scope: R.compose(
                    # Gives applications a chance to add the needed additional scope instances,
                    # e.g. userDesignFeatures
                    lambda user_scope: create_additional_scope_instance_properties(user_scope),
                    lambda user_scope: R.merge(
                        user_scope,
                        dict(
                            userSearch=dict(
                                userSearchLocations=R.map(lambda i_search_location: dict(
                                    # Just return with the id since the full data is in the database
                                    searchLocation=R.pick(['id'], i_search_location[1]),
                                    # Set the first search_location to active
                                    activity=dict(isActive=i_search_location[0] == 0)
                                ), enumerate(search_locations))
                            )
                        )
                    )
                )(user_scope),
                R.item_str_path(f'data.{user_scope_name}', sample_user_state)
            ),
            sample_user_state
        )
    # Convert all sample user_state dicts to persisted UserState instances
    # Use the username to match a real user
    user_states = R.map(
        lambda sample_user_state: create_sample_user_state(cls, regions, projects, sample_user_state),
        # Adds search_locations to each userState.data.[userRegions[*]|userProjects[*]].user_search.userSearchLocations
        R.compose(
            lambda sample_user_states: R.map(
                lambda sample_user_state: sample_user_state_with_search_locations_and_additional_scope_instances(
                    'userProjects', sample_user_state),
                sample_user_states
            ),
            lambda sample_user_states: R.map(
                lambda sample_user_state: sample_user_state_with_search_locations_and_additional_scope_instances(
                    'userRegions', sample_user_state),
                sample_user_states
            ),
        )(sample_user_states)
    )
    return user_states
from graphene import ObjectType, Float, List, Field, Int, Boolean
from rescape_graphene import resolver_for_dict_field, \
resolver_for_dict_list, model_resolver_for_dict_field, type_modify_fields, FeatureCollectionDataType
from rescape_graphene.schema_models.geojson.types.feature_collection import feature_collection_data_type_fields
from rescape_python_helpers import ramda as R
from rescape_region.schema_models.mapbox.mapbox_data_schema import MapboxDataType, mapbox_data_fields
# Field config for the per-scope 'activity' blob (e.g. which region/project
# is currently active for the user)
activity_data_fields = dict(
    isActive=dict(type=Boolean)
)
# Graphene ObjectType for the 'activity' blob described by activity_data_fields
ActivityDataType = type(
    'ActivityDataType',
    (ObjectType,),
    type_modify_fields(activity_data_fields)
)
def user_global_data_fields(class_config):
    """
    Field configs for the user's global (non-scope-specific) settings
    :param class_config: Unused here; kept for signature parity with the other
    user_*_data_fields factories
    :return: dict of field configs
    """
    return dict(
        # The mapbox state for the user's Global settings
        mapbox=dict(
            type=MapboxDataType,
            graphene_type=MapboxDataType,
            fields=mapbox_data_fields,
            type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field),
        )
    )
# References the Global instance, dictating settings imposed on or chosen by a user globally
# to which they have some level of access. This also adds settings like mapbox that are particular to the User's use
# of the Region but that the Region itself doesn't care about
def UserGlobalDataType(class_config):
    """
    Factory for the dynamic ObjectType holding a user's global settings
    :param class_config: dict of scope class configs
    :return: The UserGlobalDataType class
    """
    return type(
        'UserGlobalDataType',
        (ObjectType,),
        type_modify_fields(user_global_data_fields(class_config))
    )
def user_search_field_for_user_state_scopes(user_search_graphene_class, user_search_graphene_fields):
    """
    user_searches is a list of dicts specific to the application
    for instance each user_search type might contain fields of the
    type django LocationSearch, RegionSearch, ProductSearch, which are models that
    correspond to Location, Region, Product, but include filter properties.
    It might be possible to generalize this here, and assume that every user_search
    needs a config LocationSearch/Location, RegionSearch/Region, ProductSearch/Product
    and corresponding graphene types. For now we'll just require a
    user_searches_graphene_class that can point to all the application specific types via fields
    :param user_search_graphene_class: The graphene class that contains to application specific
    fields
    :param user_search_graphene_fields: The graphene fields of the user_search_graphene_class
    :return: The field config dict for the userSearch field
    """
    field_config = {
        'type': user_search_graphene_class,
        'graphene_type': user_search_graphene_class,
        'fields': user_search_graphene_fields,
    }
    # Resolve from the raw dict data stored in the UserState blob
    field_config['type_modifier'] = lambda *type_and_args: Field(
        *type_and_args, resolver=resolver_for_dict_field
    )
    return field_config
def user_region_data_fields(class_config):
    """
    Field configs for one userRegion entry: the referenced Region plus the
    user's per-region settings (mapbox viewport, activity, searches, and any
    application-specific additions)
    :param class_config: dict with 'region', 'user_search' and optional
    'additional_user_scope_schemas' configs
    :return: dict of field configs
    """
    region_class_config = R.prop('region', class_config)
    additional_user_scope_schemas = R.prop('additional_user_scope_schemas', class_config)\
        if R.prop_or(None, 'additional_user_scope_schemas', class_config) else {}
    return dict(
        # References a Region.
        region=dict(
            type=R.prop('graphene_class', region_class_config),
            graphene_type=R.prop('graphene_class', region_class_config),
            fields=R.prop('graphene_fields', region_class_config),
            type_modifier=lambda *type_and_args: Field(
                *type_and_args,
                resolver=model_resolver_for_dict_field(R.prop('model_class', region_class_config))
            )
        ),
        # The mapbox state for the user's use of this Region
        mapbox=dict(
            type=MapboxDataType,
            graphene_type=MapboxDataType,
            fields=mapbox_data_fields,
            type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field),
        ),
        # Is this region the active region for this user
        activity=dict(
            type=ActivityDataType,
            graphene_type=ActivityDataType,
            fields=activity_data_fields,
            type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field),
        ),
        # A list of user_searches that reference application specific classes
        userSearch=user_search_field_for_user_state_scopes(
            *R.props(['graphene_class', 'graphene_fields'], R.prop('user_search', class_config))
        ),
        **additional_user_scope_schemas
    )
# References a Region model instance, dictating settings imposed on or chosen by a user for a particular Region
# to which they have some level of access. This also adds settings like mapbox that are particular to the User's use
# of the Region but that the Region itself doesn't care about
def UserRegionDataType(class_config):
    """
    Factory for the dynamic ObjectType holding a user's per-Region settings
    :param class_config: dict of scope class configs
    :return: The UserRegionDataType class
    """
    return type(
        'UserRegionDataType',
        (ObjectType,),
        type_modify_fields(user_region_data_fields(class_config))
    )
def user_project_data_fields(class_config):
    """
    Field configs for one userProject entry: the referenced Project plus the
    user's per-project settings (mapbox viewport, locations, activity,
    searches, and any application-specific additions)
    :param class_config: dict with 'project', 'location', 'user_search' and
    optional 'additional_user_scope_schemas' configs
    :return: dict of field configs
    """
    project_class_config = R.prop('project', class_config)
    location_class_config = R.prop('location', class_config)
    additional_user_scope_schemas = R.prop('additional_user_scope_schemas', class_config)\
        if R.prop_or(None, 'additional_user_scope_schemas', class_config) else {}
    return dict(
        # References a Project
        project=dict(
            type=R.prop('graphene_class', project_class_config),
            graphene_type=R.prop('graphene_class', project_class_config),
            fields=R.prop('graphene_fields', project_class_config),
            type_modifier=lambda *type_and_args: Field(
                *type_and_args,
                resolver=model_resolver_for_dict_field(R.prop('model_class', project_class_config))
            )
        ),
        # The mapbox state for the user's use of this Project
        mapbox=dict(
            type=MapboxDataType,
            graphene_type=MapboxDataType,
            fields=mapbox_data_fields,
            type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field),
        ),
        locations=dict(
            type=R.prop('graphene_class', location_class_config),
            graphene_type=R.prop('graphene_class', location_class_config),
            fields=R.prop('graphene_fields', location_class_config),
            type_modifier=lambda *type_and_args: List(*type_and_args)
        ),
        # Is the project active for the user and similar
        activity=dict(
            type=ActivityDataType,
            graphene_type=ActivityDataType,
            fields=activity_data_fields,
            type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field),
        ),
        # A list of user_searches that reference application specific classes
        userSearch=user_search_field_for_user_state_scopes(
            *R.props(['graphene_class', 'graphene_fields'], R.prop('user_search', class_config))
        ),
        **additional_user_scope_schemas
    )
# References a Project model instance, dictating settings imposed on or chosen by a user for a particular Project
# to which they have some level of access. This also adds settings like mapbox that are particular to the User's use
# of the Project but that the Project itself doesn't care about
def UserProjectDataType(class_config):
    """Build the dynamic graphene ObjectType for a user's per-Project settings."""
    modified_fields = type_modify_fields(user_project_data_fields(class_config))
    return type('UserProjectDataType', (ObjectType,), modified_fields)
# User State for their use of Regions, Projects, etc
def user_state_data_fields(class_config):
    """
    Graphene field definitions for UserState.data.
    :param class_config: Dict of scope class configurations, passed through to the
        per-scope data-type builders
    :return: A dict of field configs consumed by type_modify_fields
    """
    # Build each dynamic graphene type exactly once. The original called each builder
    # twice (once for type, once for graphene_type), which created two distinct
    # classes per field since each builder call runs type(...). Reusing one instance
    # keeps type and graphene_type identical objects and avoids the duplicate work.
    user_global_type = UserGlobalDataType(class_config)
    user_region_type = UserRegionDataType(class_config)
    user_project_type = UserProjectDataType(class_config)
    return dict(
        userGlobal=dict(
            type=user_global_type,
            graphene_type=user_global_type,
            fields=user_global_data_fields(class_config),
            type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field)
        ),
        userRegions=dict(
            type=user_region_type,
            graphene_type=user_region_type,
            fields=user_region_data_fields(class_config),
            type_modifier=lambda *type_and_args: List(*type_and_args, resolver=resolver_for_dict_list)
        ),
        userProjects=dict(
            type=user_project_type,
            graphene_type=user_project_type,
            fields=user_project_data_fields(class_config),
            type_modifier=lambda *type_and_args: List(*type_and_args, resolver=resolver_for_dict_list)
        )
    )
def UserStateDataType(class_config):
    """Build the top-level dynamic graphene ObjectType for UserState.data."""
    return type(
        'UserStateDataType',
        (ObjectType,),
        type_modify_fields(user_state_data_fields(class_config))
) | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/user_state/user_state_data_schema.py | 0.731922 | 0.264133 | user_state_data_schema.py | pypi |
import graphene
from graphene import Field, Mutation, InputObjectType, ObjectType
from graphene_django.types import DjangoObjectType
from graphql_jwt.decorators import login_required
from rescape_graphene import input_type_fields, REQUIRE, DENY, CREATE, \
input_type_parameters_for_update_or_create, UPDATE, \
guess_update_or_create, graphql_update_or_create, graphql_query, merge_with_django_properties, \
resolver_for_dict_field
from rescape_graphene.graphql_helpers.schema_helpers import update_or_create_with_revision, process_filter_kwargs, \
top_level_allowed_filter_arguments
from rescape_graphene.schema_models.django_object_type_revisioned_mixin import reversion_and_safe_delete_types, \
DjangoObjectTypeRevisionedMixin
from rescape_python_helpers import ramda as R
from rescape_region.models import GroupState
from rescape_region.schema_models.user_state.group_state_data_schema import GroupStateDataType, group_state_data_fields
def create_group_state_mutation(group_state_config):
    """
    Builds the graphene ObjectType exposing the GroupState create/update mutations.
    :param group_state_config: The config produced by create_group_state_config
    :return: The GroupStateMutation class
    """
    class GroupStateMutation(graphene.ObjectType):
        # Bug fix: these attributes were named create_user_state/update_user_state
        # (a copy-paste from the user_state schema), which exposed the mutations as
        # createUserState/updateUserState and mismatched the createGroupState/
        # updateGroupState names declared in group_state_mutation_config.
        create_group_state = R.prop('create_mutation_class', group_state_config).Field()
        update_group_state = R.prop('update_mutation_class', group_state_config).Field()
    return GroupStateMutation
def create_group_state_query(group_state_config):
    """
    Builds the graphene ObjectType exposing the groupStates query.
    :param group_state_config: The config produced by create_group_state_config
    :return: The GroupStateQuery class
    """
    class GroupStateQuery(ObjectType):
        group_states = graphene.List(
            R.prop('graphene_class', group_state_config),
            **top_level_allowed_filter_arguments(R.prop('graphene_fields', group_state_config),
                                                 R.prop('graphene_class', group_state_config))
        )
        @login_required
        def resolve_group_states(self, info, **kwargs):
            # Soft-deleted instances are excluded by default; kwargs can override deleted__isnull
            q_expressions = process_filter_kwargs(GroupState, **R.merge(dict(deleted__isnull=True), kwargs))
            return R.prop('model_class', group_state_config).objects.filter(
                *q_expressions
            )
    return GroupStateQuery
def create_group_state_query_and_mutation_classes(class_config):
    """Resolve the GroupState config once and bundle its query and mutation classes."""
    group_state_config = create_group_state_config(class_config)
    query_class = create_group_state_query(group_state_config)
    mutation_class = create_group_state_mutation(group_state_config)
    return dict(query=query_class, mutation=mutation_class)
def create_group_state_config(class_config):
    """
    Creates the GroupStateType based on specific class_config
    :param class_config: A dict containing class configurations. Right now it's only region in the form
    dict(
        region=dict(
            model_class=...,
            graphene_class=...,
            fields=...
        )
    )
    :return: A dict with the model/graphene classes, field configs, mutation classes,
        and client-side graphql helpers for GroupState
    """
    class GroupStateType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
        """
            GroupStateType models GroupState, which represents the settings both imposed upon and chosen by the group
        """
        id = graphene.Int(source='pk')
        class Meta:
            model = GroupState
    # Modify data field to use the resolver.
    # I guess there's no way to specify a resolver upon field creation, since graphene just reads the underlying
    # Django model to generate the fields
    GroupStateType._meta.fields['data'] = Field(
        GroupStateDataType(class_config),
        resolver=resolver_for_dict_field
    )
    # Merge the declarative field config with the Django model's own properties
    group_state_fields = merge_with_django_properties(GroupStateType, dict(
        id=dict(create=DENY, update=REQUIRE),
        data=dict(graphene_type=GroupStateDataType(class_config), fields=group_state_data_fields(class_config),
                  default=lambda: dict()),
        **reversion_and_safe_delete_types
    ))
    group_state_mutation_config = dict(
        class_name='GroupState',
        crud={
            CREATE: 'createGroupState',
            UPDATE: 'updateGroupState'
        },
        resolve=guess_update_or_create
    )
    class UpsertGroupState(Mutation):
        """
        Abstract base class for mutation
        """
        group_state = Field(GroupStateType)
        # NOTE(review): unlike the Project/Location/Region mutations, mutate here is not
        # decorated with @login_required or @transaction.atomic — confirm this is intentional
        def mutate(self, info, group_state_data=None):
            """
            Update or create the group state
            :param info:
            :param group_state_data:
            :return:
            """
            update_or_create_values = input_type_parameters_for_update_or_create(group_state_fields, group_state_data)
            # We can do update_or_create since we have a unique group_id in addition to the unique id
            group_state, created = update_or_create_with_revision(GroupState, update_or_create_values)
            return UpsertGroupState(group_state=group_state)
    class CreateGroupState(UpsertGroupState):
        """
        Create GroupState mutation class
        """
        class Arguments:
            group_state_data = type('CreateGroupStateInputType', (InputObjectType,),
                                    input_type_fields(group_state_fields, CREATE, GroupStateType))(required=True)
    class UpdateGroupState(UpsertGroupState):
        """
        Update GroupState mutation class
        """
        class Arguments:
            group_state_data = type('UpdateGroupStateInputType', (InputObjectType,),
                                    input_type_fields(group_state_fields, UPDATE, GroupStateType))(required=True)
    graphql_update_or_create_group_state = graphql_update_or_create(group_state_mutation_config, group_state_fields)
    graphql_query_group_states = graphql_query(GroupStateType, group_state_fields, 'groupStates')
    # Bundle everything callers need to wire GroupState into a schema
    return dict(
        model_class=GroupState,
        graphene_class=GroupStateType,
        graphene_fields=group_state_fields,
        create_mutation_class=CreateGroupState,
        update_mutation_class=UpdateGroupState,
        graphql_mutation=graphql_update_or_create_group_state,
        graphql_query=graphql_query_group_states
) | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/user_state/group_state_schema.py | 0.633864 | 0.309519 | group_state_schema.py | pypi |
from operator import itemgetter
import graphene
from django.db import transaction
from graphene import InputObjectType, Mutation, Field, ObjectType, List
from graphene_django.types import DjangoObjectType
from graphql_jwt.decorators import login_required
from rescape_graphene import REQUIRE, graphql_update_or_create, graphql_query, guess_update_or_create, \
CREATE, UPDATE, input_type_parameters_for_update_or_create, input_type_fields, merge_with_django_properties, \
DENY, FeatureCollectionDataType, resolver_for_dict_field, UserType, user_fields, \
create_paginated_type_mixin
from rescape_graphene import increment_prop_until_unique, enforce_unique_props
from rescape_graphene.django_helpers.pagination import resolve_paginated_for_type, pagination_allowed_filter_arguments
from rescape_graphene.graphql_helpers.schema_helpers import process_filter_kwargs, delete_if_marked_for_delete, \
update_or_create_with_revision, top_level_allowed_filter_arguments, ALLOW
from rescape_graphene.schema_models.django_object_type_revisioned_mixin import reversion_and_safe_delete_types, \
DjangoObjectTypeRevisionedMixin
from rescape_graphene.schema_models.geojson.types.feature_collection import feature_collection_data_type_fields
from rescape_python_helpers import ramda as R
from rescape_region.model_helpers import get_project_model, get_location_for_project_schema
from rescape_region.schema_models.scope.region.region_schema import RegionType, region_fields
from .project_data_schema import ProjectDataType, project_data_fields
# Resolve the application-specific Location schema config once at import time
# (the original called get_location_for_project_schema() twice for no benefit)
_location_schema_config = get_location_for_project_schema()
location_type = _location_schema_config['graphene_class']
location_fields = _location_schema_config['graphene_fields']
# Declarative field config for Project; merged below with the Django model's own
# properties to produce project_fields
raw_project_fields = dict(
    id=dict(create=DENY, update=REQUIRE),
    key=dict(
        create=REQUIRE,
        unique_with=increment_prop_until_unique(get_project_model(), None, 'key', R.pick(['deleted', 'user_id'])),
        # Allows UserState.data.userProjects to persist a new project
        related_input=ALLOW
    ),
    name=dict(
        create=REQUIRE,
        # Allows UserState.data.userProjects to persist a new project
        related_input=ALLOW
    ),
    # This refers to the ProjectDataType, which is a representation of all the json fields of Project.data
    data=dict(graphene_type=ProjectDataType, fields=project_data_fields, default=lambda: dict()),
    # This is the OSM geojson
    geojson=dict(
        graphene_type=FeatureCollectionDataType,
        fields=feature_collection_data_type_fields
    ),
    region=dict(
        graphene_type=RegionType,
        fields=region_fields,
        # Allows UserState.data.userProjects to persist a new project
        # (fix: normalized the spacing of this kwarg to match the siblings)
        related_input=ALLOW
    ),
    # The locations of the project. The Graphene type is dynamic to support application specific location classes
    locations=dict(
        graphene_type=lambda: location_type,
        fields=lambda: location_fields,
        type_modifier=lambda *type_and_args: List(*type_and_args),
        # Allows UserState.data.userProjects to persist a new project
        related_input=ALLOW
    ),
    # This is a Foreign Key. Graphene generates these relationships for us, but we need it here to
    # support our Mutation subclasses and query_argument generation
    user=dict(
        graphene_type=UserType,
        fields=user_fields,
        # Allows UserState.data.userProjects to persist a new project
        related_input=ALLOW
    ),
    **reversion_and_safe_delete_types
)
class ProjectType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
    """Graphene type for the configured Project Django model."""
    id = graphene.Int(source='pk')
    class Meta:
        model = get_project_model()
# Modify data field to use the resolver.
# I guess there's no way to specify a resolver upon field creation, since graphene just reads the underlying
# Django model to generate the fields
ProjectType._meta.fields['data'] = Field(
    ProjectDataType,
    resolver=resolver_for_dict_field
)
# Modify the geojson field to use the geometry collection resolver
ProjectType._meta.fields['geojson'] = Field(
    FeatureCollectionDataType,
    resolver=resolver_for_dict_field
)
# Merge the declarative config with the Django model's own properties
project_fields = merge_with_django_properties(ProjectType, raw_project_fields)
# Names used by the client-side graphql helpers for the create/update mutations
project_mutation_config = dict(
    class_name='Project',
    crud={
        CREATE: 'createProject',
        UPDATE: 'updateProject'
    },
    resolve=guess_update_or_create
)
# Paginated version of ProjectType
(ProjectPaginatedType, project_paginated_fields) = itemgetter('type', 'fields')(
    create_paginated_type_mixin(ProjectType, project_fields)
)
class ProjectQuery(ObjectType):
    """Graphene query endpoints for Project: projects and projectsPaginated."""
    projects = graphene.List(
        ProjectType,
        **top_level_allowed_filter_arguments(project_fields, ProjectType)
    )
    projects_paginated = Field(
        ProjectPaginatedType,
        **pagination_allowed_filter_arguments(project_paginated_fields, ProjectPaginatedType)
    )
    @staticmethod
    def _resolve_projects(info, **kwargs):
        # Shared by both resolvers; delegates to the module-level project_resolver
        return project_resolver('filter', **kwargs)
    @login_required
    def resolve_projects(self, info, **kwargs):
        return ProjectQuery._resolve_projects(info, **kwargs)
    @login_required
    def resolve_projects_paginated(self, info, **kwargs):
        return resolve_paginated_for_type(
            ProjectPaginatedType,
            ProjectQuery._resolve_projects,
            **kwargs
        )
def project_resolver(manager_method, **kwargs):
    """
    Resolves the projects for model get_project_model()
    :param manager_method: 'filter', 'get', or 'count'
    :param kwargs: Filter arguments for the Project
    :return: The result of the manager method applied to the filter expressions
    """
    project_model = get_project_model()
    # Exclude soft-deleted projects unless the caller explicitly filters on deleted
    filters = R.merge(dict(deleted__isnull=True), kwargs)
    q_expressions = process_filter_kwargs(project_model, **filters)
    manager_call = getattr(project_model.objects, manager_method)
    return manager_call(*q_expressions)
class UpsertProject(Mutation):
    """
    Abstract base class for the create/update Project mutations. Handles
    delete/undelete requests, deep-merging of the json data field, uniqueness
    enforcement, and syncing of the many-to-many locations.
    """
    project = Field(ProjectType)
    @transaction.atomic
    @login_required
    def mutate(self, info, project_data=None):
        # Handle a delete/undelete request first; returns None when this is a plain upsert
        deleted_project_response = delete_if_marked_for_delete(get_project_model(), UpsertProject, 'project',
                                                              project_data)
        if deleted_project_response:
            return deleted_project_response
        # We must merge in existing project.data if we are updating data
        if R.has('id', project_data) and R.has('data', project_data):
            # New data gets priority, but this is a deep merge.
            # If anything is omitted from the new data, it's assumed that the existing value should remain
            project_data['data'] = R.merge_deep(
                get_project_model().objects.get(id=project_data['id']).data,
                project_data['data']
            )
        # Make sure that all props are unique that must be, either by modifying values or erring.
        modified_project_data = enforce_unique_props(project_fields, project_data)
        # Omit many-to-many locations; they are synced separately below
        update_or_create_values = input_type_parameters_for_update_or_create(
            project_fields,
            R.omit(['locations'], modified_project_data)
        )
        project, created = update_or_create_with_revision(get_project_model(), update_or_create_values)
        locations = R.prop_or([], 'locations', modified_project_data)
        # Bug fix: the original R.compose(R.lt(0), R.length, locations) built a composed
        # function object (always truthy) instead of evaluating it, so every update
        # cleared the project's locations even when none were submitted. Evaluate the
        # emptiness test for real.
        any_locations = R.length(locations) > 0
        if not created and any_locations:
            # If update and locations are specified, clear the existing ones
            project.locations.clear()
        # Location objects come in as [{id:...}, {id:...}], so pass the id to Django
        if any_locations:
            project.locations.add(*R.map(R.prop('id'), locations))
        return UpsertProject(project=project)
class CreateProject(UpsertProject):
    """
    Create Project mutation class
    """
    class Arguments:
        # Input type is generated dynamically from the field config for CREATE access
        project_data = type('CreateProjectInputType', (InputObjectType,),
                            input_type_fields(project_fields, CREATE, ProjectType))(required=True)
class UpdateProject(UpsertProject):
    """
    Update Project mutation class
    """
    class Arguments:
        # Input type is generated dynamically from the field config for UPDATE access
        project_data = type('UpdateProjectInputType', (InputObjectType,),
                            input_type_fields(project_fields, UPDATE, ProjectType))(required=True)
class ProjectMutation(graphene.ObjectType):
    """Graphene mutation endpoints for Project: createProject and updateProject."""
    create_project = CreateProject.Field()
    update_project = UpdateProject.Field()
# Client-side graphql document builders for mutating and querying projects
graphql_update_or_create_project = graphql_update_or_create(project_mutation_config, project_fields)
graphql_query_projects = graphql_query(ProjectType, project_fields, 'projects')
def graphql_query_projects_limited(project_fields):
    """Like graphql_query_projects but queries only the given subset of project_fields."""
return graphql_query(ProjectType, project_fields, 'projects') | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/scope/project/project_schema.py | 0.557123 | 0.210766 | project_schema.py | pypi |
import graphene
from graphene import Field, List
from graphene_django.types import DjangoObjectType
from rescape_graphene import REQUIRE, merge_with_django_properties, \
DENY, FeatureCollectionDataType, resolver_for_dict_field
from rescape_graphene import increment_prop_until_unique
from rescape_graphene.schema_models.django_object_type_revisioned_mixin import reversion_and_safe_delete_types, \
DjangoObjectTypeRevisionedMixin
from rescape_graphene.schema_models.geojson.types.feature_collection import feature_collection_data_type_fields
from rescape_python_helpers import ramda as R
from rescape_region.models import Location
from rescape_region.schema_models.scope.location.location_data_schema import LocationDataType, location_data_fields
def reverse_relationships():
    """Reverse relationship field configs: the projects that reference a location."""
    # Imported locally to avoid a circular import with project_schema
    from rescape_region.schema_models.scope.project.project_schema import ProjectType, project_fields
    # Omit the project's own locations field to avoid infinite type recursion
    project_fields_sans_locations = R.omit(['locations'], project_fields)
    return dict(
        projects=dict(
            graphene_type=ProjectType,
            fields=project_fields_sans_locations,
            type_modifier=lambda *type_and_args: List(*type_and_args)
        )
    )
def raw_location_fields(with_reverse=True):
    """
    Declarative field config for Location.
    :param with_reverse: Default True. When True include the reverse projects
        relationship; project_schema passes False to avoid mutual recursion
    :return: The merged field config dict
    """
    return R.merge_all([
        dict(
            id=dict(create=DENY, update=REQUIRE),
            key=dict(create=REQUIRE, unique_with=increment_prop_until_unique(Location, None, 'key', {})),
            name=dict(create=REQUIRE),
            # This refers to the LocationDataType, which is a representation of all the json fields of Location.data
            data=dict(
                graphene_type=LocationDataType,
                fields=location_data_fields,
                default=lambda: dict(),
                # type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field),
            ),
            # This is the OSM geojson
            geojson=dict(
                graphene_type=FeatureCollectionDataType,
                fields=feature_collection_data_type_fields,
                # type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field)
            )
        ),
        # Add reverse relationships if this is for location_schema, but don't if it's for project_schema
        reverse_relationships() if with_reverse else {},
        reversion_and_safe_delete_types
    ])
class LocationType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
    """Graphene type for the Location Django model."""
    id = graphene.Int(source='pk')
    class Meta:
        model = Location
# Modify data field to use the resolver.
# I guess there's no way to specify a resolver upon field creation, since graphene just reads the underlying
# Django model to generate the fields
LocationType._meta.fields['data'] = Field(
    LocationDataType,
    resolver=resolver_for_dict_field
)
# Modify the geojson field to use the geometry collection resolver
LocationType._meta.fields['geojson'] = Field(
    FeatureCollectionDataType,
    resolver=resolver_for_dict_field
)
# Field config without the reverse projects relationship, for use by project_schema
location_fields_without_reverse = merge_with_django_properties(LocationType, raw_location_fields(False))
# This must be referenced in settings.py
location_schema_config = dict(
    model_class=Location,
    graphene_class=LocationType,
    graphene_fields=location_fields_without_reverse
) | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/scope/location/location_schema_helpers.py | 0.58818 | 0.295992 | location_schema_helpers.py | pypi |
from rescape_python_helpers import ramda as R
from django.db import transaction
from rescape_region.models import SearchJurisdiction
# Default sample Location dicts used by create_local_sample_locations for tests/fixtures
local_sample_locations = [
    dict(
        key='grandPlace',
        name='Grand Place',
        geojson={
            'type': 'FeatureCollection',
            'features': [{
                "type": "Feature",
                "geometry": {
                    "type": "Polygon",
                    "coordinates": [
                        [[49.5294835476, 2.51357303225], [51.4750237087, 2.51357303225],
                         [51.4750237087, 6.15665815596],
                         [49.5294835476, 6.15665815596], [49.5294835476, 2.51357303225]]]
                }
            }]
        },
        data=dict()
    ),
    dict(
        key='petitPlace',
        name='Petit Place',
        geojson={
            'type': 'FeatureCollection',
            'features': [{
                "type": "Feature",
                "geometry": {
                    "type": "Polygon",
                    "coordinates": [
                        [[49.5294835476, 2.51357303225], [51.4750237087, 2.51357303225],
                         [51.4750237087, 6.15665815596],
                         [49.5294835476, 6.15665815596], [49.5294835476, 2.51357303225]]]
                }
            }]
        },
        data=dict()
    ),
]
@transaction.atomic
def create_sample_location(cls, location_dict):
    """Persist a single sample location, upserting by key when one is given."""
    if 'key' in location_dict:
        # rescape_region uses a key for uniqueness
        defaults = {prop: value for prop, value in location_dict.items() if prop != 'key'}
        instance, _created = cls.objects.update_or_create(key=location_dict['key'], defaults=defaults)
        return instance
    # other implementors should delete duplicates first
    instance = cls(**location_dict)
    instance.save()
    return instance
def delete_sample_locations(cls):
    """Delete ALL persisted locations (not only samples) to give a clean slate."""
    cls.objects.all().delete()
def create_local_sample_locations(cls, sample_locations=local_sample_locations):
    """
    Create sample locations
    :param cls: The Location class
    :param sample_locations: Defaults to local_sample_locations defined in this file.
        Apps using this can pass their own
    :return: The list of persisted Location instances
    """
    # Clear existing locations so keys don't collide with stale samples
    delete_sample_locations(cls)
    # Persist each sample dict as a Location instance
    return [create_sample_location(cls, location_dict) for location_dict in sample_locations]
def create_sample_search_location(cls, search_location_dict):
    """
    Matches the location name with street.name of a new search location
    :param cls: The SearchLocation class
    :param search_location_dict: An object exposing a .name attribute
    :return: The persisted search location with one attached jurisdiction
    """
    # NOTE(review): search_location_dict is accessed via attribute (.name), so it looks
    # like a model instance rather than a plain dict like local_sample_locations —
    # confirm what callers actually pass here
    search_location = cls(
        name=f"Searchin' for {search_location_dict.name}",
        street=dict(nameContains=search_location_dict.name)
    )
    search_location.save()
    # Save each jurisdiction before attaching it (save() returns None, hence the `or instance`)
    search_jurisdictions = R.map(lambda instance: instance.save() or instance, [SearchJurisdiction(data=dict(country='Nowhere'))])
    search_location.jurisdictions.set(search_jurisdictions)
    return search_location
def delete_sample_search_locations(cls):
    """Delete ALL persisted search locations (not only samples)."""
    cls.objects.all().delete()
def create_local_sample_search_locations(cls, sample_locations):
    """
    Create a sample search location that matches each location by name
    :param cls: The SearchLocation class
    :param sample_locations: The locations to build matching search locations for
    :return: The list of persisted search locations
    """
    delete_sample_search_locations(cls)
    # kv is the (index, location) pair from enumerate; only the location is used
    return R.map(
        lambda kv: create_sample_search_location(cls, kv[1]),
        enumerate(sample_locations)
) | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/scope/location/location_sample.py | 0.740456 | 0.358718 | location_sample.py | pypi |
from operator import itemgetter
import graphene
from django.db import transaction
from graphene import InputObjectType, Mutation, Field, ObjectType
from graphql_jwt.decorators import login_required
from rescape_graphene import enforce_unique_props
from rescape_graphene import graphql_update_or_create, graphql_query, guess_update_or_create, \
CREATE, UPDATE, input_type_parameters_for_update_or_create, input_type_fields, merge_with_django_properties, \
create_paginated_type_mixin
from rescape_graphene.django_helpers.pagination import resolve_paginated_for_type, pagination_allowed_filter_arguments
from rescape_graphene.graphql_helpers.schema_helpers import update_or_create_with_revision, \
top_level_allowed_filter_arguments, delete_if_marked_for_delete, \
query_with_filter_and_order_kwargs
from rescape_python_helpers import ramda as R
from rescape_region.models import Location
from rescape_region.schema_models.scope.location.location_schema_helpers import LocationType, raw_location_fields
# Full Location field config (including the reverse projects relationship)
location_fields = merge_with_django_properties(LocationType, raw_location_fields(True))
# Paginated version of LocationType
(LocationPaginatedType, location_paginated_fields) = itemgetter('type', 'fields')(
    create_paginated_type_mixin(LocationType, location_fields)
)
class LocationQuery(ObjectType):
    """Graphene query endpoints for Location: locations and locationsPaginated."""
    locations = graphene.List(
        LocationType,
        **top_level_allowed_filter_arguments(location_fields, LocationType)
    )
    locations_paginated = Field(
        LocationPaginatedType,
        **pagination_allowed_filter_arguments(location_paginated_fields, LocationPaginatedType)
    )
    @staticmethod
    def _resolve_locations(info, **kwargs):
        # Default to not deleted, it can be overridden by kwargs
        return query_with_filter_and_order_kwargs(Location, **R.merge(dict(deleted__isnull=True), kwargs))
    @login_required
    def resolve_locations(self, info, **kwargs):
        return LocationQuery._resolve_locations(info, **kwargs)
    @login_required
    def resolve_locations_paginated(self, info, **kwargs):
        return resolve_paginated_for_type(
            LocationPaginatedType,
            LocationQuery._resolve_locations,
            **kwargs
        )
# Names used by the client-side graphql helpers for the create/update mutations
location_mutation_config = dict(
    class_name='Location',
    crud={
        CREATE: 'createLocation',
        UPDATE: 'updateLocation'
    },
    resolve=guess_update_or_create
)
class UpsertLocation(Mutation):
    """
    Abstract base class for the create/update Location mutations. Handles
    delete/undelete requests, deep-merging of the json data field, and
    uniqueness enforcement.
    """
    location = Field(LocationType)
    @transaction.atomic
    @login_required
    def mutate(self, info, location_data=None):
        # Fix: removed the redundant inner `with transaction.atomic():` — the
        # @transaction.atomic decorator already wraps this method, matching the
        # equivalent Project and Region mutations.
        deleted_location_response = delete_if_marked_for_delete(Location, UpsertLocation, 'location', location_data)
        if deleted_location_response:
            return deleted_location_response
        # We must merge in existing location.data if we are updating data
        if R.has('id', location_data) and R.has('data', location_data):
            # New data gets priority, but this is a deep merge.
            location_data['data'] = R.merge_deep(
                Location.objects.get(id=location_data['id']).data,
                location_data['data']
            )
        # Make sure that all props are unique that must be, either by modifying values or erring.
        modified_location_data = enforce_unique_props(location_fields, location_data)
        update_or_create_values = input_type_parameters_for_update_or_create(location_fields, modified_location_data)
        location, created = update_or_create_with_revision(Location, update_or_create_values)
        return UpsertLocation(location=location)
class CreateLocation(UpsertLocation):
    """
    Create Location mutation class
    """
    class Arguments:
        # Input type is generated dynamically from the field config for CREATE access
        location_data = type('CreateLocationInputType', (InputObjectType,),
                             input_type_fields(location_fields, CREATE, LocationType))(required=True)
class UpdateLocation(UpsertLocation):
    """
    Update Location mutation class
    """
    class Arguments:
        # Input type is generated dynamically from the field config for UPDATE access
        location_data = type('UpdateLocationInputType', (InputObjectType,),
                             input_type_fields(location_fields, UPDATE, LocationType))(required=True)
class LocationMutation(graphene.ObjectType):
    """Graphene mutation endpoints for Location: createLocation and updateLocation."""
    create_location = CreateLocation.Field()
    update_location = UpdateLocation.Field()
# Client-side graphql document builders for mutating and querying locations
graphql_update_or_create_location = graphql_update_or_create(location_mutation_config, location_fields)
graphql_query_locations = graphql_query(LocationType, location_fields, 'locations')
# This must be referenced in settings.py
location_schema_config = dict(
    model_class=Location,
    graphene_class=LocationType,
    graphene_fields=location_fields,
    query=LocationQuery,
    mutation=LocationMutation
)
# Client-side graphql document builder for the paginated locations query
graphql_query_locations_paginated = graphql_query(
    LocationPaginatedType,
    location_paginated_fields,
    'locationsPaginated'
) | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/scope/location/location_schema.py | 0.605916 | 0.204124 | location_schema.py | pypi |
from rescape_python_helpers import ramda as R
from django.db import transaction
# Sample Region dicts used by create_sample_regions for tests/fixtures
sample_regions = [
    dict(
        key='norwayOslo',
        name='Oslo, Norway',
        geojson={
            'type': 'FeatureCollection',
            'features': [{
                "type": "Feature",
                "geometry": {
                    "type": "Polygon",
                    "coordinates": [
                        [[49.5294835476, 2.51357303225], [51.4750237087, 2.51357303225],
                         [51.4750237087, 6.15665815596],
                         [49.5294835476, 6.15665815596], [49.5294835476, 2.51357303225]]]
                }
            }]
        },
        data=dict(locations=dict(params=[dict(country='Norway', city='Oslo')]))
    ),
    dict(
        key='belgium',
        name='Belgium',
        geojson={
            'type': 'FeatureCollection',
            'features': [{
                "type": "Feature",
                "geometry": {
                    "type": "Polygon",
                    "coordinates": [
                        [[49.5294835476, 2.51357303225], [51.4750237087, 2.51357303225],
                         [51.4750237087, 6.15665815596],
                         [49.5294835476, 6.15665815596], [49.5294835476, 2.51357303225]]]
                }
            }]
        },
        data=dict(locations=dict(params=[dict(country='Belgium')]))
    )
]
@transaction.atomic
def create_sample_region(cls, region_dict):
    """Instantiate and persist one sample region from its dict representation."""
    instance = cls(**region_dict)
    instance.save()
    return instance
def delete_sample_regions(cls):
    """Delete ALL persisted regions (not only samples) to give a clean slate."""
    cls.objects.all().delete()
def create_sample_regions(cls):
    """
    Create sample regions
    :param cls: The Region class
    :return: The list of persisted Region instances
    """
    delete_sample_regions(cls)
    # Convert all sample region dicts to persisted Region instances
    # Give each reach an owner
    return R.map(
        lambda kv: create_sample_region(cls, kv[1]),
        enumerate(sample_regions)
) | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/scope/region/region_sample.py | 0.632843 | 0.338104 | region_sample.py | pypi |
from operator import itemgetter
import graphene
from django.db import transaction
from graphene import InputObjectType, Mutation, Field, ObjectType
from graphene_django.types import DjangoObjectType
from graphql_jwt.decorators import login_required
from rescape_graphene import REQUIRE, graphql_update_or_create, graphql_query, guess_update_or_create, \
CREATE, UPDATE, input_type_parameters_for_update_or_create, input_type_fields, merge_with_django_properties, \
DENY, FeatureCollectionDataType, resolver_for_dict_field, create_paginated_type_mixin, \
get_paginator
from rescape_graphene import increment_prop_until_unique, enforce_unique_props
from rescape_graphene.django_helpers.pagination import resolve_paginated_for_type, pagination_allowed_filter_arguments
from rescape_graphene.graphql_helpers.schema_helpers import process_filter_kwargs, delete_if_marked_for_delete, \
update_or_create_with_revision, top_level_allowed_filter_arguments, allowed_filter_arguments
from rescape_graphene.schema_models.django_object_type_revisioned_mixin import reversion_and_safe_delete_types, \
DjangoObjectTypeRevisionedMixin
from rescape_graphene.schema_models.geojson.types.feature_collection import feature_collection_data_type_fields
from rescape_python_helpers import ramda as R
from rescape_region.model_helpers import get_region_model
from rescape_region.models.region import Region
from .region_data_schema import RegionDataType, region_data_fields
# Declarative field config for Region; merged below with the Django model's own
# properties to produce region_fields
raw_region_fields = dict(
    id=dict(create=DENY, update=REQUIRE),
    key=dict(create=REQUIRE, unique_with=increment_prop_until_unique(Region, None, 'key', R.pick(['deleted']))),
    name=dict(create=REQUIRE),
    # This refers to the RegionDataType, which is a representation of all the json fields of Region.data
    data=dict(graphene_type=RegionDataType, fields=region_data_fields, default=lambda: dict()),
    # This is the OSM geojson
    geojson=dict(
        graphene_type=FeatureCollectionDataType,
        fields=feature_collection_data_type_fields
    ),
    **reversion_and_safe_delete_types
)
class RegionType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
    """Graphene type for the configured Region Django model."""
    id = graphene.Int(source='pk')
    class Meta:
        model = get_region_model()
# Modify data field to use the resolver.
# I guess there's no way to specify a resolver upon field creation, since graphene just reads the underlying
# Django model to generate the fields
RegionType._meta.fields['data'] = Field(
    RegionDataType,
    resolver=resolver_for_dict_field
)
# Modify the geojson field to use the geometry collection resolver
RegionType._meta.fields['geojson'] = Field(
    FeatureCollectionDataType,
    resolver=resolver_for_dict_field
)
# Merge the declarative config with the Django model's own properties
region_fields = merge_with_django_properties(RegionType, raw_region_fields)
# Paginated version of RegionType
(RegionPaginatedType, region_paginated_fields) = itemgetter('type', 'fields')(
    create_paginated_type_mixin(RegionType, region_fields)
)
class RegionQuery(ObjectType):
    """Graphene query endpoints for Region: regions and regionsPaginated."""
    regions = graphene.List(
        RegionType,
        **top_level_allowed_filter_arguments(region_fields, RegionType)
    )
    regions_paginated = Field(
        RegionPaginatedType,
        **pagination_allowed_filter_arguments(region_paginated_fields, RegionPaginatedType)
    )
    @staticmethod
    def _resolve_regions(info, **kwargs):
        # Shared by both resolvers; delegates to the module-level region_resolver
        return region_resolver('filter', **kwargs)
    @login_required
    def resolve_regions(self, info, **kwargs):
        return RegionQuery._resolve_regions(info, **kwargs)
    @login_required
    def resolve_regions_paginated(self, info, **kwargs):
        return resolve_paginated_for_type(
            RegionPaginatedType,
            RegionQuery._resolve_regions,
            **kwargs
        )
def region_resolver(manager_method, **kwargs):
    """
    Resolves the regions for model get_region_model()
    :param manager_method: 'filter', 'get', or 'count'
    :param kwargs: Filter arguments for the Region
    :return: The result of the manager method applied to the filter expressions
    """
    region_model = get_region_model()
    # Exclude soft-deleted regions unless the caller explicitly filters on deleted
    filters = R.merge(dict(deleted__isnull=True), kwargs)
    q_expressions = process_filter_kwargs(region_model, **filters)
    manager_call = getattr(region_model.objects, manager_method)
    return manager_call(*q_expressions)
# Names used by the client-side graphql helpers for the create/update mutations
region_mutation_config = dict(
    class_name='Region',
    crud={
        CREATE: 'createRegion',
        UPDATE: 'updateRegion'
    },
    resolve=guess_update_or_create
)
class UpsertRegion(Mutation):
    """
    Abstract base class for mutation
    """
    region = Field(RegionType)
    @transaction.atomic
    @login_required
    def mutate(self, info, region_data=None):
        # Handle a delete/undelete request first; returns None when this is a plain upsert
        deleted_region_response = delete_if_marked_for_delete(Region, UpsertRegion, 'region', region_data)
        if deleted_region_response:
            return deleted_region_response
        # We must merge in existing region.data if we are updating data
        if R.has('id', region_data) and R.has('data', region_data):
            # New data gets priority, but this is a deep merge.
            region_data['data'] = R.merge_deep(
                Region.objects.get(id=region_data['id']).data,
                region_data['data']
            )
        # Make sure that all props are unique that must be, either by modifying values or erring.
        modified_region_data = enforce_unique_props(region_fields, region_data)
        update_or_create_values = input_type_parameters_for_update_or_create(region_fields, modified_region_data)
        region, created = update_or_create_with_revision(Region, update_or_create_values)
        return UpsertRegion(region=region)
class CreateRegion(UpsertRegion):
    """
    Create Region mutation class
    """
    class Arguments:
        # Input type is generated dynamically from the field config for CREATE access
        region_data = type('CreateRegionInputType', (InputObjectType,),
                           input_type_fields(region_fields, CREATE, RegionType))(required=True)
class UpdateRegion(UpsertRegion):
    """
    Update Region mutation class
    """
    class Arguments:
        # Input type is generated dynamically from the field config for UPDATE access
        region_data = type('UpdateRegionInputType', (InputObjectType,),
                           input_type_fields(region_fields, UPDATE, RegionType))(required=True)
class RegionMutation(graphene.ObjectType):
create_region = CreateRegion.Field()
update_region = UpdateRegion.Field()
# Mutation document builder and plain query document for Region
graphql_update_or_create_region = graphql_update_or_create(region_mutation_config, region_fields)
graphql_query_regions = graphql_query(RegionType, region_fields, 'regions')
def graphql_query_regions_limited(region_fields):
    """
    Builds a regions query restricted to the given subset of region fields

    :param region_fields: The field configuration to limit the query to
    :return: The graphql query document
    """
    return graphql_query(RegionType, region_fields, 'regions')
graphql_query_regions_paginated = graphql_query(
RegionPaginatedType,
region_paginated_fields,
'regionsPaginated'
) | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/scope/region/region_schema.py | 0.650356 | 0.240039 | region_schema.py | pypi |
from graphene import ObjectType, Float, Field, Int, String
from rescape_graphene import resolver_for_dict_field, \
type_modify_fields
# Fields of the viewport JSON blob (latitude/longitude/zoom)
settings_viewport_data_fields = dict(
    latitude=dict(type=Float),
    longitude=dict(type=Float),
    zoom=dict(type=Int)
)
# Viewport settings within Mapbox
SettingsViewportDataType = type(
    'SettingsViewportDataType',
    (ObjectType,),
    type_modify_fields(settings_viewport_data_fields)
)
settings_mapbox_data_fields = dict(
    viewport=dict(
        type=SettingsViewportDataType,
        graphene_type=SettingsViewportDataType,
        fields=settings_viewport_data_fields,
        # Wrap as a Field whose resolver reads values out of the underlying dict
        type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field),
    )
)
# Mapbox settings for the User's use of a particular Region
SettingsMapboxDataType = type(
    'SettingsMapboxDataType',
    (ObjectType,),
    type_modify_fields(settings_mapbox_data_fields)
)
# Fields describing the API connection (all strings)
settings_api_data_field = dict(
    protocol=dict(type=String),
    host=dict(type=String),
    port=dict(type=String),
    path=dict(type=String),
)
# The API settings
SettingsApiDataType = type(
    'SettingsApiDataType',
    (ObjectType,),
    type_modify_fields(settings_api_data_field)
)
settings_overpass_data_fields = dict(
    cellSize=dict(type=Int),
    sleepBetweenCalls=dict(type=Int),
)
# The Overpass (OpenStreetMap) API settings
SettingsOverpassDataType = type(
    'SettingsOverpassDataType',
    (ObjectType,),
    type_modify_fields(settings_overpass_data_fields)
)
# All fields of the Settings.data json blob: domain plus the nested
# api, overpass and mapbox configuration objects defined above
settings_data_fields = dict(
    domain=dict(type=String),
    api=dict(
        type=SettingsApiDataType,
        graphene_type=SettingsApiDataType,
        fields=settings_api_data_field,
        type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field)
    ),
    overpass=dict(
        type=SettingsOverpassDataType,
        graphene_type=SettingsOverpassDataType,
        fields=settings_overpass_data_fields,
        type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field)
    ),
    mapbox=dict(
        type=SettingsMapboxDataType,
        graphene_type=SettingsMapboxDataType,
        fields=settings_mapbox_data_fields,
        type_modifier=lambda *type_and_args: Field(*type_and_args, resolver=resolver_for_dict_field)
    )
)
SettingsDataType = type(
'SettingsDataType',
(ObjectType,),
type_modify_fields(settings_data_fields)
) | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/settings/settings_data_schema.py | 0.473901 | 0.229244 | settings_data_schema.py | pypi |
import graphene
from django.db import transaction
from graphene import InputObjectType, Mutation, Field, ObjectType
from graphene_django.types import DjangoObjectType
from graphql_jwt.decorators import login_required
from rescape_graphene import REQUIRE, graphql_update_or_create, graphql_query, guess_update_or_create, \
CREATE, UPDATE, input_type_parameters_for_update_or_create, input_type_fields, merge_with_django_properties, \
DENY, resolver_for_dict_field
from rescape_graphene import enforce_unique_props
from rescape_graphene.graphql_helpers.schema_helpers import process_filter_kwargs, update_or_create_with_revision, \
top_level_allowed_filter_arguments
from rescape_graphene.schema_models.django_object_type_revisioned_mixin import reversion_and_safe_delete_types, \
DjangoObjectTypeRevisionedMixin
from rescape_python_helpers import ramda as R
from rescape_region.models.settings import Settings
from rescape_region.schema_models.scope.region.region_schema import RegionType
from .settings_data_schema import SettingsDataType, settings_data_fields
# Input/output field configuration for SettingsType
raw_settings_fields = dict(
    id=dict(create=DENY, update=REQUIRE),
    key=dict(create=REQUIRE),
    # This refers to the SettingsDataType, which is a representation of all the json fields of Settings.data
    data=dict(graphene_type=SettingsDataType, fields=settings_data_fields, default=lambda: dict()),
    **reversion_and_safe_delete_types
)
class SettingsType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
    """Graphene type backed by the Settings Django model."""
    # Expose the primary key as an integer id field
    id = graphene.Int(source='pk')
    class Meta:
        model = Settings
# Modify data field to use the resolver.
# I guess there's no way to specify a resolver upon field creation, since graphene just reads the underlying
# Django model to generate the fields
SettingsType._meta.fields['data'] = Field(
    SettingsDataType,
    resolver=resolver_for_dict_field
)
# Combine the raw field config with the Django-derived properties of SettingsType
settings_fields = merge_with_django_properties(SettingsType, raw_settings_fields)
# Configuration consumed by graphql_update_or_create to build the Settings
# create/update mutation documents
settings_mutation_config = dict(
    class_name='Settings',
    crud={
        CREATE: 'createSettings',
        UPDATE: 'updateSettings'
    },
    resolve=guess_update_or_create
)
class SettingsQuery(ObjectType):
    """
    Query type exposing the settings field with filter arguments derived
    from settings_fields.
    """
    settings = graphene.List(
        SettingsType,
        # BUG FIX: was RegionType -- the filter arguments must be generated
        # against SettingsType, matching the pattern of the sibling queries
        # (JurisdictionQuery/ResourceQuery each pass their own type)
        **top_level_allowed_filter_arguments(settings_fields, SettingsType)
    )

    def resolve_settings(self, info, **kwargs):
        """
        Resolves Settings matching kwargs, excluding soft-deleted instances
        unless the caller overrides deleted__isnull
        """
        q_expressions = process_filter_kwargs(Settings, **R.merge(dict(deleted__isnull=True), kwargs))
        return Settings.objects.filter(
            *q_expressions
        )
class UpsertSettings(Mutation):
    """
    Abstract base mutation for creating or updating Settings
    """
    settings = Field(SettingsType)

    @transaction.atomic
    @login_required
    def mutate(self, info, settings_data=None):
        # When updating the data field, deep-merge the incoming data over the
        # persisted data so unspecified keys are preserved (new values win)
        if R.has('id', settings_data) and R.has('data', settings_data):
            existing_data = Settings.objects.get(id=settings_data['id']).data
            settings_data['data'] = R.merge_deep(existing_data, settings_data['data'])
        # Enforce uniqueness constraints, either by modifying values or erring
        unique_settings_data = enforce_unique_props(settings_fields, settings_data)
        values = input_type_parameters_for_update_or_create(settings_fields, unique_settings_data)
        settings, created = update_or_create_with_revision(Settings, values)
        return UpsertSettings(settings=settings)
class CreateSettings(UpsertSettings):
    """
    Create Settings mutation class
    """
    class Arguments:
        # InputObjectType built dynamically from the fields allowed for CREATE
        settings_data = type('CreateSettingsInputType', (InputObjectType,),
                             input_type_fields(settings_fields, CREATE, SettingsType))(required=True)
class UpdateSettings(UpsertSettings):
    """
    Update Settings mutation class
    """
    class Arguments:
        # InputObjectType built dynamically from the fields allowed for UPDATE
        settings_data = type('UpdateSettingsInputType', (InputObjectType,),
                             input_type_fields(settings_fields, UPDATE, SettingsType))(required=True)
class SettingsMutation(graphene.ObjectType):
    """Root mutation fields for Settings"""
    create_settings = CreateSettings.Field()
    update_settings = UpdateSettings.Field()
# Mutation document builder for Settings
graphql_update_or_create_settings = graphql_update_or_create(settings_mutation_config, settings_fields)
graphql_query_settings = graphql_query(SettingsType, settings_fields, 'settings') | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/settings/settings_schema.py | 0.556882 | 0.245401 | settings_schema.py | pypi |
from operator import itemgetter
import graphene
from django.db import transaction
from graphene import InputObjectType, Mutation, Field, ObjectType
from graphene_django.types import DjangoObjectType
from rescape_graphene import REQUIRE, graphql_update_or_create, graphql_query, guess_update_or_create, \
CREATE, UPDATE, input_type_parameters_for_update_or_create, input_type_fields, merge_with_django_properties, \
DENY, FeatureCollectionDataType, resolver_for_dict_field, create_paginated_type_mixin
from rescape_graphene import enforce_unique_props
from rescape_graphene.django_helpers.pagination import resolve_paginated_for_type, pagination_allowed_filter_arguments
from rescape_graphene.django_helpers.versioning import create_version_container_type, resolve_version_instance, \
versioning_allowed_filter_arguments
from rescape_graphene.graphql_helpers.schema_helpers import process_filter_kwargs, delete_if_marked_for_delete, \
update_or_create_with_revision, ALLOW, top_level_allowed_filter_arguments
from rescape_graphene.schema_models.django_object_type_revisioned_mixin import reversion_and_safe_delete_types, \
DjangoObjectTypeRevisionedMixin
from rescape_graphene.schema_models.geojson.types.feature_collection import feature_collection_data_type_fields
from rescape_region.schema_models.jurisdiction.jurisdiction_data_schema import JurisdictionDataType, \
jurisdiction_data_fields
from rescape_region.models.jurisdiction import Jurisdiction
class JurisdictionType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
    """Graphene type backed by the Jurisdiction Django model."""
    # Expose the primary key as an integer id field
    id = graphene.Int(source='pk')
    class Meta:
        model = Jurisdiction
# Modify the geojson field to use the geometry collection resolver
JurisdictionType._meta.fields['geojson'] = Field(
    FeatureCollectionDataType,
    resolver=resolver_for_dict_field
)
# Resolve the data json field through the dict-field resolver as well
JurisdictionType._meta.fields['data'] = Field(
    JurisdictionDataType,
    resolver=resolver_for_dict_field
)
# Input/output field configuration for JurisdictionType, merged with the
# Django-derived properties of the model
jurisdiction_fields = merge_with_django_properties(JurisdictionType, dict(
    id=dict(create=DENY, update=REQUIRE),
    # This is the OSM geojson for the jurisdiction
    geojson=dict(
        graphene_type=FeatureCollectionDataType,
        fields=feature_collection_data_type_fields,
        # Allow geojson as related input as long as id so we can create/update jurisdictions when saving locations
        related_input=ALLOW
    ),
    data=dict(
        graphene_type=JurisdictionDataType,
        fields=jurisdiction_data_fields,
        default=lambda: dict(streets=[]),
        # Allow data as related input as long as id so we can create/update jurisdictions when saving locations
        related_input=ALLOW
    ),
    **reversion_and_safe_delete_types
))
# Paginated version of JurisdictionType
(JurisdictionPaginatedType, jurisdiction_paginated_fields) = itemgetter('type', 'fields')(
    create_paginated_type_mixin(JurisdictionType, jurisdiction_fields)
)
# Revision version of JurisdictionType, wrapping instances with version metadata
(JurisdictionVersionedType, jurisdiction_versioned_fields) = itemgetter('type', 'fields')(
    create_version_container_type(JurisdictionType, jurisdiction_fields)
)
class JurisdictionQuery(ObjectType):
    """
    Query type exposing jurisdictions, plus paginated and versioned variants.
    """
    jurisdictions = graphene.List(
        JurisdictionType,
        **top_level_allowed_filter_arguments(jurisdiction_fields, JurisdictionType)
    )
    jurisdictions_paginated = Field(
        JurisdictionPaginatedType,
        **pagination_allowed_filter_arguments(jurisdiction_paginated_fields, JurisdictionPaginatedType)
    )
    jurisdictions_versioned = Field(
        JurisdictionVersionedType,
        **versioning_allowed_filter_arguments(jurisdiction_versioned_fields, JurisdictionVersionedType)
    )

    @staticmethod
    def _resolve_jurisdictions(info, **kwargs):
        return jurisdiction_resolver('filter', **kwargs)

    def resolve_jurisdictions(self, info, **kwargs):
        # BUG FIX: previously passed info as the manager_method argument of
        # jurisdiction_resolver, which expects 'filter', 'get' or 'count'
        # (it is used in getattr(Jurisdiction.objects, manager_method))
        return jurisdiction_resolver('filter', **kwargs)

    def resolve_jurisdictions_paginated(self, info, **kwargs):
        """
        Resolves a page of jurisdictions matching the kwargs
        """
        return resolve_paginated_for_type(
            JurisdictionPaginatedType,
            JurisdictionQuery._resolve_jurisdictions,
            **kwargs
        )

    def resolve_jurisdictions_versioned(self, info, **kwargs):
        """
        Get the version history of the jurisdiction matching the kwargs
        :param info:
        :param kwargs: id is the only thing required
        :return: A list of versions
        """
        return resolve_version_instance(JurisdictionVersionedType, jurisdiction_resolver, **kwargs)
def jurisdiction_resolver(manager_method, **kwargs):
    """
    Resolves Jurisdiction instances via the given queryset manager method.

    :param manager_method: 'filter', 'get', or 'count'
    :param kwargs: Filter arguments for the Jurisdiction
    :return: The result of the manager method applied to the filters
    """
    # Note: unlike region_resolver, no default deleted__isnull filter is applied here
    filters = process_filter_kwargs(Jurisdiction, **kwargs)
    manager = Jurisdiction.objects
    return getattr(manager, manager_method)(*filters)
# Configuration consumed by graphql_update_or_create to build the Jurisdiction
# create/update mutation documents
jurisdiction_mutation_config = dict(
    class_name='Jurisdiction',
    crud={
        CREATE: 'createJurisdiction',
        UPDATE: 'updateJurisdiction'
    },
    resolve=guess_update_or_create
)
class UpsertJurisdiction(Mutation):
    """
    Abstract base mutation for creating or updating a Jurisdiction
    """
    jurisdiction = Field(JurisdictionType)

    # NOTE(review): unlike the other Upsert mutations in this package, this
    # mutate has no @login_required decorator -- confirm that is intentional
    @transaction.atomic
    def mutate(self, info, jurisdiction_data=None):
        # A jurisdiction_data marked for deletion short-circuits the upsert
        delete_response = delete_if_marked_for_delete(
            Jurisdiction, UpsertJurisdiction, 'jurisdiction',
            jurisdiction_data
        )
        if delete_response:
            return delete_response
        # Enforce uniqueness constraints, either by modifying values or erring
        unique_jurisdiction_data = enforce_unique_props(jurisdiction_fields, jurisdiction_data)
        values = input_type_parameters_for_update_or_create(
            jurisdiction_fields, unique_jurisdiction_data
        )
        jurisdiction, created = update_or_create_with_revision(Jurisdiction, values)
        return UpsertJurisdiction(jurisdiction=jurisdiction)
class CreateJurisdiction(UpsertJurisdiction):
    """
    Create Jurisdiction mutation class
    """
    class Arguments:
        # InputObjectType built dynamically from the fields allowed for CREATE
        jurisdiction_data = type('CreateJurisdictionInputType', (InputObjectType,),
                                 input_type_fields(jurisdiction_fields, CREATE, JurisdictionType))(required=True)
class UpdateJurisdiction(UpsertJurisdiction):
    """
    Update Jurisdiction mutation class
    """
    class Arguments:
        # InputObjectType built dynamically from the fields allowed for UPDATE
        jurisdiction_data = type('UpdateJurisdictionInputType', (InputObjectType,),
                                 input_type_fields(jurisdiction_fields, UPDATE, JurisdictionType))(required=True)
class JurisdictionMutation(graphene.ObjectType):
    """Root mutation fields for Jurisdiction"""
    create_jurisdiction = CreateJurisdiction.Field()
    update_jurisdiction = UpdateJurisdiction.Field()
# Mutation document builder and query documents for Jurisdiction
graphql_update_or_create_jurisdiction = graphql_update_or_create(jurisdiction_mutation_config, jurisdiction_fields)
graphql_query_jurisdictions = graphql_query(JurisdictionType, jurisdiction_fields, 'jurisdictions')
def graphql_query_jurisdictions_limited(jurisdiction_fields):
    """
    Builds a jurisdictions query restricted to the given subset of jurisdiction fields

    :param jurisdiction_fields: The field configuration to limit the query to
    :return: The graphql query document
    """
    return graphql_query(JurisdictionType, jurisdiction_fields, 'jurisdictions')
# Paginated variant of the jurisdictions query
graphql_query_jurisdictions_paginated = graphql_query(
    JurisdictionPaginatedType,
    jurisdiction_paginated_fields,
    'jurisdictionsPaginated'
)
graphql_query_jurisdictions_versioned = graphql_query(
JurisdictionVersionedType,
jurisdiction_versioned_fields,
'jurisdictionsVersioned'
) | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/jurisdiction/jurisdiction_schema.py | 0.551091 | 0.259532 | jurisdiction_schema.py | pypi |
import graphene
from django.db import transaction
from graphene import InputObjectType, Mutation, Field, ObjectType
from graphene_django.types import DjangoObjectType
from graphql_jwt.decorators import login_required
from rescape_graphene import REQUIRE, graphql_update_or_create, graphql_query, guess_update_or_create, \
CREATE, UPDATE, input_type_parameters_for_update_or_create, input_type_fields, merge_with_django_properties, \
DENY, FeatureCollectionDataType, resolver_for_dict_field, increment_prop_until_unique
from rescape_graphene import enforce_unique_props
from rescape_graphene.graphql_helpers.schema_helpers import process_filter_kwargs, update_or_create_with_revision, \
top_level_allowed_filter_arguments
from rescape_graphene.schema_models.django_object_type_revisioned_mixin import reversion_and_safe_delete_types, \
DjangoObjectTypeRevisionedMixin
from rescape_python_helpers import ramda as R
from rescape_region.helpers.sankey_helpers import add_sankey_graph_to_resource_dict
from rescape_region.models.resource import Resource
from rescape_region.schema_models.scope.region.region_schema import RegionType
from rescape_region.schema_models.resource_data_schema import ResourceDataType, resource_data_fields
class ResourceType(DjangoObjectType, DjangoObjectTypeRevisionedMixin):
    """Graphene type backed by the Resource Django model."""
    # Expose the primary key as an integer id field
    id = graphene.Int(source='pk')
    class Meta:
        model = Resource
# Input/output field configuration for ResourceType, merged with the
# Django-derived properties of the model
raw_resource_fields = merge_with_django_properties(ResourceType, dict(
    id=dict(create=DENY, update=REQUIRE),
    # key must be unique among non-deleted Resources; increment until unique
    key=dict(create=REQUIRE, unique_with=increment_prop_until_unique(Resource, None, 'key', R.pick(['deleted']))),
    name=dict(create=REQUIRE),
    # This refers to the Resource, which is a representation of all the json fields of Resource.data
    data=dict(graphene_type=ResourceDataType, fields=resource_data_fields, default=lambda: dict()),
    # This is a Foreign Key. Graphene generates these relationships for us, but we need it here to
    # support our Mutation subclasses and query_argument generation
    # For simplicity we limit fields to id. Mutations can only use id, and a query doesn't need other
    # details of the resource--it can query separately for that
    region=dict(graphene_type=RegionType,
                fields=merge_with_django_properties(RegionType, dict(id=dict(create=REQUIRE)))),
    **reversion_and_safe_delete_types
))
# Modify data field to use the resolver.
# I guess there's no way to specify a resolver upon field creation, since graphene just reads the underlying
# Django model to generate the fields
ResourceType._meta.fields['data'] = Field(
    ResourceDataType,
    resolver=resolver_for_dict_field
)
# Modify the geojson field to use the geometry collection resolver
ResourceType._meta.fields['geojson'] = Field(
    FeatureCollectionDataType,
    resolver=resolver_for_dict_field
)
# NOTE(review): raw_resource_fields was already produced by
# merge_with_django_properties above, so this second merge looks redundant --
# confirm whether it is intentional
resource_fields = merge_with_django_properties(ResourceType, raw_resource_fields)
class ResourceQuery(ObjectType):
    """
    Query type exposing the resources field with filter arguments derived
    from resource_fields.
    """
    # NOTE(review): an id field on the query root ObjectType looks like a
    # copy-paste from ResourceType -- confirm whether it is needed here
    id = graphene.Int(source='pk')
    resources = graphene.List(
        ResourceType,
        **top_level_allowed_filter_arguments(resource_fields, ResourceType)
    )
    @login_required
    def resolve_resources(self, info, **kwargs):
        # Exclude soft-deleted resources unless the caller overrides deleted__isnull
        q_expressions = process_filter_kwargs(Resource, **R.merge(dict(deleted__isnull=True), kwargs))
        return Resource.objects.filter(
            *q_expressions
        )
# Configuration consumed by graphql_update_or_create to build the Resource
# create/update mutation documents
resource_mutation_config = dict(
    class_name='Resource',
    crud={
        CREATE: 'createResource',
        UPDATE: 'updateResource'
    },
    resolve=guess_update_or_create
)
class UpsertResource(Mutation):
    """
    Abstract base class for the Resource create/update mutations
    """
    resource = Field(ResourceType)
    @transaction.atomic
    @login_required
    def mutate(self, info, resource_data=None):
        # We must merge in existing resource.data if we are updating:
        # new data gets priority, but this is a deep merge
        if R.has('id', resource_data):
            resource_data['data'] = R.merge_deep(
                Resource.objects.get(id=resource_data['id']).data,
                R.prop_or({}, 'data', resource_data)
            )
        # Make sure that all props are unique that must be, either by modifying values or erring.
        modified_resource_data = enforce_unique_props(resource_fields, resource_data)
        update_or_create_values = input_type_parameters_for_update_or_create(resource_fields, modified_resource_data)
        # Modifies the defaults value to add the derived .data.graph (sankey data),
        # unless we are updating the instance without a 'defaults' entry.
        # We could decide in the future to generate this derived data on the client, but it's easy enough to do here
        update_or_create_values_with_sankey_data = R.merge(update_or_create_values, dict(
            defaults=add_sankey_graph_to_resource_dict(
                update_or_create_values['defaults']
            )
        )) if R.has('defaults', update_or_create_values) else update_or_create_values
        resource, created = update_or_create_with_revision(Resource, update_or_create_values_with_sankey_data)
        return UpsertResource(resource=resource)
class CreateResource(UpsertResource):
    """
    Create Resource mutation class
    """
    class Arguments:
        # InputObjectType built dynamically from the fields allowed for CREATE
        resource_data = type('CreateResourceInputType', (InputObjectType,),
                             input_type_fields(resource_fields, CREATE, ResourceType))(required=True)
class UpdateResource(UpsertResource):
    """
    Update Resource mutation class
    """
    class Arguments:
        # InputObjectType built dynamically from the fields allowed for UPDATE
        resource_data = type('UpdateResourceInputType', (InputObjectType,),
                             input_type_fields(resource_fields, UPDATE, ResourceType))(required=True)
class ResourceMutation(graphene.ObjectType):
    """Root mutation fields for Resource"""
    create_resource = CreateResource.Field()
    update_resource = UpdateResource.Field()
# Mutation document builder for Resource
graphql_update_or_create_resource = graphql_update_or_create(resource_mutation_config, resource_fields)
graphql_query_resources = graphql_query(ResourceType, resource_fields, 'resources') | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/resource/resource_schema.py | 0.54698 | 0.248101 | resource_schema.py | pypi |
from rescape_python_helpers import ramda as R
from rescape_region.models.resource import Resource
from rescape_region.helpers.sankey_helpers import generate_sankey_data
# Sankey parsing configuration merged into each sample resource's data dict
# below. It configures how the semicolon-delimited raw rows are turned into
# sankey nodes and links.
sample_settings = dict(
    settings=dict(
        defaultLocation=[4.3517, 50.8503],
        # The columns of the raw_data
        columns=[
            'siteName',
            'location',
            'coordinates',
            'junctionStage',
            'annualTonnage'
        ],
        # The column name used to name each stage
        stageKey='junctionStage',
        # The column used for node and link values
        valueKey='annualTonnage',
        # The column of the node location, normally a string representing a 2 element array representing a lat/lon
        locationKey='coordinates',
        nodeNameKey='siteName',
        stages=[
            dict(key='source', name='Source', targets=['conversion']),
            dict(key='conversion', name='Conversion', targets=['distribution']),
            dict(key='distribution', name='Distribution', targets=['demand']),
            dict(key='demand', name='Demand', targets=['reconversion', 'sink']),
            dict(key='reconversion', name='Reconversion', targets=['demand']),
            dict(key='sink', name='Sink', targets=[])
        ]
    )
)
# Map the settings to merge it into each resource
# Settings are stored in resource.data.settings
sample_resources = R.map(
lambda resource_data: R.merge_deep(
resource_data,
dict(
data=sample_settings
)
),
[
dict(
key='minerals',
name='Minerals',
data=dict(
material='Minerals',
rawData=[
'Other Global Imports;Shipments, location generalized;51.309933, 3.055030;Source;22,469,843',
'Knauf (Danilith) BE;Waregemseweg 156-142 9790 Wortegem-Petegem, Belgium;50.864762, 3.479308;Conversion;657,245',
"MPRO Bruxelles;Avenue du Port 67 1000 Bruxelles, Belgium;50.867486, 4.352543;Distribution;18,632",
'Residential Buildings (all typologies);Everywhere in Brussels;NA;Demand;3,882,735',
'Duplex House Typology;Everywhere in Brussels;NA;Demand;13,544',
'Apartment Building Typology;Everywhere in Brussels;NA;Demand;34,643',
'New West Gypsum Recycling;9130 Beveren, Sint-Jansweg 9 Haven 1602, Kallo, Belgium;51.270229, 4.261048;Reconversion;87,565',
'Residential Buildings (all typologies);Everywhere in Brussels;NA;Sink;120,000',
'RecyPark South;1190 Forest, Belgium;50.810799, 4.314789;Sink;3,130',
'RecyPark Nord;Rue du Rupel, 1000 Bruxelles, Belgium;50.880181, 4.377136;Sink;1,162'
]
)
),
dict(
key='metals',
name='Metals',
data=dict(
material='Metals',
rawData=[
'Other Global Imports;Shipments, location generalized;51.309933, 3.055030;Source;367,689',
'Arcelor Steel Belgium;Lammerdries 10, 2440 Geel, Belgium;51.145051, 4.939373;Conversion;27,872',
'Duplex House Typology;Everywhere in Brussels;NA;Demand;3,048',
'Apartment Building Typology;Everywhere in Brussels;NA;Demand;18,548',
'Residential Buildings (all typologies);Everywhere in Brussels;NA;Demand;75,404',
'Metallo Belgium;Nieuwe Dreef 33, 2340 Beerse, Belgium;51.318025, 4.817432;Reconversion;54,000',
'Private Sector Collection;Everywhere in Brussels;NA;Sink;96,316',
'RecyPark South;1190 Forest, Belgium;50.810799, 4.314789;Sink;101',
'RecyPark Nord;Rue du Rupel, 1000 Bruxelles, Belgium;50.880181, 4.377136;Sink;67'
]
)
),
dict(
key='wood',
name='Wood',
data=dict(
material='Wood',
rawData=[
'Forêt de Soignes;Watermael-Boitsfort Belgium ;50.777072, 4.409960;Source;6,288',
'Germany Imports;Germany, nearest point;50.786952, 6.102697;Source;66,812',
'Netherlands Imports;Netherlans, nearest point;51.467197, 4.609125;Source;52,352',
'Other Global Imports;Shipments, location generalized;51.309933, 3.055030;Source;323,384',
'Barthel Pauls Sawmill;Pôle Ardenne Bois 1, 6671 Bovigny, Belgium;50.259872, 5.933474;Conversion;200,430',
"Lochten & Germeau;Bd de l’Humanité, 51, 1190 Vorst, Belgium;50.820974, 4.314469;Distribution; NA, only for directional/path",
'Duplex House Typology;Everywhere in Brussels;NA;Demand;1,955',
'Apartment Building Typology;Everywhere in Brussels;NA;Demand;11,250',
'Residential Buildings (all typologies);Everywhere in Brussels;NA;Demand;45,659',
'Rotor Deconstruction;Prévinairestraat / Rue Prévinaire 58 1070 Anderlecht;50.839714, 4.352730;Reconversion;15,462',
'PAC Uccle;Boulevard de la Deuxième Armée Britannique 625-667 1190 Forest, Belgium;50.801647, 4.305641;Sink;189',
'PAC Saint-Josse;Rue Verboeckhaven 39-17 1210 Saint-Josse-ten-Noode, Belgium;50.854094, 4.375173;Sink;126',
'PAC Woluwe-Saint-Pierre;Avenue du Parc de Woluwe 86-44 1160 Auderghem, Belgium;50.823228, 4.427453;Sink;63.08',
"PAC d’Auderghem/Watermael-Boitsfort;1860 chaussée de Wavre, 1160 Auderghem;50.809948, 4.445271;Sink;252.32",
"RecyPark South;1190 Forest, Belgium;50.810799, 4.314789;Sink;668",
"RecyPark Nord;Rue du Rupel, 1000 Bruxelles, Belgium;50.880181, 4.377136;Sink;445"
]
)
)
]
)
def delete_sample_resources():
    """Delete every persisted Resource instance."""
    all_resources = Resource.objects.all()
    all_resources.delete()
@R.curry
def create_sample_resource(region, resource_dict):
    """
    Persist a single sample Resource for the given region, computing and
    embedding its sankey graph data first.

    :param region: The Region instance to associate with the Resource
    :param resource_dict: Dict of Resource attributes including a 'data' dict
    :return: The saved Resource instance
    """
    # Compute the sankey graph from the sample resource definition
    sankey_graph = generate_sankey_data(resource_dict)
    merged_data = R.merge(
        R.prop('data', resource_dict),
        dict(graph=sankey_graph)
    )
    # Save the resource with the complete data
    instance = Resource(**R.merge(resource_dict, dict(region=region, data=merged_data)))
    instance.save()
    return instance
def create_sample_resources(regions):
# Convert all sample resource dicts to persisted Resource instances
resources = R.map(create_sample_resource(R.head(regions)), sample_resources)
return resources | /rescape-region-0.2.37.tar.gz/rescape-region-0.2.37/rescape_region/schema_models/resource/resource_sample.py | 0.622918 | 0.206534 | resource_sample.py | pypi |
# Reservoir Computers and Chaotic Systems
This package contains an ODE-based reservoir computer for learning time series data.
The package also includes functions for generating and plotting time series data for three chaotic systems.
It additionally contains a module that implements hyperparameter optimization for reservoir computers via the sherpa package.
## Installation
The package is hosted on PyPi and can be installed with pip:
```
pip install rescomp
```
Alternatively, users can download the repository and add the location of the repo to their Python path.
Import the package with `import rescomp as rc`.
## Chaotic Systems
Currently, we support code to generate time series on three chaotic attractors. Time series can be generated with the `orbit` function and plotted in 3D with `plot3d` or in 2D with `plot2d`. (Plots are displayed in a random color so call the plot function again or supply color(s) to the keyword argument if it looks bad.)
1. Thomas' cyclically symmetric attractor
```
t, U = rc.orbit("thomas", duration=1000, dt=0.1)
fig = rc.plot3d(U)
```

2. The Rossler attractor
```
t, U = rc.orbit("rossler", duration=100, dt=0.01)
fig = rc.plot3d(U)
```

3. The Lorenz attractor
```
t, U = rc.orbit("lorenz", duration=100, dt=0.01)
fig = rc.plot3d(U)
```

## Reservoir Computer Class
The package contains two options for reservoir computers: `ResComp` and `DrivenResComp`. The driven reservoir computers are still in beta stage but can be used for designing control algorithms [1]. Here is an example of learning and predicting Thomas' cyclically symmetric attractor:
#### Train and Test
The `train_test_orbit` function returns training and testing sequences on the attractor. The test sequence immediately follows the training sequence.
```
tr, Utr, ts, Uts = rc.train_test_orbit("thomas", duration=1000, dt=0.1)
```
Initialize the **default** reservoir computer and train on the test data with:
```
rcomp_default = rc.ResComp()
rcomp_default.train(tr, Utr)
```
Take the final state of the reservoir nodes and allow it to continue to evolve to predict what happens next.
```
r0 = rcomp_default.r0
pre = rcomp_default.predict(ts, r0=r0)
fig = rc.plot3d(pre)
```

This doesn't look much like Thomas' attractor, suggesting that these parameters are not optimal.
#### Reservoir Hyperparameters
Optimized hyperparameters for each system are included in the package. Initialize a reservoir with optimized hyperparameters as follows:
```
hyper_parameters = rc.SYSTEMS["thomas"]["rcomp_params"]
rcomp = rc.ResComp(**hyper_parameters)
```
Train and predict as before.
```
rcomp.train(tr, Utr)
r0 = rcomp.r0
pre = rcomp.predict(ts, r0=r0)
fig = rc.plot3d(pre)
```

This prediction looks much more like Thomas' attractor.
## Hyperparameter Optimization
The `rescomp.optimizer` package contains a class, `ResCompOptimizer`, that allows for easily performing hyperparameter optimization on a `ResComp` or `DrivenResComp` object:
```
from rescomp.optimizer import ResCompOptimizer
rcopt = ResCompOptimizer('thomas', 'relax', 'random', 'augmented')
rcopt.run_optimization(50, 20)
optimized_hyperparams = rcopt.get_best_result()
```
Also of note are the methods `generate_orbits()` and `run_tests()`.
`generate_orbits()` will generate a given number of orbits from the system as well as the reservoir computer's prediction, which is useful for visual comparisons.
`run_tests()` will test the reservoir computer on continued and random predictions with the given hyperparameters, as well as calculate the derivative fit of its predictions and estimate the Lyapunov exponent of the reservoir dynamical system.
There are four built-in systems: the Lorenz, Thomas, and Rossler chaotic attractors, as described above; and a set of data from a soft robot dynamical system.
Other systems can be created by extending the `rescomp.optimizer.System` class, and can be passed to the optimizer instead of the system string.
There is also a script module, `rescomp.opt_then_test`, that will run hyperparameter optimization on a `ResComp` on a given system, run various tests on the optimized hyperparameters, and save the results.
The script can be run as follows:
```
python3 -m rescomp.opt_then_test [args]
```
For details on what parameters it accepts, run it as:
```
python3 -m rescomp.opt_then_test -h
```
## References
[1] Griffith, A., Pomerance, A., Gauthier, D.. [Forecasting Chaotic Systems with Very Low Connectivity Reservoir
Computers](https://arxiv.org/pdf/1910.00659.pdf) (2019)
| /rescomp-0.2.1.tar.gz/rescomp-0.2.1/README.md | 0.586641 | 0.991899 | README.md | pypi |
.. _tutorial-create:
================================
Create, modify and organize data
================================
To begin, we need some sample data to work with. You may use your own reads
(.fastq) files, or download an example set we have provided:
.. literalinclude:: files/tutorial-create.py
:lines: 2-13
.. note::
To avoid copy-pasting of the commands, you can
:download:`download all the code <files/tutorial-create.py>` used in this section.
Organize resources
==================
Before all else, one needs to prepare space for work. In our case, this
means creating a "container" where the produced data will reside. So
let's create a collection and then put some data inside!
.. literalinclude:: files/tutorial-create.py
:lines: 15-16
Upload files
============
We will upload fastq single end reads with the `upload-fastq-single`_ process.
.. _upload-fastq-single: http://resolwe-bio.readthedocs.io/en/latest/catalog-definitions.html#process-upload-fastq-single
.. literalinclude:: files/tutorial-create.py
:lines: 18-25
What just happened? First, we chose a process to run, using its slug
``upload-fastq-single``. Each process requires some inputs---in this case there
is only one input with name ``src``, which is the location of reads on our
computer. Uploading a fastq file creates a new ``Data`` on the server
containing uploaded reads. Additionally, we ensured that the new
``Data`` is put inside ``test_collection``.
The upload process also created a Sample object for the reads data to be
associated with. You can access it by:
.. literalinclude:: files/tutorial-create.py
:lines: 27
.. note::
You can also upload your files by providing url. Just replace path to your
local files with the url. This comes handy when your files are large and/or
are stored on a remote server and you don't want to download them to your
computer just to upload them to Resolwe server again...
Modify data
===========
Both ``Data`` with reads and ``Sample`` are owned by you and you have
permissions to modify them. For example:
.. literalinclude:: files/tutorial-create.py
:lines: 29-31
Note the ``save()`` part! Without this, the change is only applied locally (on
your computer). But calling ``save()`` also takes care that all changes are
applied on the server.
.. note::
Some fields cannot (and should not) be changed. For example, you cannot
modify ``created`` or ``contributor`` fields. You will get an error if you
try.
Annotate Samples and Data
=========================
The obvious next thing to do after uploading some data is to annotate it.
Annotations are encoded as bundles of descriptors, where each descriptor
references a value in a descriptor schema (*i.e.* a template). Annotations for
data objects, samples, and collections each follow a different descriptor
format. For example, a reads data object can be annotated with the 'reads'
descriptor schema, while a sample can be annotated by the 'sample' annotation
schema. Each data object that is associated with the sample is also connected
to the sample's annotation, so that the annotation for a sample (or collection)
represents all Data objects attached to it. `Descriptor schemas`_ are described
in detail (with `accompanying examples`_) in the
`Resolwe Bioinformatics documentation`_.
.. _Resolwe Bioinformatics documentation: http://resolwe-bio.readthedocs.io
.. _Descriptor schemas: https://resolwe-bio.readthedocs.io/en/latest/descriptor.html
.. _accompanying examples: https://github.com/genialis/resolwe-bio/tree/master/resolwe_bio/descriptors
Here, we show how to annotate the ``reads`` data object by defining the
descriptor information that populates the annotation fields as defined in the
'reads' descriptor schema:
.. literalinclude:: files/tutorial-create.py
:lines: 33-42
We can annotate the sample object using a similar process with a 'sample'
descriptor schema:
.. literalinclude:: files/tutorial-create.py
:lines: 44-59
.. warning::
Many descriptor schemas have required fields with a limited set of choices
that may be applied as annotations. For example, the 'species' annotation
in a sample descriptor must be selected from the list of options in the
`sample descriptor schema`_, represented by its Latin name.
.. _sample descriptor schema: https://github.com/genialis/resolwe-bio/blob/master/resolwe_bio/descriptors/sample.yml
We can also define descriptors and descriptor schema directly when calling
``res.run`` function. This is described in the section about the ``run()``
method below.
Run analyses
============
Various bioinformatic processes are available to properly analyze sequencing
data. Many of these pipelines are available via Resolwe SDK, and are listed in
the `Process catalog`_ of the `Resolwe Bioinformatics documentation`_.
.. _Process catalog: http://resolwe-bio.readthedocs.io/en/latest/catalog.html
.. _Resolwe Bioinformatics documentation: http://resolwe-bio.readthedocs.io
After uploading reads file, the next step is to align reads to a genome. We
will use STAR aligner, which is wrapped in a process with slug
``alignment-star``. Inputs and outputs of this process are described in
`STAR process catalog`_. We will define input files and the process will run
its algorithm that transforms inputs into outputs.
.. _STAR process catalog: https://resolwe-bio.readthedocs.io/en/latest/catalog-definitions.html#process-alignment-star
.. literalinclude:: files/tutorial-create.py
:lines: 67-76
Let's take a closer look at the code above. We defined the alignment process by
its slug ``'alignment-star'``. For inputs we defined data objects ``reads``
and ``genome``. ``Reads`` object was created with 'upload-fastq-single'
process, while ``genome`` data object was already on the server and we just
used its slug to identify it. The ``alignment-star`` processor will
automatically take the right files from data objects, specified in inputs and
create output files: ``bam`` alignment file, ``bai`` index and some more...
You probably noticed that we get the result almost instantly, while the
typical alignment process runs for hours. This is because
processing runs asynchronously, so the returned data object does not
have an OK status or outputs when returned.
.. literalinclude:: files/tutorial-create.py
:lines: 78-85
Status ``OK`` indicates that processing has finished successfully, but you will
also find other statuses. They are given with two-letter abbreviations. To
understand their meanings, check the
:obj:`status reference <resdk.resources.Data.status>`. When processing is done,
all outputs are written to disk and you can inspect them:
.. literalinclude:: files/tutorial-create.py
:lines: 87-88
Until now, we used ``run()`` method twice: to upload reads (yes, uploading
files is just a matter of using an upload process) and to run alignment. You
can check the full signature of the :obj:`run() <resdk.Resolwe.run>` method.
Run workflows
=============
Typical data analysis is often a sequence of processes. Raw data or initial
input is analysed by running a process on it that outputs some data. This data
is fed as input into another process that produces another set of outputs. This
output is then again fed into another process and so on. Sometimes, this
sequence is so commonly used that one wants to simplify its execution. This
can be done by using so called "workflow". Workflows are special processes that
run a stack of processes. On the outside, they look exactly the same as a
normal process and have a process slug, inputs... For example, we
can run workflow "General RNA-seq pipeline" on our reads:
.. literalinclude:: files/tutorial-create.py
:lines: 90-100
Solving problems
================
Sometimes the data object will not have an "OK" status. In such case, it is
helpful to be able to check what went wrong (and where). The :obj:`stdout()
<resdk.resources.Data.stdout>` method on data objects can help---it returns the
standard output of the data object (as string). The output is long but
exceedingly useful for debugging. Also, you can inspect the info, warning and
error logs.
.. literalinclude:: files/tutorial-create.py
:lines: 104-117
| /resdk-19.0.1.tar.gz/resdk-19.0.1/docs/tutorial-create.rst | 0.938583 | 0.680135 | tutorial-create.rst | pypi |
.. _resdk-tables:
============
ReSDK Tables
============
ReSDK tables are helper classes for aggregating collection data in
tabular format. Currently, we have four flavours:
- :ref:`rna-tables`
- :ref:`methylation-tables`
- :ref:`microarray-tables`
- :ref:`variant-tables`
.. _rna-tables:
RNATables
=========
Imagine you are modelling gene expression data from a given collection.
Ideally, you would want all expression values organized in a table where
rows represents samples and columns represent genes. Class
``RNATables`` gives you just that (and more).
We will present the functionality of ``RNATables`` through an
example. We will:
- Create an instance of ``RNATables`` and examine its attributes
- Fetch raw expressions and select `TIS signature genes`_ with
sufficient coverage
- Normalize expression values (log-transform) and visualize samples in a
simple PCA plot
.. _`TIS signature genes`: https://translational-medicine.biomedcentral.com/articles/10.1186/s12967-019-2100-3
First, connect to a Resolwe server, pick a collection and create
an instance of ``RNATables``::
import resdk
from resdk.tables import RNATables
res = resdk.Resolwe(url='https://app.genialis.com/')
res.login()
collection = res.collection.get("sum149-fresh-for-rename")
sum149 = RNATables(collection)
Object ``sum149`` is an instance of ``RNATables`` and has many attributes. For a complete list see
the :ref:`reference`, here we list the most commonly used ones::
# Expressions raw counts
sum149.rc
# Expressions normalized counts
sum149.exp
# See normalization method
sum149.exp.attrs["exp_type"]
# Sample metadata
sum149.meta
# Sample QC metrics
sum149.qc
# Dictionary that maps gene ID's into gene symbols
sum149.readable_columns
# This is handy to rename column names (gene ID's) to gene symbols
sum149.rc.rename(columns=sum149.readable_columns)
.. note::
Expressions and metadata are cached in memory as well as on disk. At
each time they are re-requested a check is made that local and server side
of data is synced. If so, cached data is used. Otherwise, new data
will be pulled from server.
In our example we will only work with a set of `TIS signature genes`_::
TIS_GENES = ["CD3D", "IDO1", "CIITA", "CD3E", "CCL5", "GZMK", "CD2", "HLA-DRA", "CXCL13", "IL2RG", "NKG7", "HLA-E", "CXCR6", "LAG3", "TAGAP", "CXCL10", "STAT1", "GZMB"]
We will identify low expressed genes and only keep the ones with average raw
expression above 20::
tis_rc = sum149.rc.rename(columns=sum149.readable_columns)[TIS_GENES]
mean = tis_rc.mean(axis=0)
high_expressed_genes = mean.loc[mean > 20].index
Now, lets select TPM normalized expressions and keep only highly
expressed tis genes. We also transform to ``log2(TPM + 1)``::
import numpy as np
tis_tpm = sum149.exp.rename(columns=sum149.readable_columns)[high_expressed_genes]
tis_tpm_log = np.log(tis_tpm + 1)
Finally, we perform PCA and visualize the results::
from sklearn.decomposition import PCA
pca = PCA(n_components=2, whiten=True)
Y = pca.fit_transform(tis_tpm_log)
import matplotlib.pyplot as plt
for ((x, y), sample_name) in zip(Y, tis_tpm.index):
plt.plot(x, y, 'bo')
plt.text(x, y, sample_name)
plt.xlabel(f"PC1 ({pca.explained_variance_ratio_[0]})")
plt.ylabel(f"PC2 ({pca.explained_variance_ratio_[1]})")
plt.show()
.. _methylation-tables:
MethylationTables
=================
Similar as ``RNATables`` provide access to raw counts and normalized
expression values of RNA data, ``MethylationTables`` allow for fast
access of beta and m-values of methylation data::
meth = resdk.tables.MethylationTables(<collection-with-methylation-data>)
# Methylation beta-values
meth.beta
# Methylation m-values
meth.mval
.. _microarray-tables:
MATables
========
Similar as ``RNATables`` provide access to raw counts and normalized
expression values of RNA data, ``MATables`` allow for fast
access of expression values per probe of microarray::
ma = resdk.tables.MATables(<collection-with-microarray-data>)
# Microarray expressions values (columns are probe ID's)
ma.exp
.. _variant-tables:
VariantTables
=============
Similar as ``RNATables`` provide access to raw counts and normalized
expression values of RNA data, ``VariantTables`` allow for fast
access of variant data present in Data of type ``data:mutationstable``::
vt = resdk.tables.VariantTables(<collection-with-variant-data>)
vt.variants
The output of the above would look something like this:
========= ============ ============
sample_id chr1_123_C>T chr1_126_T>C
========= ============ ============
101 2 NaN
102 0 2
========= ============ ============
In rows, there are sample ID's. In columns there are variants where each
variant is given as:
``<chromosome>_<position>_<nucleotide-change>``.
Values in table can be:
- 0 (wild-type / no mutation)
- 1 (heterozygous mutation),
- 2 (homozygous mutation)
- NaN (QC filters are failing - mutation status is unreliable)
Inspecting depth
----------------
The reason for NaN values may be that the read depth on certain position
is too low for GATK to reliably call a variant. In such case, it is
worth inspecting the depth or depth per base::
# Similar as above but one gets depth on particular variant / sample
vt.depth
# One can also get depth for specific base
vt.depth_a
vt.depth_c
vt.depth_t
vt.depth_g
Filtering mutations
-------------------
Process ``mutations-table`` on Genialis Platform accepts either ``mutations`` or
``geneset`` input which specifies the genes of interest. It restricts the scope
of mutation search to just a few given genes.
However, it can happen that not all the samples have the same ``mutations`` or
``geneset`` input. In such cases, it makes little sense to merge the information
about mutations from multiple samples. By default, ``VariantTables`` checks that
all Data is computed with same ``mutations`` / ``geneset`` input. If this is
not true, it will raise an error.
But if you provide additional argument ``geneset`` it will limit the
mutations to only those in the given geneset. An example::
# Sample 101 has mutations input "FHIT, BRCA2"
# Sample 102 has mutations input "BRCA2"
# This would cause error, since the mutations inputs are not the same
vt = resdk.tables.VariantTables(<collection>)
vt.variants
# This would limit the variants to just the ones in BRCA2 gene.
vt = resdk.tables.VariantTables(<collection>, geneset=["BRCA2"])
vt.variants
| /resdk-19.0.1.tar.gz/resdk-19.0.1/docs/resdk-tables.rst | 0.931134 | 0.679048 | resdk-tables.rst | pypi |
import resdk
res = resdk.Resolwe(url='https://app.genialis.com')
res.login()
# Fetch the example single-end reads Data object from the server by its slug
example = res.data.get('resdk-example-reads')
# Download its fastq output files into the current working directory
example.download(
    field_name='fastq',
    download_dir='./',
)
# Create a new collection on the Resolwe server to hold the produced data
test_collection = res.collection.create(name='Test collection')
# Upload single-end FASTQ reads; this creates a new Data object in the collection
reads = res.run(
    slug='upload-fastq-single',
    input={
        'src': './reads.fastq.gz',
    },
    collection=test_collection,
)
reads.sample
# Rename the Data object (the change stays local until save() is called)
reads.name = 'My first data'
reads.save()
# Choose the descriptor schema used to annotate this Data object
reads.descriptor_schema = 'reads'
# Fill in the annotation values defined by the chosen schema
reads.descriptor = {
    'description': 'Some free text...',
}
# Very important: save() pushes the local changes to the server!
reads.save()
reads.sample.descriptor_schema = 'sample'
reads.sample.descriptor = {
    'general': {
        'description': 'This is a sample...',
        'species': 'Homo sapiens',
        'strain': 'F1 hybrid FVB/N x 129S6/SvEv',
        'cell_type': 'glioblastoma',
    },
    'experiment': {
        'assay_type': 'rna-seq',
        'molecule': 'total_rna',
    },
}
reads.sample.save()
# Fetch a STAR genome index that already exists on the server
genome_index = res.data.get('resdk-example-genome-index')
alignment = res.run(
    slug='alignment-star',
    input={
        'genome': genome_index,
        'reads': reads,
    },
)
# Refresh the object with the latest metadata from the server
alignment.update()
# Inspect the progress of the (asynchronous) process
alignment.process_progress
# Inspect the processing status of the Data object (e.g. 'OK')
alignment.status
# Inspect the process outputs (bam, bai, ...)
alignment.output
# Run a whole workflow (a fixed pipeline of processes) on the same reads
res.run(
    slug='workflow-bbduk-star-featurecounts-qc',
    input={
        'reads': reads,
        'genome': res.data.get('resdk-example-genome-index'),
        'annotation': res.data.get('resdk-example-annotation'),
        'rrna_reference': res.data.get('resdk-example-rrna-index'),
        'globin_reference': res.data.get('resdk-example-globin-index'),
    }
)
# Update the data object to get the most recent info
alignment.update()
# Print the process' standard output (useful for debugging)
print(alignment.stdout())
# Access the process' execution information
alignment.process_info
# Access the process' execution warnings
alignment.process_warning
# Access the process' execution errors
alignment.process_error | /resdk-19.0.1.tar.gz/resdk-19.0.1/docs/files/tutorial-create.py | 0.450118 | 0.224002 | tutorial-create.py | pypi |
import datetime
import json
from typing import Union
from RDS import User
def initToken(obj: Union[str, dict]):
    """Return a token object for *obj*.

    Already-instantiated tokens are passed through unchanged; strings and
    dicts are decoded by trying the most specific token class first.
    """
    if isinstance(obj, (Token, OAuth2Token)):
        # Nothing to decode: *obj* is already a token instance.
        return obj

    if not isinstance(obj, (str, dict)):
        raise ValueError("Given object not from type str or dict.")

    from RDS.Util import try_function_on_dict

    # Try deserializers from the most specific class down to the most
    # generic one; the first parser that succeeds wins.
    candidates = []
    for klass in (OAuth2Token, LoginToken, Token):
        candidates.append(klass.from_json)
        candidates.append(klass.from_dict)

    return try_function_on_dict(candidates)(obj)
class Token:
    """
    This token represents a simple username:password pair, but will not
    enforce any service configuration (see LoginToken for that).
    """

    # Backing fields for the read-only properties below.
    _service = None
    _user = None
    _access_token = None

    def __init__(self, user: User, service, access_token: str):
        """Bind *user* and *access_token* to *service*.

        Raises:
            ValueError: If *service* is not a BaseService, or if
                *access_token* is empty for a non-login service.
        """
        from RDS import BaseService, LoginService

        if not isinstance(service, BaseService):
            raise ValueError(f"service parameter needs to be of type Service.")
        # LoginService credentials may legitimately be empty, so the
        # empty-string check only applies to other service types.
        if not isinstance(service, LoginService):
            self.check_string(access_token, "access_token")

        self._service = service
        self._user = user
        self._access_token = access_token

    @staticmethod
    def check_string(obj: str, string: str):
        """Raise ValueError if *obj* is falsy; *string* names the field."""
        if not obj:
            raise ValueError(f"{string} cannot be an empty string, was {obj}")

    @property
    def servicename(self):
        # Convenience shortcut to the wrapped service's name.
        return self._service.servicename

    @property
    def service(self):
        return self._service

    @property
    def access_token(self):
        return self._access_token

    @property
    def user(self):
        return self._user

    def __str__(self):
        # NOTE(review): json.dumps(self) only works once RDS.Util.monkeypatch
        # has taught JSONEncoder to use to_json() — confirm it is applied.
        return json.dumps(self)

    def __eq__(self, other):
        """
        Returns True, if this object and other object have the same servicename and user. Otherwise false.
        """
        # Access tokens are deliberately ignored: they may have been
        # refreshed while still describing the same user/service pair.
        return (
            isinstance(other, (Token))
            and self.service == other.service
            and self.user == other.user
        )

    def to_json(self):
        """
        Returns a json-serializable dict (not a string, despite the name)
        with the class name under "type" and the payload under "data".
        """
        data = {"type": self.__class__.__name__, "data": self.to_dict()}
        return data

    def to_dict(self):
        """
        Returns this object as a dict.
        """
        data = {
            "service": self._service,
            "access_token": self._access_token,
            "user": self._user,
        }
        return data

    @classmethod
    def from_json(cls, tokenStr: str):
        """
        Returns a token object from a json string.

        Raises:
            ValueError: If the decoded dict does not describe a token.
        """
        data = tokenStr
        # json.loads can yield a string for doubly-encoded payloads, so keep
        # decoding until an actual dict comes out.
        while (
            type(data) is not dict
        ):  # FIX for bug: JSON.loads sometimes returns a string
            data = json.loads(data)

        # Accept any "...Token" type marker so subclasses can reuse this.
        if "type" in data and str(data["type"]).endswith("Token") and "data" in data:
            data = data["data"]
            return cls.from_dict(data)

        raise ValueError("not a valid token json string.")

    @classmethod
    def from_dict(cls, tokenDict: dict):
        """
        Returns a token object from a dict.
        """
        from RDS import Util

        return cls(
            Util.getUserObject(tokenDict["user"]),
            Util.getServiceObject(tokenDict["service"]),
            tokenDict["access_token"],
        )
class LoginToken(Token):
    """Token flavour that validates its credentials against the service
    configuration (username / password requirements of a LoginService).
    """

    def __init__(self, user: User, service, access_token: str):
        # The base class rejects empty access tokens, which are legal here;
        # pass a placeholder first and store the real value afterwards.
        super().__init__(user, service, "---")
        self._access_token = access_token

        from RDS import LoginService

        if not isinstance(service, LoginService):
            raise ValueError("parameter service is not a LoginService, was: {}".format(
                service.__class__.__name__))

        missing_user = service.userId and self.user is None
        if missing_user:
            raise ValueError(
                "user is needed, because username must be provided for specified service.")

        missing_password = service.password and (
            self.access_token is None or not self.access_token)
        if missing_password:
            raise ValueError(
                "access_token is needed, because password must be provided for specified service.")
class OAuth2Token(Token):
    """
    Token carrying OAuth2 credentials: an access token plus an optional
    refresh token and its expiration date.
    """

    # OAuth2-specific state on top of the plain Token.
    _refresh_token = None
    _expiration_date = None

    def __init__(
        self,
        user: User,
        service,
        access_token: str,
        refresh_token: str = "",
        expiration_date: datetime.datetime = None,
    ):
        """Create an OAuth2 token for *service*.

        Raises:
            ValueError: If *service* is not an OAuth2Service.
        """
        super().__init__(user, service, access_token)

        from RDS import OAuth2Service

        if not isinstance(service, OAuth2Service):
            raise ValueError("parameter service is not an oauth2service, was: {}".format(
                service.__class__.__name__))

        # Default to the current time when no expiration date is given.
        if expiration_date is None:
            expiration_date = datetime.datetime.now()

        # remove check for empty string for refresh_token, because it could be an authorization_token
        # self.check_string(refresh_token, "refresh_token")
        if refresh_token:
            self._refresh_token = refresh_token

        self._expiration_date = expiration_date

    @property
    def refresh_token(self):
        return self._refresh_token

    @property
    def expiration_date(self):
        return self._expiration_date

    def refresh(self):
        """Delegate token renewal to the owning OAuth2 service."""
        return self.service.refresh(self)

    def __eq__(self, obj):
        """
        Check, if tokens are equal. You must not check if the refresh or access_tokens are equal,
        because they could be changed already. Only servicename is relevant.
        """
        return super().__eq__(obj)

    def to_json(self):
        """
        Returns a json-serializable dict (see Token.to_json) extended with
        the OAuth2-specific fields.
        """
        data = super().to_json()

        data["type"] = self.__class__.__name__
        data["data"].update(self.to_dict())

        return data

    def to_dict(self):
        """
        Returns this object as a dict.
        """
        data = super().to_dict()
        data["refresh_token"] = self._refresh_token
        # Serialized as a string; from_dict parses it back with fromisoformat.
        data["expiration_date"] = str(self._expiration_date)

        return data

    @classmethod
    def from_json(cls, tokenStr: str):
        """
        Returns an oauthtoken object from a json string.

        Raises:
            ValueError: If the decoded dict is not marked as an OAuth2Token.
        """
        data = tokenStr
        # json.loads can yield a string for doubly-encoded payloads, so keep
        # decoding until an actual dict comes out.
        while (
            type(data) is not dict
        ):  # FIX for bug: JSON.loads sometimes returns a string
            data = json.loads(data)

        if "type" in data and str(data["type"]).endswith("OAuth2Token"):
            data = data["data"]
            return cls.from_dict(data)

        raise ValueError("not a valid token json string.")

    @classmethod
    def from_dict(cls, tokenDict: dict):
        """
        Returns an oauthtoken object from dict.
        """
        # Reuse Token.from_dict for the common fields, then rebuild with the
        # OAuth2-specific ones.
        token = super(OAuth2Token, cls).from_dict(tokenDict)

        return OAuth2Token(
            token.user,
            token.service,
            token.access_token,
            tokenDict["refresh_token"],
            datetime.datetime.fromisoformat(tokenDict["expiration_date"]),
) | /research-data-services-common-0.53.tar.gz/research-data-services-common-0.53/RDS/Token.py | 0.715921 | 0.181118 | Token.py | pypi |
class ROParser():
    """Minimal reader for RO-Crate 1.1 metadata documents (json-ld)."""

    def __init__(self, doc) -> None:
        """Validate *doc* and resolve the crate's root entity.

        Args:
            doc: Parsed content of an ro-crate-metadata.json file; must
                contain an "@graph" list.

        Raises:
            ValueError: If "@graph" is missing or the crate conforms to a
                version other than 1.1.
        """
        # The descriptor entity is expected to carry:
        # "conformsTo": {
        #     "@id": "https://w3id.org/ro/crate/1.1"
        # },
        if "@graph" not in doc:
            raise ValueError("Not a valid ROCrate file")

        # Only this exact spec version is accepted.
        self.__version = "https://w3id.org/ro/crate/1.1"
        self.__doc = doc
        self.__rootId = self.__getRootIdentifier()
        self.__root = self.getElement(self.__rootId)

    def __getRootIdentifier(self):
        """Return the "@id" of the root entity referenced by the
        "ro-crate-metadata.json" descriptor.

        NOTE(review): implicitly returns None when no descriptor entity
        exists in "@graph"; getElement(None) then yields an empty dict —
        confirm this fallback is intended.
        """
        for elem in self.__doc["@graph"]:
            if elem["@id"] == "ro-crate-metadata.json":
                version = elem["conformsTo"]["@id"]
                if version != self.__version:
                    raise ValueError(
                        "Valid ROCrate file, but wrong version.\nWanted `https://w3id.org/ro/crate/1.1`, given `{}`".format(version))
                return elem["about"]["@id"]

    @property
    def root(self):
        # The resolved root entity (result of getElement on the root id).
        return self.__root

    @property
    def rootIdentifier(self):
        # The raw "@id" of the root entity.
        return self.__rootId

    def getElement(self, id: str, doc: dict = None, expand: bool = False, clean: bool = False, clamps: int = None):
        """Gets the element with given id

        Args:
            id (str): The id you are searching for
            doc (dict, optional): The json dict, you want to search for id. If None, it searches the given dict at this object. Defaults to None.
            expand (bool, optional): Resolves all @id lookups in dicts. Defaults to False.
            clean (bool, optional): Cleans up, all lists with only one element pulls the elements in first layer, all dicts with one element will be accessable directly. Defaults to False.
            clamps (int, optional): All lists only returns this amount of elements. If none, returns all. Defaults to None.

        Returns:
            dict: The matching entity (expanded / cleaned as requested);
            an empty dict when no entity with *id* exists.
        """
        if doc is None:
            doc = self.__doc["@graph"]

        # Linear scan for the entity with the requested "@id".
        temp = {}
        for elem in doc:
            if "@id" in elem and elem["@id"] == id:
                temp = elem

        result = {}
        for key, value in temp.items():
            # Only non-"@" keys can hold lists of references worth expanding.
            if not "@" in key[0] and expand and isinstance(value, list):
                result[key] = []
                for elem in value:
                    try:
                        # Recursively resolve the referenced entity...
                        tmpRes = self.getElement(
                            elem["@id"], doc=doc, expand=expand, clean=clean, clamps=clamps)
                    except:
                        # ...falling back to the raw value for plain items.
                        tmpRes = elem
                    result[key].append(tmpRes)
        temp.update(result)

        if clean:
            return self.__clean(temp, clamps=clamps)

        return temp

    def __clean(self, docs, clamps):
        """Drop "@"-prefixed bookkeeping keys, truncate lists to *clamps*
        items, and collapse single-element containers to their content."""
        # Scalars pass through untouched.
        if not isinstance(docs, (dict, list)):
            return docs

        if not isinstance(docs, list):
            docs = [docs]

        result = []
        try:
            for doc in docs:
                temp = {}
                for key, value in doc.items():
                    if not "@" in key[0]:
                        temp[key] = self.__clean(value, clamps)
                result.append(temp)
        except:
            # Items without .items() (e.g. lists of strings) stay as-is.
            result = docs

        if clamps is not None and isinstance(result, list) and len(result) >= clamps:
            result = result[:clamps]

        while len(result) == 1:  # pull single values out of lists
            try:
                key, value = list(result.items())[0]
                if not "@" in key[0]:
                    result = value
                else:
                    raise ValueError
            except Exception as e:
                # Not a one-entry dict without "@" key: unwrap the single
                # element instead and keep collapsing.
                result = result[0]
return result | /research-data-services-common-0.53.tar.gz/research-data-services-common-0.53/RDS/ROParser.py | 0.722429 | 0.302871 | ROParser.py | pypi |
import importlib
import json
from .Service import initService, BaseService, LoginService, OAuth2Service
from .User import initUser
from .Token import initToken, Token
from typing import Union
import os
import requests
import logging
logger = logging.getLogger()
def parseUserId(obj: str):
    """Split a compound user id of the form ``service://userId:password``.

    Args:
        obj: String of the form ``service://userId:password``.

    Returns:
        Tuple ``(service, userId, password)``; ``service`` is normalized to
        carry the ``port-`` prefix, an empty userId and an empty or literal
        ``"None"`` password are mapped to ``None``.
    """
    # TODO: add regex here (\S+):\/\/(\S+?):(\S+)
    service, user = obj.split("://")
    # Split on the first ':' only, so passwords/tokens containing ':' survive.
    userId, password = user.split(":", 1)

    if "port-" not in service:
        service = "port-{}".format(service)

    if password == "None":
        password = None

    if userId == "":
        userId = None

    if password == "":
        password = None

    return service, userId, password
def parseToken(token: Token):
    """Serialize *token* into the ``{"userId": "service://user:token"}`` form
    understood by parseUserId / the token storage."""
    name = "{}".format(token.service.servicename)
    if "port-" not in name:
        name = "port-{}".format(name)

    compound_id = "{}://{}:{}".format(
        name, token.user.username, token.access_token)
    return {"userId": compound_id}
def getServiceObject(obj: Union[str, dict]):
    """Creates a service object, corresponding to the given object

    Args:
        obj (Union[str, dict]): The object needs to be a valid json or dictionary, created with to_json or to_dict method of a valid Service object

    Returns:
        BaseService: It is a Service object, which inherits from BaseService
    """
    return initService(obj)
def getUserObject(obj: Union[str, dict]):
    """Creates a User object, corresponding to the given object

    Args:
        obj (Union[str, dict]): The object needs to be a valid json or dictionary, created with to_json or to_dict method of a valid User object

    Returns:
        User: It is a User object, which inherits from User
    """
    return initUser(obj)
def getTokenObject(obj: Union[str, dict]):
    """Creates a Token object, corresponding to the given object

    Args:
        obj (Union[str, dict]): The object needs to be a valid json or dictionary, created with to_json or to_dict method of a valid Token object

    Returns:
        Token: It is a Token object, which inherits from Token
    """
    return initToken(obj)
def loadToken(userId: str, service: Union[str, BaseService]) -> Union[Token, None]:
    """Fetch the stored token for *userId* / *service* from the token storage.

    Args:
        userId: Identifier of the user in the token storage.
        service: Service object or plain servicename string; normalized to
            the ``port-`` prefixed servicename.

    Returns:
        The deserialized token object, or None when the storage responds
        with an error status.
    """
    # FIXME make localhost dynamic for pactman
    tokenStorageURL = os.getenv(
        "USE_CASE_SERVICE_PORT_SERVICE", "http://localhost:3000"
    )
    # Accept both service objects and plain servicename strings.
    try:
        servicename = service.servicename
    except AttributeError:
        servicename = service

    if not servicename.startswith("port-"):
        servicename = "port-{}".format(servicename)

    # load access token from token-storage
    result = requests.get(
        f"{tokenStorageURL}/user/{userId}/service/{servicename}",
        verify=(os.environ.get("VERIFY_SSL", "True") == "True"),
    )

    if result.status_code > 200:
        return None

    access_token = result.json()
    logger.debug(f"got: {access_token}")
    token = getTokenObject(access_token)
    logger.debug("initialize type: {}, data: {}".format(
        token.__class__.__name__, token.to_json()))

    return token
def register_service(
    service: BaseService
):
    """Register the given service to Token Storage

    Args:
        service (BaseService): The Service, which will be registered in Token Storage

    Raises:
        Exception: Raises, when there is something wrong in registering process.

    Returns:
        Bool: Returns True, when success, Otherwise False or raise Exception
    """
    # Without a configured token storage address there is nothing to do.
    tokenStorage = os.getenv("CENTRAL_SERVICE_TOKEN_STORAGE")
    if tokenStorage is None:
        return False

    data = service.to_dict()

    headers = {"Content-Type": "application/json"}

    # TLS verification can be switched off via the VERIFY_SSL env variable.
    response = requests.post(
        f"{tokenStorage}/service",
        json=data,
        headers=headers,
        verify=(os.environ.get("VERIFY_SSL", "True") == "True"),
    )

    if response.status_code != 200:
        raise Exception(
            "Cannot find and register Token Storage, msg:\n{}".format(
                response.text)
        )

    # The storage reports success/failure in its json body.
    response = response.json()
    if response["success"]:
        logger.info(
            f"Registering {service.servicename} in token storage was successful.")
        return True

    logger.error(
        f"There was an error while registering {service.servicename} to token storage.\nJSON: {response}"
    )
    return False
def load_class_from_json(jsonStr: str):
    """Resolve the class encoded in *jsonStr* (see internal_load_class).

    Raises ValueError when *jsonStr* is not a string.
    """
    if not isinstance(jsonStr, str):
        raise ValueError("Given parameter not a string.")

    # json.loads may itself yield a string for doubly-encoded payloads,
    # so keep decoding until an actual dict comes out.
    decoded = jsonStr
    while not isinstance(decoded, dict):
        decoded = json.loads(decoded)

    return internal_load_class(decoded)
def load_class_from_dict(data: dict):
    """
    Returns the class of the given dict
    (see internal_load_class for the lookup rules).
    """
    return internal_load_class(data)
def initialize_object_from_json(jsonStr: str):
    """
    Initialize and returns an object of the given json string.

    This is the easiest way to reverse the to_json method for objects from our lib folder.
    """
    # The resolved class parses the full json string itself.
    return load_class_from_json(jsonStr).from_json(jsonStr)
def initialize_object_from_dict(jsonDict: dict):
    """
    Initialize and returns an object of the given dict.

    This is another easy way to reverse the to_json method for objects from our lib folder.
    """
    # Note: unlike initialize_object_from_json, only the payload under
    # "data" is handed to the class' from_dict.
    return load_class_from_dict(jsonDict).from_dict(jsonDict["data"])
def internal_load_class(data: dict):
"""
For internal use only.
"""
if not isinstance(data, dict):
raise ValueError("Given parameter not a dict object.")
if "type" in data:
try:
klass = None
if data["type"].endswith("Token"):
mod = importlib.import_module("RDS.Token")
klass = getattr(mod, data["type"])
elif data["type"].endswith("Service"):
mod = importlib.import_module("RDS.Service")
klass = getattr(mod, data["type"])
elif data["type"].endswith("User"):
mod = importlib.import_module("RDS.User")
klass = getattr(mod, data["type"])
if klass is not None:
return klass
except Exception:
raise
raise ValueError("given parameter is not a valid class.")
raise ValueError("Type not specified in dict.")
def try_function_on_dict(func: list):
"""
This method trys the given functions on the given dictionary. Returns the first function, which returns a value for given dict.
Main purpose of this is the initialization of multiple Classes from json dicts.
Usage:
```python
func_list = [func1, func2, func3]
x = Util.try_function_on_dict(func_list)
object = x(objDict)
```
equals to:
```python
try:
try:
func1(objDict)
except:
pass
try:
func2(objDict)
except:
pass
try:
func3(objDict)
except:
pass
except:
raise Exception(...)
```
Raise an `Exception` with all raised exception as strings, if no function returns a value for the given jsonDict.
"""
def inner_func(jsonDict: dict):
nonlocal func
exp_list = []
for f in func:
try:
return f(jsonDict)
except Exception as e:
exp_list.append(e)
continue
raise Exception(
"The given jsonDict raise in all functions an exception.\ndata: {}, errors: \n{}".format(
jsonDict,
"\n".join(
[f"Error: {type(e)}, Msg: {str(e)}" for e in exp_list])
)
)
return inner_func
def monkeypatch(func_name: str = "to_json"):
""" Module that monkey-patches json module when it's imported so
JSONEncoder.default() automatically checks for a special "to_json()"
method and uses it to encode the object if found.
"""
from json import JSONEncoder, JSONDecoder
def to_default(self, obj):
return getattr(obj.__class__, func_name, to_default.default)(obj)
to_default.default = JSONEncoder.default # Save unmodified default.
JSONEncoder.default = to_default # Replace it.
# this part can only be used, if flask is installed. See: https://github.com/Sciebo-RDS/py-research-data-services-common#optional-dependencies
try:
from flask.json import JSONEncoder
from functools import wraps
def get_json_encoder(func_name: str = "to_json"):
""" Module that monkey-patches json module when it's imported so
JSONEncoder.default() automatically checks for a special "to_json()"
method and uses it to encode the object if found.
"""
class RDSEncoder(JSONEncoder):
def default(self, o):
method = getattr(o, func_name, JSONEncoder.default)
try:
return method()
except:
try:
return method(o)
except:
return method(self, o)
return RDSEncoder
def wrap_monkeypatch(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
key = "app"
if kwargs.get(key) is not None:
app = kwargs[key]
del kwargs[key]
app.json_encoder = get_json_encoder(*args, **kwargs)
else:
try:
from flask import current_app
current_app.json_encoder = get_json_encoder(
*args, **kwargs)
except:
pass
return fn(*args, **kwargs)
return wrapper
monkeypatch = wrap_monkeypatch(monkeypatch)
except:
pass | /research-data-services-common-0.53.tar.gz/research-data-services-common-0.53/RDS/Util.py | 0.544559 | 0.296123 | Util.py | pypi |
import json
from typing import Union
def initUser(obj: Union[str, dict]):
"""
Returns a User object for json String or dict.
"""
if isinstance(obj, (User)):
return obj
if not isinstance(obj, (str, dict)):
raise ValueError("Given object not from type str or dict.")
from RDS.Util import try_function_on_dict
load = try_function_on_dict([User.from_json, User.from_dict])
return load(obj)
class User:
"""
Represents a user, which can access services via tokens.
"""
_username = None
def __init__(self, username: str):
if not username:
raise ValueError("Username cannot be an empty string.")
self._username = username
@property
def username(self):
return self._username
def __str__(self):
return json.dumps(self)
def __eq__(self, obj):
if isinstance(obj, str):
try:
obj = User.from_json(obj)
except:
return False
return isinstance(obj, (User)) and self.username == obj.username
def to_json(self):
"""
Returns this object as a json string.
"""
data = {"type": self.__class__.__name__, "data": self.to_dict()}
return data
def to_dict(self):
"""
Returns this object as a dict.
"""
data = {"username": self._username}
return data
@classmethod
def from_json(cls, user: str):
"""
Returns an user object from a json string.
"""
data = user
while type(data) is not dict:
data = json.loads(data)
if "type" in data and str(data["type"]).endswith("User"):
data = data["data"]
if "username" in data:
return cls(data["username"])
raise ValueError("not a valid user object.")
@classmethod
def from_dict(cls, userDict: dict):
"""
Returns an user object from a dict.
"""
return User(userDict["username"]) | /research-data-services-common-0.53.tar.gz/research-data-services-common-0.53/RDS/User.py | 0.756268 | 0.190894 | User.py | pypi |
import numpy as np
import WrightTools as wt
def gauss(t, t0, fwhm):
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
return np.exp(-((t - t0) ** 2) / (2 * sigma ** 2))
def exp(t, t1, A1, B, t0):
# applies a heaviside step function
zero = t0
out = np.zeros(t.size)
out[t >= zero] = A1 * np.exp(-t[t >= zero] / t1)
out[t == zero] *= 0.5
out += B
return out
def biexp(t, t1, A1, t2, A2, B, t0):
# applies a heaviside step function
zero = t0
out = np.zeros(t.size)
out[t >= zero] = A1 * np.exp(-t[t >= zero] / t1) + A2 * np.exp(-t[t >= zero] / t2)
out[t == zero] *= 0.5
out += B
return out
def triexp(t, t1, A1, t2, A2, t3, A3, B, t0):
# applies a heaviside step function
zero = t0
out = np.zeros(t.size)
out[t >= zero] = (
A1 * np.exp(-t[t >= zero] / t1)
+ A2 * np.exp(-t[t >= zero] / t2)
+ A3 * np.exp(-t[t >= zero] / t3)
)
out[t == zero] *= 0.5
out += B
return out
def exp_fit_func(p, x):
# oversample grid. Then convolve, then interpolate convolution back to original grid
t = np.linspace(
x.min(), x.max(), 1024 * 4
) # need uniformly spaced grid to do convolution
t1, A1, B, t0, fwhm = p
t1, A1, B, fwhm = np.abs(t1), np.abs(A1), np.abs(B), np.abs(fwhm)
IRF = gauss(t - t.mean(), t0, fwhm)
IRF /= IRF.sum() # need area normalized function
decay = exp(t, t1, A1, B, t0)
model = np.convolve(decay, IRF, mode="same")
out = np.interp(x, t, model)
return out
def biexp_fit_func(p, x):
# oversample grid. Then convolve, then interpolate convolution back to original grid
t = np.linspace(
x.min(), x.max(), 1024 * 4
) # need uniformly spaced grid to do convolution
t1, A1, t2, A2, B, t0, fwhm = p
t1, A1, t2, A2, B, fwhm = (
np.abs(t1),
np.abs(A1),
np.abs(t2),
np.abs(A2),
np.abs(B),
np.abs(fwhm),
)
IRF = gauss(t - t.mean(), t0, fwhm)
IRF /= IRF.sum() # need area normalized function
decay = biexp(t, t1, A1, t2, A2, B, t0)
model = np.convolve(decay, IRF, mode="same")
out = np.interp(x, t, model)
return out
def triexp_fit_func(p, x):
# oversample grid. Then convolve, then interpolate convolution back to original grid
t = np.linspace(
x.min(), x.max(), 1024 * 4
) # need uniformly spaced grid to do convolution
t1, A1, t2, A2, t3, A3, B, t0, fwhm = p
t1, A1, t2, A2, t3, A3, B, fwhm = (
np.abs(t1),
np.abs(A1),
np.abs(t2),
np.abs(A2),
np.abs(t3),
np.abs(A3),
np.abs(B),
np.abs(fwhm),
)
IRF = gauss(t - t.mean(), t0, fwhm)
IRF /= IRF.sum() # need area normalized function
decay = triexp(t, t1, A1, t2, A2, t3, A3, B, t0)
model = np.convolve(decay, IRF, mode="same")
out = np.interp(x, t, model)
return out
def exp_param_guess(x, y):
t0 = x[np.argmax(y)]
B = np.mean(y[-100:])
fwhm = (x.min() + t0) / 2
A1 = y.max() / 2
t1 = np.mean(x)
p = [t1, A1, B, t0, fwhm]
return p
def biexp_param_guess(x, y):
t0 = x[np.argmax(y)]
B = np.mean(y[-100:])
fwhm = (x.min() + t0) / 2
A1 = y.max() / 2
t1 = np.mean(x)
A2 = A1 / 5
t2 = t1 * 10
p = [t1, A1, t2, A2, B, t0, fwhm]
return p
def triexp_param_guess(x, y):
t0 = x[np.argmax(y)]
B = np.mean(y[-100:])
fwhm = (x.min() + t0) / 2
A1 = y.max() / 2
t1 = np.mean(x)
A2 = A1 / 5
t2 = t1 * 10
A3 = A1 / 10
t3 = t1 * 100
p = [t1, A1, t2, A2, t3, A3, B, t0, fwhm]
return p
def sqrt_fit(p0, x, y, func):
sqrty = np.sqrt(y)
def sqrtfunc(p, x):
return np.sqrt(func(p, x))
pfit, perr = wt.kit.leastsqfitter(p0, x, sqrty, sqrtfunc)
return pfit, perr
def exp_fit(x, y):
p0 = exp_param_guess(x, y)
pfit, perr = sqrt_fit(p0, x, y, exp_fit_func)
ymodel = exp_fit_func(pfit, x)
return pfit, perr, ymodel
def biexp_fit(x, y):
p0 = biexp_param_guess(x, y)
pfit, perr = sqrt_fit(p0, x, y, biexp_fit_func)
ymodel = biexp_fit_func(pfit, x)
return pfit, perr, ymodel
def triexp_fit(x, y):
p0 = triexp_param_guess(x, y)
pfit, perr = sqrt_fit(p0, x, y, triexp_fit_func)
ymodel = triexp_fit_func(pfit, x)
return pfit, perr, ymodel | /research_kit-0.1.1.tar.gz/research_kit-0.1.1/research_kit/fit.py | 0.759671 | 0.840423 | fit.py | pypi |
import WrightTools as wt
import numpy as np
import matplotlib.pyplot as plt
from . import fit as fit
def PL_g2_plot(ax, d):
ax.plot(d, linewidth=1)
ax.fill_between(d.delay.points, d.counts.points, alpha=0.3)
ax.set_xlim(d.delay.min(), d.delay.max())
# ax.set_ylim(0, d.counts.max() * 1.05)
text = "Area ratio: " + "{:.2e}".format(d.attrs["arearatio"])
bbox = dict(boxstyle="round", fc="blanchedalmond", ec="orange", alpha=0.5)
ax.text(
0.99,
0.96,
text,
horizontalalignment="right",
verticalalignment="top",
transform=ax.transAxes,
bbox=bbox,
)
def PL_picotime_plot(ax, d, fitting=1):
maxes = 10
mins = 1e4
for chan, color, i in zip(d.channels, ["C0", "C1", "C2"], [0, 1, 2]):
x = d.delay.points
y = chan.points
if y.max() > maxes:
maxes = y.max()
if y.min() < mins:
mins = y.min()
ax.plot(x, y, linewidth=1, alpha=.5, color=color)
# ax.fill_between(x, y, alpha=0.3, color=color)
if fitting in [1, 2, 3]:
ts = []
if fitting == 1:
pfit, perr, ymodel = fit.exp_fit(x, y)
t1, A1, B, t0, fwhm = pfit
ts.append(t1)
if fitting == 2:
pfit, perr, ymodel = fit.biexp_fit(x, y)
t1, A1, t2, A2, B, t0, fwhm = pfit
ts.append(t1)
ts.append(t2)
if fitting == 3:
pfit, perr, ymodel = fit.triexp_fit(x, y)
t1, A1, t2, A2, t3, A3, B, t0, fwhm = pfit
ts.append(t1)
ts.append(t2)
ts.append(t3)
label = "$\\mathsf{\\tau_i ="
for t in ts:
label = label + "\\;" + str(round(np.abs(t), 1)) + ","
label = label[:-1] + "\\;ns}$"
ax.plot(x, ymodel, color=color, linewidth=3, alpha=1, label=label)
ax.legend()
ax.set_xlim(x.min(), x.max())
ax.set_ylim(mins + 1, maxes * 1.1)
ax.set_yscale("log")
def PL_macrotime_plot(ax, d, col):
for chan in d.channels:
ax.plot(d.labtime.points, chan.points, linewidth=2)
# ax.set_ylim(chan.points.min() + 1, chan.points.max() * 1.05)
ax.set_xlim(d.labtime.min(), d.labtime.max())
text = "Records: " + "{:.2e}".format(col.attrs["Records"])
bbox = dict(boxstyle="round", fc="blanchedalmond", ec="orange", alpha=0.5)
ax.text(
0.99,
0.96,
text,
horizontalalignment="right",
verticalalignment="top",
transform=ax.transAxes,
bbox=bbox,
)
def PL_fig_plot(col, fitting=1):
fig, gs = wt.artists.create_figure(
width="double", nrows=3, default_aspect=0.25, hspace=0.7
)
axs = [plt.subplot(gs[i]) for i in range(3)]
ylabels = [
"$\\mathsf{counts \\; [Hz]}$",
"$\\mathsf{counts}$",
"$\\mathsf{cross-correlation}$",
]
xlabels = [
"$\\mathsf{labtime \\; [s]}$",
"$\\mathsf{delay \\; [ns]}$",
"$\\mathsf{\\tau/\\tau_{rep}}$",
]
for ax, xlabel, ylabel in zip(axs, xlabels, ylabels):
ax.grid()
wt.artists.set_ax_labels(ax=ax, xlabel=xlabel, ylabel=ylabel, label_fontsize=22)
PL_macrotime_plot(axs[0], col.macrohist, col)
PL_picotime_plot(axs[1], col.picohist, fitting)
PL_g2_plot(axs[2], col.g2hist)
axs[0].set_title(col.attrs["identifier"])
return fig, gs
def spectra_fig_plot(d):
fig, gs = wt.artists.create_figure(default_aspect=0.5)
ax = plt.subplot(gs[0])
ax.plot(d, linewidth=2)
wt.artists.set_ax_labels(xlabel=d.axes[0].label, ylabel="counts")
ax.set_title(d.natural_name)
ax.set_xlim(d.axes[0].min(), d.axes[0].max())
ax.grid()
return fig, gs
def confocal_scan_plot(d, sideplot=False):
# d = d.copy()
# d.create_channel(name='trace_log', values=np.sqrt(d['trace'][:]))
# d.create_channel(name='retrace_log', values=np.sqrt(d['retrace'][:]))
vmin = min(d["trace"].min(), d["retrace"].min())
vmax = max(d["trace"].max(), d["retrace"].max())
fig, gs = wt.artists.create_figure(width="double", cols=[1, 1, "cbar"])
axs = [plt.subplot(gs[i]) for i in range(2)]
axs[0].pcolor(d, channel="trace", vmin=vmin, vmax=vmax)
axs[0].set_title("trace")
axs[1].pcolor(d, channel="retrace", vmin=vmin, vmax=vmax)
axs[1].set_title("retrace")
wt.artists.set_fig_labels(
xlabel="$\\mathsf{x \\; (\\mu m)}$",
ylabel="$\\mathsf{y \\; (\\mu m)}$",
title=d.natural_name,
)
cax = plt.subplot(gs[-1])
ticks = np.linspace(vmin, vmax, 2)
wt.artists.plot_colorbar(cax, ticks=ticks)
if sideplot:
first = [d.x.points, d.y.points, d.trace.points.T]
second = [d.x.points, d.y.points, d.retrace.points.T]
for ax, arrs_to_bin in zip(axs, [first, second]):
wt.artists.add_sideplot(ax, along="x", arrs_to_bin=arrs_to_bin)
wt.artists.add_sideplot(ax, along="y", arrs_to_bin=arrs_to_bin)
return fig, gs | /research_kit-0.1.1.tar.gz/research_kit-0.1.1/research_kit/artists.py | 0.549641 | 0.528412 | artists.py | pypi |
from functools import cached_property
from itertools import product
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.metrics import SCORERS
from sklearn.utils.validation import check_is_fitted
from ..utils import check_datasets, check_oversamplers_classifiers, check_random_states
from ..model_selection import ModelSearchCV
def _select_name(names, model):
return [name for name in names if set(name.split('_')).issubset(model.split('_'))][
0
]
def _col_mapping(cols, suffix):
return dict(zip(cols, [f'{col}_{suffix}' for col in cols]))
class ImbalancedClassificationExperiment(BaseEstimator):
"""Define a classification experiment on multiple imbalanced datasets.
ImbalancedClassificationExperiment implements a "fit" method tha applies nested
cross-validation to a collection of oversamplers, classifiers and their parameters.
Read more in the :ref:`User Guide <user_guide>`.
Parameters
----------
oversamplers : list of (string, oversampler, param_grid) tuples
Each oversampler is assumed to implement the imbalanced-learn
oversampler interface.
classifiers : list of (string, classifier, param_grid) tuples
Each classifier is assumed to implement the scikit-learn
estimator interface.
scoring : string, callable, list/tuple, dict or None, optional (default=None)
A single string or a callable to evaluate the predictions on the
test sets and report the results.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
If ``None``, a default scorer is used.
n_splits : int, default=5
Number of folds for StratifiedKFold cross-validation. Must be at least 2.
n_runs : int, default=1
Number of experiment runs. Must be at least 1.
random_state : int, RandomState instance, default=None
Control the randomization of the algorithm.
- If int, ``random_state`` is the seed used by the random number generator.
- If ``RandomState`` instance, random_state is the random number generator.
- If ``None``, the random number generator is the ``RandomState`` instance used by ``np.random``.
n_jobs : int or None, default=None
Number of jobs to run in parallel.
- When ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
- When ``-1`` means using all processors.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
Attributes
----------
result_tbl_ : object of :class:`pandas.DataFrame` class
A dataframe that contains the results of the experiment. Each column
includes the dataset name, the name of the oversampler and classifier,
the metric and the mean/standard deviation score across runs.
result_wide_tbl_ : object of :class:`pandas.DataFrame` class
A dataframe that contains the results of the experiment in a wide format.
It also contains the ranking of the oversampler for each dataset.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer, load_iris
>>> from sklearn.tree import DecisionTreeClassifier
>>> from imblearn.over_sampling import RandomOverSampler, SMOTE
>>> from rlearn.experiment import ImbalancedClassificationExperiment
>>> datasets = [
... ('breast_cancer', load_breast_cancer(return_X_y=True)),
... ('iris', load_iris(return_X_y=True))
... ]
>>> oversamplers = [('random_over', RandomOverSampler(), {}), ('smt', SMOTE(), {'k_neighbors': [3, 5]})]
>>> classifiers = [('tree', DecisionTreeClassifier(), {'max_depth': [3,5, 8]})]
>>> experiment = ImbalancedClassificationExperiment(
... oversamplers=oversamplers,
... classifiers=classifiers,
... random_state=12
... )
>>> experiment.fit(datasets)
ImbalancedClassificationExperiment(classifiers=[('tree',...
>>> experiment.results_tbl_
dataset_name oversampler classifier metric mean_score std_score
0 breast_cancer random_over tree accuracy 0.926168 0.009979
1 breast_cancer smt tree accuracy 0.942804 0.003777
2 iris random_over tree accuracy 0.943333 0.004714
3 iris smt tree accuracy 0.943333 0.004714
>>> experiment.results_wide_tbl_
dataset_name classifier metric random_over_score smt_score random_over_rank smt_rank
0 breast_cancer tree accuracy 0.926168 0.942804 2.0 1.0
1 iris tree accuracy 0.943333 0.943333 1.5 1.5
"""
GROUP_KEYS = ['dataset_name', 'oversampler', 'classifier']
def __init__(
self,
oversamplers,
classifiers,
scoring=None,
n_splits=5,
n_runs=2,
random_state=None,
n_jobs=-1,
verbose=0,
):
self.oversamplers = oversamplers
self.classifiers = classifiers
self.scoring = scoring
self.n_splits = n_splits
self.n_runs = n_runs
self.random_state = random_state
self.n_jobs = n_jobs
self.verbose = verbose
def _check_estimators(self):
"""Check the estimators."""
estimators, param_grids = check_oversamplers_classifiers(
self.oversamplers, self.classifiers
)
self.estimators_ = []
for name, estimator in estimators:
selected_param_grids = []
for param_grid in param_grids:
selected_param_grid = {}
if param_grid['est_name'][0] == name:
for param_name, vals in param_grid.items():
if param_name != 'est_name':
selected_param_grid[
param_name.replace(f'{name}__', '')
] = vals
selected_param_grids.append(selected_param_grid)
self.estimators_.append(
(name, GridSearchCV(estimator, selected_param_grids))
)
return self
def _check_mscv(self):
self.mscv_ = ModelSearchCV(
self.estimators_,
{},
scoring=self.scoring,
refit=False,
cv=StratifiedKFold(
n_splits=self.n_splits, shuffle=True, random_state=self.random_state
),
return_train_score=False,
n_jobs=self.n_jobs,
verbose=self.verbose,
)
return self
def _check_cols(self, datasets):
# Extract names
self.oversamplers_names_, *_ = zip(*self.oversamplers)
self.classifiers_names_, *_ = zip(*self.classifiers)
self.datasets_names_, _ = zip(*datasets)
# Extract scoring columns
if isinstance(self.scoring, list):
self.scoring_cols_ = self.scoring
elif isinstance(self.scoring, str):
self.scoring_cols_ = [self.scoring]
elif isinstance(self.scoring, dict):
self.scoring_cols_ = list(self.scoring.keys())
else:
self.scoring_cols_ = (
['accuracy']
if self.mscv_.estimator._estimator_type == 'classifier'
else ['r2']
)
return self
def _check_random_states_mapping(self):
self.random_states_mapping_ = {
run_id: random_state
for run_id, random_state in enumerate(
check_random_states(self.random_state, self.n_runs)
)
}
return self
def _set_random_state(self, run_id):
random_state = self.random_states_mapping_[run_id]
for _, estimator in self.mscv_.get_params()['estimators']:
for param_name in estimator.get_params().keys():
if param_name.endswith('__random_state'):
estimator.set_params(**{param_name: random_state})
return self
def _fit(self, run_id, dataset_name, X, y):
# Fit model search
self._set_random_state(run_id).mscv_.fit(X, y)
# Get results
result = pd.DataFrame(self.mscv_.cv_results_)
result = result.assign(run_id=run_id, dataset_name=dataset_name)
scoring_cols = [col for col in result.columns.tolist() if 'mean_test' in col]
result.rename(columns=dict(zip(scoring_cols, self.scoring_cols_)), inplace=True)
result = result.loc[
:, ['run_id', 'dataset_name', 'param_est_name'] + self.scoring_cols_
]
result.loc[:, 'param_est_name'] = result.loc[:, 'param_est_name'].apply(
lambda model: [
_select_name(self.oversamplers_names_, model),
_select_name(self.classifiers_names_, model),
]
)
result[['oversampler', 'classifier']] = pd.DataFrame(
result['param_est_name'].values.tolist()
)
result.drop(columns='param_est_name', inplace=True)
result = result.melt(
id_vars=['run_id'] + self.GROUP_KEYS,
value_vars=self.scoring_cols_,
var_name='metric',
value_name='score',
)
return result
def fit(self, datasets):
"""Fit the experiment to the datasets.
Parameters
----------
datasets : list of (name, (X, y)) tuples
The datasets that are used to fit the oversamplers
and classifiers of the experiment.
Returns
-------
self : object
Return the instance itself.
"""
# Checks
datasets = check_datasets(datasets)
self._check_estimators()._check_mscv()._check_cols(
datasets
)._check_random_states_mapping()
# Get results
results = Parallel(n_jobs=self.n_jobs)(
delayed(self._fit)(run_id, dataset_name, X, y)
for run_id, (dataset_name, (X, y)) in product(range(self.n_runs), datasets)
)
results = pd.concat(results, ignore_index=True)
# Calculate results
self.results_tbl_ = (
results.groupby(self.GROUP_KEYS + ['metric'])
.score.agg(mean_score='mean', std_score='std')
.reset_index()
)
return self
@staticmethod
def _return_row_ranking(row, sign):
"""Returns the ranking of values. In case of tie, each ranking value
is replaced with its group average."""
# Calculate ranking
ranking = (sign * row).argsort().argsort().astype(float)
# Break the tie
groups = np.unique(row, return_inverse=True)[1]
for group_label in np.unique(groups):
indices = groups == group_label
ranking[indices] = ranking[indices].mean()
return ranking.size - ranking
@cached_property
def results_wide_tbl_(self):
check_is_fitted(self, 'results_tbl_')
scores = self.results_tbl_.pivot_table(
index=['dataset_name', 'classifier', 'metric'],
columns=['oversampler'],
values='mean_score',
).reset_index()
scores.rename_axis(None, axis=1, inplace=True)
ranks = scores.apply(
lambda row: self._return_row_ranking(
row[3:], SCORERS[row[2].replace(' ', '_').lower()]._sign
),
axis=1,
)
scores.rename(columns=_col_mapping(scores.columns[3:], 'score'), inplace=True)
ranks.rename(columns=_col_mapping(ranks.columns, 'rank'), inplace=True)
tbl = pd.concat([scores, ranks], axis=1)
return tbl | /research_learn-0.3.1-py3-none-any.whl/rlearn/experiment/_imbalanced.py | 0.92204 | 0.467514 | _imbalanced.py | pypi |
import pandas as pd
from sklearn.utils.validation import check_is_fitted
from ...model_selection import ModelSearchCV
def report_model_search_results(model_search_cv, sort_results=None):
"""Generate a model search report of results.
Parameters
----------
model_search_cv : object of :class:`ModelSearchCV` class
The fitted ModelSearchCV object.
sort_results : str or None
The parameter to use to sort the results.
Returns
-------
report : object of :class:`pd.DataFrame` class
A report as a pandas DataFrame.
"""
# Check model_search_cv parameter
if not isinstance(model_search_cv, ModelSearchCV):
raise ValueError(
'Parameter `model_search_cv` should be a ModelSearchCV instance.'
)
# Check if object is fitted
check_is_fitted(model_search_cv, 'cv_results_')
# Select columns
columns = ['param_est_name'] + [
results_param
for results_param in model_search_cv.cv_results_.keys()
if 'mean_test' in results_param or results_param == 'mean_fit_time'
]
# select results
results = {
results_param: values
for results_param, values in model_search_cv.cv_results_.items()
if results_param in columns
}
# Generate report table
report = pd.DataFrame(results, columns=columns)
# Sort results
if sort_results is not None:
# Use sort_results parameter as the sorting key
try:
report = report.sort_values(
sort_results, ascending=(sort_results == 'mean_fit_time')
).reset_index(drop=True)
# Key error
except KeyError:
# Define error message
if isinstance(model_search_cv.scoring, list):
options = ', '.join(
['mean_test_%s' % sc for sc in model_search_cv.scoring]
)
else:
options = 'mean_test_score'
error_msg = (
f'Parameter `sort_results` should be one of mean_fit_score, {options}.'
)
# Raise custom error
raise KeyError(error_msg)
return report | /research_learn-0.3.1-py3-none-any.whl/rlearn/reporting/_search/_results.py | 0.91372 | 0.42668 | _results.py | pypi |
import warnings
from scipy.stats import friedmanchisquare, ttest_rel
from statsmodels.stats.multitest import multipletests
import pandas as pd
def _extract_pvalue(df):
"""Extract the p-value."""
results = friedmanchisquare(*df.iloc[:, 3:].transpose().values.tolist())
return results.pvalue
def apply_friedman_test(imbalanced_experiment, alpha=0.05):
"""Apply the Friedman test across datasets for every
combination of classifiers and metrics.
Parameters
----------
imbalanced_experiment : object of :class:`rlearn.experiment.ImbalancedClassificationExperiment` class
The fitted imbalanced experiment object.
Returns
-------
friedman_test : object of :class:`pd.DataFrame` class
The results of the test as a pandas DataFrame.
"""
# Calculate ranking results
ovrs_names = imbalanced_experiment.results_wide_tbl_.columns[3:]
# Apply test for more than two oversamplers
if len(ovrs_names) < 3:
warnings.warn(
'More than two oversamplers are required apply the Friedman test.'
)
# Calculate p-values
friedman_test = (
imbalanced_experiment.results_wide_tbl_.groupby(['classifier', 'metric'])
.apply(_extract_pvalue)
.reset_index()
.rename(columns={0: 'p_value'})
)
# Compare p-values to significance level
friedman_test['significance'] = friedman_test['p_value'] < alpha
return friedman_test
def apply_holms_test(imbalanced_experiment, control_oversampler=None):
"""Use the Holm's method to adjust the p-values of a paired difference
t-test for every combination of classifiers and metrics using a control
oversampler.
Parameters
----------
imbalanced_experiment : object of :class:`rlearn.experiment.ImbalancedClassificationExperiment` class
The fitted imbalanced experiment object.
control_oversampler : str or None, default=None
The name of the control oversampler. The default oversampler is the last one.
Returns
-------
holms_test : object of :class:`pd.DataFrame` class
The results of the test as a pandas DataFrame.
"""
# Calculate wide optimal results
ovrs_names = list(imbalanced_experiment.results_wide_tbl_.columns[3:])
# Apply test for more than one oversampler
if len(ovrs_names) < 2:
warnings.warn('More than one oversampler is required to apply the Holms test.')
# Use the last if no control oversampler is provided
if control_oversampler is None:
control_oversampler = ovrs_names[-1]
ovrs_names.remove(control_oversampler)
# Define empty p-values table
pvalues = pd.DataFrame()
# Populate p-values table
for name in ovrs_names:
pvalues_pair = imbalanced_experiment.results_wide_tbl_.groupby(
['classifier', 'metric']
)[[name, control_oversampler]].apply(
lambda df: ttest_rel(df[name], df[control_oversampler])[1]
)
pvalues_pair = pd.DataFrame(pvalues_pair, columns=[name])
pvalues = pd.concat([pvalues, pvalues_pair], axis=1)
# Corrected p-values
holms_test = pd.DataFrame(
pvalues.apply(
lambda col: multipletests(col, method='holm')[1], axis=1
).values.tolist(),
columns=ovrs_names,
)
holms_test = holms_test.set_index(pvalues.index).reset_index()
return holms_test | /research_learn-0.3.1-py3-none-any.whl/rlearn/reporting/_imbalanced/_statistics.py | 0.899031 | 0.684607 | _statistics.py | pypi |
from itertools import product
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils import check_random_state, check_X_y
from sklearn.model_selection._search import ParameterGrid
from imblearn.pipeline import Pipeline
def check_random_states(random_state, repetitions):
"""Create random states for experiments."""
random_state = check_random_state(random_state)
return [
random_state.randint(0, 2**32 - 1, dtype='uint32') for _ in range(repetitions)
]
def check_param_grids(param_grids, est_names):
"""Check the parameters grids to use with
parametrized estimators."""
# Check the parameters grids
flat_param_grids = [
param_grid for param_grid in list(ParameterGrid(param_grids)) if param_grid
]
# Append existing estimators names
param_grids = []
for param_grid in flat_param_grids:
# Get estimator name
est_name = param_grid.pop('est_name', None)
# Modify values
param_grid = {param: [val] for param, val in param_grid.items()}
# Check estimators prefixes
params_prefixes = set([param.split('__')[0] for param in param_grid.keys()])
if not params_prefixes.issubset(est_names):
raise ValueError(
'Parameters prefixes are not subset of parameter `est_names`.'
)
if len(params_prefixes) > 1:
raise ValueError('Parameters prefixes are not unique.')
if est_name is not None and len(params_prefixes.union([est_name])) > 1:
raise ValueError(
'Parameters prefixes and parameter `est_name` are not unique.'
)
param_grid['est_name'] = (
[est_name] if est_name is not None else list(params_prefixes)
)
# Append parameter grid
param_grids.append(param_grid)
# Append missing estimators names
current_est_names = set([param_grid['est_name'][0] for param_grid in param_grids])
missing_est_names = set(est_names).difference(current_est_names)
for est_name in missing_est_names:
param_grids.append({'est_name': [est_name]})
return param_grids
def check_oversamplers_classifiers(oversamplers, classifiers):
"""Extract estimators and parameters grids."""
# Create estimators and parameter grids
estimators, param_grids = [], []
for oversampler, classifier in product(oversamplers, classifiers):
# Unpack oversamplers and classifiers
ovs_name, ovs, ovs_param_grid = oversampler
clf_name, clf, clf_param_grid = classifier
if ovs is None:
ovs = FunctionTransformer()
# Create estimator
name = f'{ovs_name}_{clf_name}'
ovs_steps = ovs.steps if isinstance(ovs, Pipeline) else [(ovs_name, ovs)]
clf_steps = clf.steps if isinstance(clf, Pipeline) else [(clf_name, clf)]
steps = ovs_steps + clf_steps
estimators.append((name, Pipeline(steps)))
# Create parameter grid
ovs_prefix = f'{name}' if isinstance(ovs, Pipeline) else f'{name}__{ovs_name}'
ovs_param_grid = [
{f'{ovs_prefix}__{param}': [val] for param, val in param_grid.items()}
for param_grid in ParameterGrid(ovs_param_grid)
]
clf_prefix = f'{name}' if isinstance(clf, Pipeline) else f'{name}__{clf_name}'
clf_param_grid = [
{f'{clf_prefix}__{param}': [val] for param, val in param_grid.items()}
for param_grid in ParameterGrid(clf_param_grid)
]
for param_grid1, param_grid2 in product(ovs_param_grid, clf_param_grid):
param_grid = {**param_grid1, **param_grid2, 'est_name': [name]}
param_grids.append(param_grid)
return estimators, param_grids
def check_datasets(datasets):
"""Check that datasets is a list of (dataset_name, (X, y)) pairs."""
try:
# Get datasets names
datasets_names = [dataset_name for dataset_name, _ in datasets]
# Check if datasets names are unique strings
are_all_strings = all(
[isinstance(dataset_name, str) for dataset_name in datasets_names]
)
are_unique = len(list(datasets_names)) == len(set(datasets_names))
if not are_all_strings or not are_unique:
raise ValueError('The datasets names should be unique strings.')
except TypeError:
raise TypeError(
'The datasets should be a list ' 'of (dataset name, (X, y)) pairs.'
)
datasets = [(name, check_X_y(X, y)) for name, (X, y) in datasets]
return datasets
def check_estimator_type(estimators):
    """Returns the type of estimators."""
    types_found = {estimator._estimator_type for _, estimator in estimators}
    if len(types_found) > 1:
        raise ValueError(
            'Both classifiers and regressors were found. '
            'A single estimator type should be included.'
        )
    return types_found.pop()
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from sklearn.utils.metaestimators import _BaseComposition
from sklearn.utils.validation import check_is_fitted
from sklearn.model_selection import GridSearchCV
from ..utils import check_estimator_type, check_param_grids
class MultiEstimatorMixin(_BaseComposition):
    """Mixin that exposes a collection of named estimators as one estimator.

    Parameters
    ----------
    estimators : list of (str, estimator) tuples
        The candidate estimators.
    est_name : str or None
        Name of the estimator that ``fit`` will actually train.
    """

    def __init__(self, estimators, est_name=None):
        self.estimators = estimators
        self.est_name = est_name

    def _validate_estimators(self):
        """Check ``estimators`` and ``est_name``; raise on malformed input."""
        # BUG FIX: in the original the second line of this message was a
        # standalone expression statement (no parentheses/concatenation), so
        # the error text was silently truncated to its first half.
        error_msg = ('Invalid `estimators` attribute, `estimators` should'
                     ' be a list of (string, estimator) tuples.')
        try:
            if len(self.estimators) == 0:
                raise TypeError(error_msg)
            for name, est in self.estimators:
                is_str = isinstance(name, str)
                is_est = isinstance(est, BaseEstimator)
                if not (is_str and is_est):
                    raise TypeError(error_msg)
        except TypeError:
            raise TypeError(error_msg)
        self.est_names_ = [est_name for est_name, _ in self.estimators]
        # Rejects duplicate names and names clashing with parameter syntax.
        super(MultiEstimatorMixin, self)._validate_names(self.est_names_)
        if self.est_name not in self.est_names_:
            raise ValueError(
                f'Attribute `est_name` should be one of {", ".join(self.est_names_)}. '
                f'Got `{self.est_name}` instead.'
            )

    def set_params(self, **params):
        """Set the parameters.

        Valid parameter keys can be listed with get_params().

        Parameters
        ----------
        params : keyword arguments
            Specific parameters using e.g. set_params(parameter_name=new_value)
            In addition, to setting the parameters of the ``MultiEstimatorMixin``,
            the individual estimators of the ``MultiEstimatorMixin`` can also be
            set or replaced by setting them to None.
        """
        super(MultiEstimatorMixin, self)._set_params('estimators', **params)
        return self

    def get_params(self, deep=True):
        """Get the parameters.

        Parameters
        ----------
        deep: bool
            Setting it to True gets the various estimators and the parameters
            of the estimators as well
        """
        return super(MultiEstimatorMixin, self)._get_params('estimators', deep=deep)

    def fit(self, X, y, **fit_params):
        """Fit the selected estimator."""
        # Validate estimators
        self._validate_estimators()
        # Clone so the user-provided estimator object is never mutated.
        estimator = clone(dict(self.estimators)[self.est_name])
        # Fit estimator
        self.estimator_ = estimator.fit(X, y, **fit_params)
        return self

    def predict(self, X):
        """Predict with the selected estimator."""
        check_is_fitted(self, 'estimator_')
        return self.estimator_.predict(X)
class MultiClassifier(MultiEstimatorMixin, ClassifierMixin):
    """The functionality of a collection of classifiers is provided as
    a single metaclassifier. The classifier to be fitted is selected using a
    parameter."""

    def predict_proba(self, X):
        """Predict the probability with the selected estimator."""
        check_is_fitted(self, 'estimator_')
        probabilities = self.estimator_.predict_proba(X)
        return probabilities
class MultiRegressor(MultiEstimatorMixin, RegressorMixin):
    """The functionality of a collection of regressors is provided as
    a single metaregressor. The regressor to be fitted is selected using a
    parameter."""
    # No extra API beyond MultiEstimatorMixin: regressors only need
    # fit/predict, which the mixin already provides.
    pass
class ModelSearchCV(GridSearchCV):
    """Exhaustive search over specified parameter values for a collection of estimators.

    Important members are fit, predict.

    ModelSearchCV implements a "fit" and a "score" method.
    It also implements "predict", "predict_proba", "decision_function",
    "transform" and "inverse_transform" if they are implemented in the
    estimators used.

    The parameters of the estimators used to apply these methods are optimized
    by cross-validated grid-search over their parameter grids.

    Read more in the :ref:`User Guide <user_guide>`.

    Parameters
    ----------
    estimators : list of (string, estimator) tuples
        Each estimator is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.

    param_grids : dict or list of dictionaries
        Dictionary with parameters names (string) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings.

    scoring : string, callable, list/tuple, dict or None, optional (default=None)
        A single string or a callable to evaluate the predictions on the
        test set.
        For evaluating multiple metrics, either give a list of (unique) strings
        or a dict with names as keys and callables as values.
        NOTE that when using custom scorers, each scorer should return a single
        value. Metric functions returning a list/array of values can be wrapped
        into multiple scorers that return one value each.
        If ``None``, a default scorer is used.

    n_jobs : int or None, optional (default=None)
        Number of jobs to run in parallel.
        - When ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        - When ``-1`` means using all processors.

    pre_dispatch : int or string, optional (default='2*n_jobs')
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - ``None``, in which case all the jobs are immediately created.
        - An int, giving the exact number of total jobs that are spawned.
        - A string, as a function of n_jobs i.e. ``'2*n_jobs'``.

    cv : int, cross-validation generator or an iterable, optional (default=5)
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - ``None``, to use the default 5-fold cross validation.
        - integer, to specify the number of folds in a ``(Stratified)KFold``.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train, test splits.

    refit : boolean, string, or callable, optional (default=True)
        Refit an estimator using the best found parameters on the whole
        dataset.
        For multiple metric evaluation, this needs to be a string denoting the
        scorer that would be used to find the best parameters for refitting
        the estimator at the end.
        Where there are considerations other than maximum score in
        choosing a best estimator, ``refit`` can be set to a function which
        returns the selected ``best_index_`` given ``cv_results_``. In that
        case, the ``best_estimator_`` and ``best_parameters_`` will be set
        according to the returned ``best_index_`` while the ``best_score_``
        attribute will not be available.
        The refitted estimator is made available at the ``best_estimator_``
        attribute and permits using ``predict`` directly on this
        ``ModelSearchCV`` instance.
        Also for multiple metric evaluation, the attributes ``best_index_``,
        ``best_score_`` and ``best_params_`` will only be available if
        ``refit`` is set and all of them will be determined w.r.t this specific
        scorer.
        See ``scoring`` parameter to know more about multiple metric
        evaluation.

    verbose : integer, optional (default=0)
        Controls the verbosity: the higher, the more messages.

    error_score : 'raise' or numeric, optional (default='raise')
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    return_train_score : boolean, optional (default=False)
        If ``False``, the ``cv_results_`` attribute will not include training
        scores.
        Computing training scores is used to get insights on how different
        parameter settings impact the overfitting/underfitting trade-off.
        However computing the scores on the training set can be computationally
        expensive and is not strictly required to select the parameters that
        yield the best generalization performance.

    Examples
    --------
    >>> from sklearn.datasets import load_breast_cancer
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> from rlearn.model_selection import ModelSearchCV
    >>> X, y, *_ = load_breast_cancer().values()
    >>> param_grids = [{'dt__max_depth': [3, 6]}, {'kn__n_neighbors': [3, 5]}]
    >>> estimators = [('dt', DecisionTreeClassifier()), ('kn', KNeighborsClassifier())]
    >>> model_search_cv = ModelSearchCV(estimators, param_grids)
    >>> model_search_cv.fit(X, y)
    ModelSearchCV(...)
    >>> sorted(model_search_cv.cv_results_.keys())
    ['mean_fit_time', 'mean_score_time', 'mean_test_score',...]

    Attributes
    ----------
    cv_results_ : dict of numpy (masked) ndarrays
        A dict with keys as column headers and values as columns, that can be
        imported into a pandas ``DataFrame``.
        For instance the below given table

        +-------------------+-----------+------------+-----------------+---+---------+
        |param_dtc_criterion|param_gamma|param_degree|split0_test_score|...|rank_t...|
        +===================+===========+============+=================+===+=========+
        | 'entropy'         | --        | 2          | 0.80            |...| 2       |
        +-------------------+-----------+------------+-----------------+---+---------+
        | 'entropy'         | --        | 3          | 0.70            |...| 4       |
        +-------------------+-----------+------------+-----------------+---+---------+
        | 'entropy'         | 0.1       | --         | 0.80            |...| 3       |
        +-------------------+-----------+------------+-----------------+---+---------+
        | 'entropy'         | 0.2       | --         | 0.93            |...| 1       |
        +-------------------+-----------+------------+-----------------+---+---------+

        will be represented by a ``cv_results_`` dict of::

            {
            'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
                                         mask = [False False False False]...)
            'param_gamma': masked_array(data = [-- -- 0.1 0.2],
                                        mask = [ True  True False False]...),
            'param_degree': masked_array(data = [2.0 3.0 -- --],
                                         mask = [False False  True  True]...),
            'split0_test_score'  : [0.80, 0.70, 0.80, 0.93],
            'split1_test_score'  : [0.82, 0.50, 0.70, 0.78],
            'mean_test_score'    : [0.81, 0.60, 0.75, 0.85],
            'std_test_score'     : [0.01, 0.10, 0.05, 0.08],
            'rank_test_score'    : [2, 4, 3, 1],
            'split0_train_score' : [0.80, 0.92, 0.70, 0.93],
            'split1_train_score' : [0.82, 0.55, 0.70, 0.87],
            'mean_train_score'   : [0.81, 0.74, 0.70, 0.90],
            'std_train_score'    : [0.01, 0.19, 0.00, 0.03],
            'mean_fit_time'      : [0.73, 0.63, 0.43, 0.49],
            'std_fit_time'       : [0.01, 0.02, 0.01, 0.01],
            'mean_score_time'    : [0.01, 0.06, 0.04, 0.04],
            'std_score_time'     : [0.00, 0.00, 0.00, 0.01],
            'params'             : [{'kernel': 'poly', 'degree': 2}, ...],
            }

        NOTE
        The key ``'params'`` is used to store a list of parameter
        settings dicts for all the parameter candidates.
        The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
        ``std_score_time`` are all in seconds.
        For multi-metric evaluation, the scores for all the scorers are
        available in the ``cv_results_`` dict at the keys ending with that
        scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
        above. ('split0_test_precision', 'mean_train_precision' etc.)

    best_estimator_ : estimator or dict
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if ``refit=False``.
        See ``refit`` parameter for more information on allowed values.

    best_score_ : float
        Mean cross-validated score of the best_estimator
        For multi-metric evaluation, this is present only if ``refit`` is
        specified.

    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
        For multi-metric evaluation, this is present only if ``refit`` is
        specified.

    best_index_ : int
        The index (of the ``cv_results_`` arrays) which corresponds to the best
        candidate parameter setting.
        The dict at ``search.cv_results_['params'][search.best_index_]`` gives
        the parameter setting for the best model, that gives the highest
        mean score (``search.best_score_``).
        For multi-metric evaluation, this is present only if ``refit`` is
        specified.

    scorer_ : function or a dict
        Scorer function used on the held out data to choose the best
        parameters for the model.
        For multi-metric evaluation, this attribute holds the validated
        ``scoring`` dict which maps the scorer key to the scorer callable.

    n_splits_ : int
        The number of cross-validation splits (folds/iterations).

    refit_time_ : float
        Seconds used for refitting the best model on the whole dataset.
        This is present only if ``refit`` is not False.

    Notes
    ------
    The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed in which case it is used instead.

    If `n_jobs` was set to a value higher than one, the data is copied for each
    point in the grid (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    """

    def __init__(
        self,
        estimators,
        param_grids,
        scoring=None,
        n_jobs=None,
        refit=True,
        cv=5,
        verbose=0,
        pre_dispatch='2*n_jobs',
        error_score='raise',
        return_train_score=False,
    ):
        # Wrap all candidate estimators in a single multi-estimator whose
        # `est_name` parameter selects which one is fitted.
        estimator = (
            MultiClassifier(estimators)
            if check_estimator_type(estimators) == 'classifier'
            else MultiRegressor(estimators)
        )
        # Rewrite the user-facing parameter grids so every entry targets the
        # wrapping multi-estimator (including the `est_name` selector).
        est_names = [est_name for est_name, _ in estimators]
        param_grid = check_param_grids(param_grids, est_names)
        super(ModelSearchCV, self).__init__(
            estimator=estimator,
            param_grid=param_grid,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            cv=cv,
            verbose=verbose,
            pre_dispatch=pre_dispatch,
            error_score=error_score,
            return_train_score=return_train_score,
        )
        self.estimators = estimators
        self.param_grids = param_grids

    def fit(self, X, y=None, groups=None, **fit_params):
        # Call superclass fit method
        super(ModelSearchCV, self).fit(X, y, groups=groups, **fit_params)
        # Recreate best estimator attribute: unwrap the fitted inner estimator
        # from the multi-estimator so callers get the actual best model.
        if hasattr(self, 'best_estimator_'):
            self.best_estimator_ = self.best_estimator_.estimator_
        return self
import numpy as np
import cv2
def center_crop(X, out_height, out_width):
    """Crop the spatial center of a batch of images.

    Args:
        X: array of shape (num_images, height, width, num_channels).
        out_height, out_width: crop size; must not exceed the input size.

    Returns:
        Array of shape (num_images, out_height, out_width, num_channels).
    """
    _, in_height, in_width, _ = X.shape
    assert out_height <= in_height and out_width <= in_width
    # BUG FIX: the offsets were computed with true division, producing float
    # slice bounds that raise TypeError in Python 3; use floor division.
    start_i = (in_height - out_height) // 2
    start_j = (in_width - out_width) // 2
    out_X = X[:, start_i:start_i + out_height, start_j:start_j + out_width, :]
    return out_X
# random crops for each of the images.
def random_crop(X, out_height, out_width):
    """Take an independent uniformly-random crop from each image in the batch."""
    num_examples, in_height, in_width, _ = X.shape
    # The output dimensions have to be smaller or equal than the input ones.
    assert out_height <= in_height and out_width <= in_width
    row_offsets = np.random.randint(in_height - out_height + 1, size=num_examples)
    col_offsets = np.random.randint(in_width - out_width + 1, size=num_examples)
    crops = [
        X[idx, r:r + out_height, c:c + out_width, :]
        for idx, (r, c) in enumerate(zip(row_offsets, col_offsets))
    ]
    return np.array(crops)
def random_flip_left_right(X, p_flip):
    """Mirror each image horizontally, independently, with probability `p_flip`."""
    num_examples = X.shape[0]
    flipped = X.copy()
    selected = np.random.random(num_examples) < p_flip
    # Reverse the width axis only for the selected examples.
    flipped[selected] = flipped[selected, :, ::-1, :]
    return flipped
def per_image_whiten(X):
    """Subtract each image's mean and scale it to unit Euclidean norm."""
    num_examples = X.shape[0]
    flat = X.reshape((num_examples, -1))
    centered = flat - flat.mean(axis=1, keepdims=True)
    norms = np.sqrt((centered * centered).sum(axis=1, keepdims=True))
    return (centered / norms).reshape(X.shape)
# Assumes the following ordering for X: (num_images, height, width, num_channels)
def zero_pad_border(X, pad_size):
    """Surround every image with a `pad_size`-wide border of zeros."""
    n, height, width, num_channels = X.shape
    padded_shape = (n, height + 2 * pad_size, width + 2 * pad_size, num_channels)
    X_padded = np.zeros(padded_shape, dtype='float32')
    X_padded[:, pad_size:pad_size + height, pad_size:pad_size + width, :] = X
    return X_padded
# TODO: check that this works in the case of images with larger number of channels.
def random_scale_rotate(X, angle_min, angle_max, scale_min, scale_max):
    """Apply an independent random rotation and scaling to each image.

    Args:
        X: array of shape (num_images, height, width, num_channels).
        angle_min, angle_max: rotation angle range, in degrees.
        scale_min, scale_max: isotropic scale factor range.

    Returns:
        Array with the same shape as `X`.
    """
    n, height, width, channels = X.shape
    scales = np.random.uniform(scale_min, scale_max, size=n)
    angles = np.random.uniform(angle_min, angle_max, size=n)
    out_lst = []
    # BUG FIX: cv2.getRotationMatrix2D expects the center as (x, y), i.e.
    # (width/2, height/2); the original swapped the coordinates, rotating
    # non-square images about the wrong point.
    rot_center = (width / 2, height / 2)
    for i in range(n):
        A = cv2.getRotationMatrix2D(rot_center, angles[i], scales[i])
        out = cv2.warpAffine(X[i], A, (width, height))
        out_lst.append(out)
    Xout = np.stack(out_lst)
    # cv2.warpAffine drops a trailing singleton channel axis; restore it.
    if channels == 1:
        Xout = np.expand_dims(Xout, 3)
    return Xout
import random
import numpy as np
import research_toolbox.tb_utils as tb_ut
def shuffle(xs):
    """Return a randomly reordered copy of `xs` (the input is untouched)."""
    order = list(range(len(xs)))
    random.shuffle(order)
    return [xs[position] for position in order]
def random_permutation(n):
    """Return a uniformly random permutation of range(n) as a list."""
    perm = list(range(n))
    random.shuffle(perm)
    return perm
def argsort(xs, fns, increasing=True):
    """The functions in fns are used to compute a key which are then used to
    construct a tuple which is then used to sort. The earlier keys are more
    important than the later ones.
    """
    def make_key(x):
        return tuple(f(x) for f in fns)

    ordered = sorted(enumerate(xs),
                     key=lambda pair: make_key(pair[1]),
                     reverse=not increasing)
    # Unzipping yields the original positions in sorted order (as a tuple).
    indices, _ = zip(*ordered)
    return indices
def sort(xs, fns, increasing=True):
    """Sort `xs` according to the key functions in `fns`."""
    return apply_permutation(xs, argsort(xs, fns, increasing))
def apply_permutation(xs, idxs):
    """Reorder `xs` so that output position k holds xs[idxs[k]]."""
    # `idxs` must be a permutation of range(len(xs)).
    assert len(set(idxs) & set(range(len(xs)))) == len(xs)
    return [xs[position] for position in idxs]
def apply_inverse_permutation(xs, idxs):
    """Undo apply_permutation: output[idxs[k]] = xs[k]."""
    # `idxs` must be a permutation of range(len(xs)).
    assert len(set(idxs) & set(range(len(xs)))) == len(xs)
    result = [None] * len(xs)
    for src_pos, dst_pos in enumerate(idxs):
        result[dst_pos] = xs[src_pos]
    return result
def shuffle_tied(xs_lst):
    """Shuffle several equal-length lists with one shared random permutation.

    Elements at the same position across the input lists stay aligned.

    Bug fix: the original asserted ``len(list(map(len, xs_lst))) == 1``, which
    counts the number of lists (failing whenever more than one list is given)
    instead of checking that all lists share one length.
    """
    assert len(xs_lst) > 0 and len(set(map(len, xs_lst))) == 1
    n = len(xs_lst[0])
    idxs = list(range(n))
    random.shuffle(idxs)
    return [[xs[i] for i in idxs] for xs in xs_lst]
def set_random_seed(x=None):
    """Seed both numpy's and the stdlib's global RNGs (None -> OS entropy)."""
    np.random.seed(x)
    random.seed(x)
def uniform_sample_product(lst_lst_vals, num_samples):
    """Draw `num_samples` configurations, picking uniformly from each value list."""
    num_dims = len(lst_lst_vals)
    # One column of independent uniform picks per dimension.
    per_dim_choices = []
    for values in lst_lst_vals:
        picks = np.random.randint(len(values), size=num_samples)
        per_dim_choices.append([values[p] for p in picks])
    # Transpose columns into per-sample rows.
    return [[per_dim_choices[dim][s] for dim in range(num_dims)]
            for s in range(num_samples)]
def uniformly_random_indices(num_indices, num_samples, with_replacement):
    """Sample `num_samples` indices uniformly from range(num_indices).

    Bug fix: the original forwarded an undefined `probs` variable to
    ``np.random.choice``, raising NameError on every call; uniform sampling
    needs no `p` argument.
    """
    return np.random.choice(num_indices, num_samples, replace=with_replacement)
def random_indices_with_probs(num_indices, num_samples, with_replacement,
                              probs):
    """Sample indices from range(num_indices) with the given probabilities."""
    return np.random.choice(num_indices,
                            size=num_samples,
                            replace=with_replacement,
                            p=probs)
import os
import shutil
import uuid
import research_toolbox.tb_utils as tb_ut
def path_prefix(path):
    """Return the directory component of `path`."""
    head, _ = os.path.split(path)
    return head


def path_last_element(path):
    """Return the final component of `path`."""
    _, tail = os.path.split(path)
    return tail


def path_relative_to_absolute(path):
    """Return `path` as an absolute path."""
    return os.path.abspath(path)


def path_exists(path):
    """True if `path` refers to an existing file or folder."""
    return os.path.exists(path)


def file_exists(path):
    """True if `path` refers to an existing regular file."""
    return os.path.isfile(path)


def folder_exists(path):
    """True if `path` refers to an existing directory."""
    return os.path.isdir(path)
def create_file(filepath, abort_if_exists=True, create_parent_folders=False):
    """Create an empty file at `filepath` (truncates an existing one)."""
    parent = path_prefix(filepath)
    assert create_parent_folders or folder_exists(parent)
    assert not (abort_if_exists and file_exists(filepath))
    if create_parent_folders:
        create_folder(parent,
                      abort_if_exists=False,
                      create_parent_folders=True)
    # Opening in write mode creates (or truncates) the file.
    open(filepath, 'w').close()
def create_folder(folderpath, abort_if_exists=True,
                  create_parent_folders=False):
    """Create a folder at `folderpath` (no-op if it already exists and allowed)."""
    assert not file_exists(folderpath)
    assert create_parent_folders or folder_exists(path_prefix(folderpath))
    assert not (abort_if_exists and folder_exists(folderpath))
    if folder_exists(folderpath):
        return
    os.makedirs(folderpath)
def copy_file(src_filepath,
              dst_filepath,
              abort_if_dst_exists=True,
              create_parent_folders=False):
    """Copy a single file, optionally creating the destination's parents."""
    assert file_exists(src_filepath)
    assert src_filepath != dst_filepath
    assert not (abort_if_dst_exists and file_exists(dst_filepath))
    dst_folder = path_prefix(dst_filepath)
    assert create_parent_folders or folder_exists(dst_folder)
    if not folder_exists(dst_folder):
        create_folder(dst_folder, create_parent_folders=True)
    shutil.copyfile(src_filepath, dst_filepath)
def copy_folder(src_folderpath,
                dst_folderpath,
                ignore_hidden_files=False,
                ignore_hidden_folders=False,
                ignore_file_exts=None,
                abort_if_dst_exists=True,
                create_parent_folders=False):
    """Recursively copy `src_folderpath` to `dst_folderpath`.

    If `abort_if_dst_exists` is False and the destination exists, the
    destination folder (and all of its contents) is deleted first.
    """
    assert folder_exists(src_folderpath)
    assert src_folderpath != dst_folderpath
    assert not (abort_if_dst_exists and folder_exists(dst_folderpath))
    if (not abort_if_dst_exists) and folder_exists(dst_folderpath):
        delete_folder(dst_folderpath, abort_if_nonempty=False)
    pref_dst_fo = path_prefix(dst_folderpath)
    assert create_parent_folders or folder_exists(pref_dst_fo)
    create_folder(dst_folderpath, create_parent_folders=create_parent_folders)
    # create all folders in the destination.
    fos = list_folders(
        src_folderpath,
        use_relative_paths=True,
        recursive=True,
        ignore_hidden_folders=ignore_hidden_folders)
    for fo in fos:
        fo_path = join_paths([dst_folderpath, fo])
        create_folder(fo_path, create_parent_folders=True)
    # copy all files to the destination.
    # NOTE: picks the relevant keyword arguments back out of locals() by name.
    kwargs = tb_ut.subset_dict_via_selection(
        locals(),
        ['ignore_hidden_folders', 'ignore_hidden_files', 'ignore_file_exts'])
    fis = list_files(
        src_folderpath, use_relative_paths=True, recursive=True, **kwargs)
    for fi in fis:
        src_fip = join_paths([src_folderpath, fi])
        dst_fip = join_paths([dst_folderpath, fi])
        copy_file(src_fip, dst_fip)
def delete_file(filepath, abort_if_notexists=True):
    """Delete the file at `filepath` if it exists."""
    assert (not abort_if_notexists) or file_exists(filepath)
    if file_exists(filepath):
        os.remove(filepath)
def delete_folder(folderpath, abort_if_nonempty=True, abort_if_notexists=True):
    """Delete the folder at `folderpath` (recursively, if allowed non-empty)."""
    if not folder_exists(folderpath):
        assert not abort_if_notexists
        return
    assert (not abort_if_nonempty) or len(os.listdir(folderpath)) == 0
    shutil.rmtree(folderpath)
def list_paths(folderpath,
               ignore_files=False,
               ignore_dirs=False,
               ignore_hidden_folders=True,
               ignore_hidden_files=True,
               ignore_file_exts=None,
               recursive=False,
               use_relative_paths=False):
    """List the paths (files and/or folders) under `folderpath`.

    Args:
        folderpath: existing folder to enumerate.
        ignore_files / ignore_dirs: drop files or directories from the result.
        ignore_hidden_folders / ignore_hidden_files: skip dot-prefixed entries;
            pruned hidden folders are not descended into either.
        ignore_file_exts: optional list of suffixes (e.g. ['.pyc']) to drop.
        recursive: if False, only the top level of `folderpath` is listed.
        use_relative_paths: return paths relative to `folderpath` instead of
            prefixed by it.

    Returns:
        List of path strings.
    """
    assert folder_exists(folderpath)
    path_list = []
    # enumerating all desired paths in a directory.
    for root, dirs, files in os.walk(folderpath):
        if ignore_hidden_folders:
            # Mutating `dirs` in place also stops os.walk from descending
            # into the pruned hidden folders.
            dirs[:] = [d for d in dirs if not d[0] == '.']
        if ignore_hidden_files:
            files = [f for f in files if not f[0] == '.']
        if ignore_file_exts != None:
            files = [
                f for f in files
                if not any([f.endswith(ext) for ext in ignore_file_exts])
            ]
        # get only the path relative to this path.
        if not use_relative_paths:
            pref_root = root
        else:
            pref_root = os.path.relpath(root, folderpath)
        if not ignore_files:
            path_list.extend([join_paths([pref_root, f]) for f in files])
        if not ignore_dirs:
            path_list.extend([join_paths([pref_root, d]) for d in dirs])
        if not recursive:
            break
    return path_list
def list_files(folderpath,
               ignore_hidden_folders=True,
               ignore_hidden_files=True,
               ignore_file_exts=None,
               recursive=False,
               use_relative_paths=False):
    """List files under `folderpath` (directories are excluded)."""
    # Forward the options explicitly rather than via the locals() trick.
    return list_paths(
        folderpath,
        ignore_dirs=True,
        ignore_hidden_folders=ignore_hidden_folders,
        ignore_hidden_files=ignore_hidden_files,
        ignore_file_exts=ignore_file_exts,
        recursive=recursive,
        use_relative_paths=use_relative_paths)
def list_folders(folderpath,
                 ignore_hidden_folders=True,
                 recursive=False,
                 use_relative_paths=False):
    """List folders under `folderpath` (files are excluded)."""
    # Forward the options explicitly rather than via the locals() trick.
    return list_paths(
        folderpath,
        ignore_files=True,
        ignore_hidden_folders=ignore_hidden_folders,
        recursive=recursive,
        use_relative_paths=use_relative_paths)
def list_leaf_folders(root_folderpath, ignore_hidden_folders=True):
    """Return the folders under `root_folderpath` that have no subfolders."""
    def visit(folderpath, leaves):
        children = list_folders(
            folderpath, ignore_hidden_folders=ignore_hidden_folders)
        if not children:
            leaves.append(folderpath)
        else:
            for child in children:
                visit(child, leaves)
        return leaves

    return visit(root_folderpath, [])
# NOTE: I think that this function is more general than list_leaf_folders.
def list_folders_conditionally(root_folderpath,
                               cond_fn,
                               ignore_hidden_folders=True):
    """Descends down the directory tree rooted at the specified folder path, tests a
    condition at each node, and stops the descent down that particular path if
    the condition evaluates to true. The nodes which evaluated to true are
    returned in a list.

    This function is useful, for example, to work with arbitrarily nested folders
    that always end up having some regular folder structure close to the leaves.
    """
    def visit(folderpath, matches):
        if cond_fn(folderpath):
            matches.append(folderpath)
            return matches
        for child in list_folders(
                folderpath, ignore_hidden_folders=ignore_hidden_folders):
            visit(child, matches)
        return matches

    return visit(root_folderpath, [])
def join_paths(paths):
    """Join a list of path components with the OS path separator."""
    return os.path.join(*paths)
def pairs_to_filename(ks, vs, kv_sep='', pair_sep='_', prefix='',
                      suffix='.txt'):
    """Build a filename out of key-value pairs, e.g. 'lr0.1_bs32.txt'."""
    joined_pairs = [k + kv_sep + v for k, v in zip(ks, vs)]
    return prefix + pair_sep.join(joined_pairs) + suffix
def get_unique_filename(folderpath, fileext):
    """Draw random UUIDs until one does not collide with an existing file."""
    while True:
        candidate = uuid.uuid4()
        candidate_path = join_paths(
            [folderpath, "%s.%s" % (candidate, fileext)])
        if not file_exists(candidate_path):
            return candidate
def get_unique_filepath(folderpath, fileext):
    """Return a path in `folderpath` whose filename does not exist yet."""
    unique_name = get_unique_filename(folderpath, fileext)
    return join_paths([folderpath, "%s.%s" % (unique_name, fileext)])
def get_current_working_directory():
    """Return the absolute path of the process's current working directory."""
    return os.getcwd()
import functools
import inspect
import pprint
import itertools
import numpy as np
def powers_of_two(starting_power, ending_power, is_int_type=False):
    """Return 2**starting_power ... 2**ending_power as a numpy array."""
    assert ending_power >= starting_power
    num_points = ending_power - starting_power + 1
    # Note: only an exact `False` selects the float dtype, matching the
    # original `is False` comparison.
    dtype = 'float64' if is_int_type is False else 'int64'
    return np.logspace(starting_power,
                       ending_power,
                       num=num_points,
                       base=2,
                       dtype=dtype)
### auxiliary functions for avoiding boilerplate in in assignments
def set_object_variables(obj, d, abort_if_exists=False,
                         abort_if_notexists=True):
    """Set attributes of `obj` from the dict `d` and return `obj`.

    Args:
        obj: object whose attributes are assigned.
        d: mapping from attribute name to value.
        abort_if_exists: if True, fail when any key is already an attribute.
        abort_if_notexists: if True (default), fail when any key is NOT
            already an attribute (i.e. only overwriting is allowed).

    Bug fix: the original also ran an unconditional
    ``assert not hasattr(obj, k)`` inside the loop, which contradicts the
    default ``abort_if_notexists=True`` precondition (keys must exist) and
    made every such call fail.
    """
    d_to = vars(obj)
    assert (not abort_if_exists) or all([k not in d_to for k in d.keys()])
    assert (not abort_if_notexists) or all([k in d_to for k in d.keys()])
    for k, v in d.items():
        setattr(obj, k, v)
    return obj
def get_object_variables(obj, varnames, tuple_fmt=False):
    """Read the named attributes from `obj` (as a dict, or a tuple if asked)."""
    attrs = vars(obj)
    if not tuple_fmt:
        return subset_dict_via_selection(attrs, varnames)
    return tuple(attrs[name] for name in varnames)
### partial application and other functional primitives
def partial_apply(fn, d):
    """Return `fn` with the keyword arguments in `d` pre-bound."""
    return functools.partial(fn, **d)
def to_list_fn(f):
    """Lift `f` to operate elementwise over a sequence, returning a list."""
    return lambda xs: list(map(f, xs))
def transform(x, fns):
    """Thread `x` through each function in `fns`, left to right."""
    result = x
    for fn in fns:
        result = fn(result)
    return result
def zip_toggle(xs):
    """[[x1, ...], [x2, ...], [x3, ...]] --> [(x1, x2, .., xn) ...];
    [(x1, x2, .., xn) ...] --> [[x1, ...], [x2, ...], [x3, ...]]"""
    assert isinstance(xs, list)
    return [tuple(column) for column in zip(*xs)]
### useful iterators
def iter_product(lst_lst_vals, tuple_fmt=True):
    """Cartesian product of the given value lists."""
    combos = list(itertools.product(*lst_lst_vals))
    if tuple_fmt:
        return combos
    return [list(c) for c in combos]
def iter_ortho_all(lst_lst_vals, reference_idxs, ignore_repeats=True):
    """Vary one coordinate at a time around a reference configuration."""
    assert len(lst_lst_vals) == len(reference_idxs)
    reference = [vals[idx] for vals, idx in zip(lst_lst_vals, reference_idxs)]
    # When repeats are ignored, the reference itself is emitted exactly once,
    # up front.
    results = [tuple(reference)] if ignore_repeats else []
    for pos, vals in enumerate(lst_lst_vals):
        for j, candidate_val in enumerate(vals):
            if ignore_repeats and j == reference_idxs[pos]:
                continue
            candidate = list(reference)
            candidate[pos] = candidate_val
            results.append(tuple(candidate))
    return results
def iter_ortho_single(lst_lst_vals,
                      reference_idxs,
                      iteration_idx,
                      put_reference_first=True):
    """Vary only the coordinate at `iteration_idx` around a reference.

    Args:
        lst_lst_vals: one list of candidate values per dimension.
        reference_idxs: index of the reference value in each dimension.
        iteration_idx: the single dimension whose values are swept.
        put_reference_first: if True, emit the reference tuple first and skip
            its repeat during the sweep.

    Bug fix: the original assigned ``r[i]`` with ``i`` undefined (NameError
    on every non-skipped iteration); the varied coordinate is
    ``iteration_idx``.
    """
    assert len(lst_lst_vals) == len(reference_idxs)
    ref_r = [lst_lst_vals[pos][idx] for (pos, idx) in enumerate(reference_idxs)]
    rs = [] if not put_reference_first else [tuple(ref_r)]
    num_vals = len(lst_lst_vals[iteration_idx])
    for j in range(num_vals):
        if put_reference_first and j == reference_idxs[iteration_idx]:
            continue
        r = list(ref_r)
        r[iteration_idx] = lst_lst_vals[iteration_idx][j]
        rs.append(tuple(r))
    return rs
def get_argument_names(fn):
    """Return the names of `fn`'s positional arguments.

    Bug fix: ``inspect.getargspec`` was removed in Python 3.11; use the
    drop-in replacement ``inspect.getfullargspec``, whose ``.args`` field is
    identical for plain positional arguments.
    """
    return inspect.getfullargspec(fn).args
### dictionary manipulation
def create_dict(ks, vs):
    """Pair the keys `ks` with the values `vs` into a dict."""
    assert len(ks) == len(vs)
    return {k: v for k, v in zip(ks, vs)}
def create_dataframe(ds, abort_if_different_keys=True):
    """Turn a list of dicts into a DataFrame, padding missing keys with None."""
    import pandas
    all_keys = key_union(ds)
    assert (not abort_if_different_keys) or len(key_intersection(ds)) == len(all_keys)
    columns = {k: [d.get(k) for d in ds] for k in all_keys}
    return pandas.DataFrame(columns)
def copy_update_dict(d, d_other):
    """Return a copy of `d` with the entries of `d_other` layered on top."""
    return {**d, **d_other}
def merge_dicts(ds):
    """Merge dicts, asserting that no key appears twice."""
    merged = {}
    for d in ds:
        for k, v in d.items():
            assert k not in merged
            merged[k] = v
    return merged
def groupby(xs, fn):
    """Bucket the elements of `xs` by the value of `fn`."""
    assert isinstance(xs, list)
    buckets = {}
    for x in xs:
        buckets.setdefault(fn(x), []).append(x)
    return buckets
def flatten(d):
    """Concatenate all the list values of dict `d` into a single list."""
    assert isinstance(d, dict)
    out = []
    for bucket in d.values():
        assert isinstance(bucket, list)
        out.extend(bucket)
    return out
def recursive_groupby(p, fn):
    """Apply `groupby` at every leaf list of a nested dict structure."""
    assert isinstance(p, (dict, list))
    if isinstance(p, dict):
        return {k: recursive_groupby(v, fn) for k, v in p.items()}
    return groupby(p, fn)
def recursive_flatten(p):
    """Collect all leaf-list elements of a nested dict structure."""
    assert isinstance(p, (dict, list))
    if isinstance(p, list):
        return list(p)
    out = []
    for v in p.values():
        out.extend(recursive_flatten(v))
    return out
def recursive_map(p, fn):
    """Map `fn` over every leaf-list element of a nested dict structure."""
    assert isinstance(p, (dict, list))
    if isinstance(p, dict):
        return {k: recursive_map(v, fn) for k, v in p.items()}
    return [fn(x) for x in p]
def recursive_index(d, ks):
    """Index into nested dicts with the key sequence `ks`."""
    node = d
    for k in ks:
        node = node[k]
    return node
def filter_dict(d, fn):
    """Keep only the (k, v) entries for which fn(k, v) is truthy."""
    return {k: v for (k, v) in d.items() if fn(k, v)}
def map_dict(d, fn):
    """Replace each value v by fn(k, v), keeping the keys."""
    return {k: fn(k, v) for (k, v) in d.items()}
def invert_injective_dict(a_to_b):
    """Swap keys and values (assumes the mapping is injective)."""
    return {b: a for a, b in a_to_b.items()}
def invert_noninjective_dict(a_to_b):
    """Map each value to the list of keys pointing at it."""
    inverted = {}
    for a, b in a_to_b.items():
        inverted.setdefault(b, []).append(a)
    return inverted
def structure(ds, ks):
    """Nest `ds` by grouping successively on each key in `ks`."""
    for k in ks:
        # Bind the current key into the grouping function's default argument
        # to avoid the late-binding closure pitfall.
        ds = recursive_groupby(ds, lambda x, kk=k: x[kk])
    return ds
def structure_with_fns(ds, fns):
    """Nest `ds` by grouping successively with each function in `fns`."""
    result = ds
    for fn in fns:
        result = recursive_groupby(result, fn)
    return result
def get_subset_indexing_fn(ks, tuple_fmt=True):
    """Return a function extracting the values of `ks` from a dict."""
    def indexer(d):
        assert isinstance(d, dict)
        values = [d[k] for k in ks]
        return tuple(values) if tuple_fmt else values

    return indexer
def flatten_nested_list(xs):
    """Concatenate one level of nested lists."""
    assert isinstance(xs, list)
    flat = []
    for inner in xs:
        assert isinstance(inner, list)
        flat += inner
    return flat
def key_union(ds):
    """Keys appearing in at least one dict of `ds` (order unspecified)."""
    seen = set()
    for d in ds:
        seen.update(d.keys())
    return list(seen)
def key_intersection(ds):
    """Keys appearing in every dict of `ds` (order unspecified)."""
    assert len(ds) > 0
    common = set(ds[0].keys())
    for d in ds[1:]:
        common &= set(d.keys())
    return list(common)
# NOTE: right now, done for dictionaries with hashable values.
def key_to_values(ds):
    """Map each key to the set of (hashable) values it takes across `ds`."""
    acc = {}
    for d in ds:
        for k, v in d.items():
            acc.setdefault(k, set()).add(v)
    return acc
# NOTE: for now, no checking regarding the existence of the key.
def subset_dict_via_selection(d, ks):
    """Project `d` onto the keys in `ks` (raises KeyError if one is missing)."""
    return dict((k, d[k]) for k in ks)
def subset_dict_via_deletion(d, ks):
    """Copy `d` without the keys in `ks` (raises KeyError if one is missing)."""
    remaining = dict(d)
    for k in ks:
        del remaining[k]
    return remaining
def set_dict_values(d_to,
                    d_from,
                    abort_if_exists=False,
                    abort_if_notexists=True):
    """Copy the entries of `d_from` into `d_to`, in place."""
    if abort_if_exists:
        assert all(k not in d_to for k in d_from)
    if abort_if_notexists:
        assert all(k in d_to for k in d_from)
    d_to.update(d_from)
def sort_dict_items(d, by_key=True, decreasing=False):
    """Return the (key, value) pairs of `d` sorted by key or by value."""
    position = 0 if by_key else 1
    return sorted(d.items(), key=lambda item: item[position], reverse=decreasing)
def print_dict(d, width=1):
    """Pretty-print `d`; the narrow default width puts one item per line."""
    pprint.pprint(d, width=width)
def pprint_dict_keys(d, ks):
    """Pretty-print only the entries of `d` whose keys are in `ks`."""
    pprint.pprint({k: d[k] for k in ks})
def collapse_nested_dict(d, sep='.'):
    """Flatten a two-level dict-of-dicts into one level.

    Outer and inner keys (both must be plain ``str``) are joined with ``sep``.
    """
    assert all(type(k) == str for k in d.keys())
    assert all(type(kk) == str for inner in d.values() for kk in inner.keys())
    flat = {}
    for outer_key, inner in d.items():
        for inner_key, value in inner.items():
            flat[outer_key + sep + inner_key] = value
    return flat
def uncollapse_nested_dict(d, sep='.'):
    """Inverse of ``collapse_nested_dict``: split each key on ``sep`` into two levels.

    Bug fixes vs. the previous version: the accumulator was a *list*, the
    entries were written into the input ``d`` (mutating it) instead of the
    accumulator, and the function always returned an empty list.  The key
    validity check also split on whitespace rather than on ``sep``, so it
    failed for every ordinary dotted key.
    """
    assert all(
        type(k) == str and len(k.split(sep)) == 2 for k in d.keys())
    out_d = {}
    for (k, v) in d.items():
        (k1, k2) = k.split(sep)
        if k1 not in out_d:
            out_d[k1] = {}
        out_d[k1][k2] = v
    return out_d
from typing import Any, Callable, Dict, List, Tuple, Union
import logging
from ..decorators.logging import logger
from pathlib import Path
import sqlite3
from sqlite3 import Error, Connection, Cursor
from .typing.sql import FieldOption
from .typing.operator import ConditionSQLArgument, OperatorSQLArgument
def create_db_if_not_exist(database) -> None:
    """Ensure the SQLite database file and its parent directory exist on disk."""
    db_file = Path(database)
    db_file.parents[0].mkdir(parents=True, exist_ok=True)
    db_file.touch(exist_ok=True)
    logging.debug(f'{db_file} is created')
@logger()
def create_connection(database=':memory:', *args, **kwargs) -> Connection:
    """Open an SQLite connection, creating the database file first if needed.

    Args:
        database: Path to the database file, or ``':memory:'`` for an
            in-memory database (no file is created in that case).
        *args, **kwargs: Forwarded verbatim to :func:`sqlite3.connect`
            (e.g. ``check_same_thread=False`` to share across threads).

    Returns:
        Connection: The open connection, or ``None`` if connecting failed
        (the error is logged rather than raised).
    """
    if database != ':memory:':
        create_db_if_not_exist(database)
    try:
        return sqlite3.connect(database, *args, **kwargs)
    except Error as exc:
        logging.error(exc)
        return None
def create_table(conn: Connection, table_name: str, fields: Dict[str, FieldOption]):
    """Create ``table_name`` if it does not exist yet.

    Args:
        conn (Connection): The Connection object.
        table_name (str): Name of the table.
        fields (Dict[str, FieldOption]): Column name -> field option; each
            option's ``.sql`` attribute supplies the column's SQL definition.
    """
    column_defs = []
    last = len(fields) - 1
    for position, (name, option) in enumerate(fields.items()):
        trailing = ',' if position != last else ''
        column_defs.append(f'{name} {option.sql}{trailing}\n')
    sql = ''.join((f'CREATE TABLE IF NOT EXISTS {table_name} (', *column_defs, ');'))
    logging.info(sql)
    try:
        conn.cursor().execute(sql)
    except Error as exc:
        logging.error(exc)
def insert(conn: Connection, table_name: str, fields: Union[List[str], Tuple[str]],
           values: Union[List[Tuple], Tuple]):
    """Insert one row (a tuple) or many rows (a list of tuples) into a table.

    Args:
        conn (Connection): The Connection object.
        table_name (str): Name of the table.
        fields (Union[List[str], Tuple[str]]): Column names to fill.
        values (Union[List[Tuple], Tuple]): A single row tuple, or a list of
            row tuples for a bulk insert.
    """
    column_list = ', '.join(map(str, fields))
    placeholders = ', '.join('?' for _ in fields)
    sql = f"""INSERT INTO {table_name} ({column_list})
    VALUES
    ({placeholders})
    """
    logging.info(sql)
    try:
        cursor = conn.cursor()
        if isinstance(values, tuple):
            cursor.execute(sql, values)
        else:
            cursor.executemany(sql, values)
        conn.commit()
    except Error as exc:
        logging.error(exc)
@logger()
def query(conn: Connection,
          table_name: str,
          fields: Union[List[str], Tuple[str]] = None,
          row_factory: Callable[[Cursor, Tuple], Any] = None,
          where: Union[OperatorSQLArgument, ConditionSQLArgument] = None) -> List:
    """Select rows from ``table_name``.

    Args:
        conn (Connection): The Connection object.
        table_name (str): Name of the table.
        fields: Columns to select; ``None`` or empty selects ``*``.
        row_factory: Optional factory assigned to ``conn.row_factory``
            before querying (e.g. :class:`sqlite3.Row`).
        where: Optional condition object whose ``.sql`` renders the
            WHERE clause body.

    Returns:
        List: All matching rows, or ``None`` if the query failed
        (the error is logged rather than raised).
    """
    columns = ', '.join(map(str, fields)) if fields is not None and len(fields) > 0 else '*'
    where_clause = f'WHERE {where.sql}' if where is not None else ''
    sql = f"""SELECT {columns} FROM {table_name}
    {where_clause}
    """
    logging.info(sql)
    if row_factory is not None:
        conn.row_factory = row_factory
    try:
        cursor = conn.cursor()
        cursor.execute(sql)
        return cursor.fetchall()
    except Error as exc:
        logging.error(exc)
import math
import torch.nn as nn
from functools import reduce
def next_conv_dim(conv: nn.Conv2d, in_img_size: int):
    """Spatial output size of a Conv2d (or compatible pooling) layer.

    Uses floor((W + 2P - D*(K-1) - 1) / S + 1); tuple hyper-parameters are
    reduced to their first element, i.e. square kernels are assumed.
    """
    def _first(value):
        # torch stores these either as an int or as an (h, w) tuple.
        return value[0] if type(value) is tuple else value

    kernel_size = _first(conv.kernel_size)
    stride = _first(conv.stride)
    padding = _first(conv.padding)
    dilation = _first(conv.dilation)
    numerator = in_img_size + 2 * padding - dilation * (kernel_size - 1) - 1
    return math.floor(numerator / stride + 1)
def next_conv_transpose_dim(convt: nn.ConvTranspose2d, in_img_size: int):
    """Spatial output size of a ConvTranspose2d layer.

    Uses (W - 1) * S - 2P + D*(K-1) + output_padding + 1; tuple
    hyper-parameters are reduced to their first element (square kernels).
    """
    def _first(value):
        # torch stores these either as an int or as an (h, w) tuple.
        return value[0] if type(value) is tuple else value

    kernel_size = _first(convt.kernel_size)
    stride = _first(convt.stride)
    padding = _first(convt.padding)
    dilation = _first(convt.dilation)
    output_padding = _first(convt.output_padding)
    return math.floor(
        (in_img_size - 1) * stride - 2 * padding
        + dilation * (kernel_size - 1) + output_padding + 1
    )
def next_conv_block_dim(block: nn.Module, in_img_size: int):
    """Propagate a spatial size through every size-changing module in ``block``.

    Conv/MaxPool and ConvTranspose layers are handled by the single-layer
    helpers; Upsample variants multiply by ``scale_factor``.  Modules that do
    not change the spatial size (ReLU, BatchNorm, ...) are skipped.
    """
    size = in_img_size
    for layer in block:
        if isinstance(layer, (nn.Conv2d, nn.MaxPool2d)):
            size = next_conv_dim(layer, size)
        elif isinstance(layer, nn.ConvTranspose2d):
            size = next_conv_transpose_dim(layer, size)
        elif isinstance(layer, (nn.Upsample, nn.UpsamplingBilinear2d, nn.UpsamplingNearest2d)):
            # NOTE(review): scale_factor may be a float, making the result a
            # float from here on — confirm callers tolerate that.
            size = size * layer.scale_factor
    return size
def final_conv_dim(model: nn.Module, in_img_size: int):
    """Spatial output size after passing ``in_img_size`` through ``model``'s
    top-level children.

    Previously every child was fed to ``next_conv_dim`` unconditionally,
    which raises AttributeError for any module without conv hyper-parameters
    (ReLU, BatchNorm, ...).  Delegating to ``next_conv_block_dim`` dispatches
    on the module type (conv, pooling, transposed conv, upsampling) and skips
    size-preserving layers; for purely convolutional models the result is
    unchanged.
    """
    return next_conv_block_dim(model.children(), in_img_size)
from .base import Writer
import loopgpt
# Skeleton of the generated document: title page, table of contents, then the
# writer-produced body.  Doubled braces are literal LaTeX braces escaped for
# str.format(); {title}, {date} and {content} are the substitution slots.
LATEX_TEMPLATE = """\\documentclass{{report}}
\\title{{{title}}}
\\date{{{date}}}
\\begin{{document}}
\\maketitle
\\tableofcontents
{content}
\\end{{document}}
"""
class LatexWriter(Writer):
    r"""Writer producing LaTeX sections/subsections via LLM-backed stubs.

    The docstrings of the ``@loopgpt.aifunc`` stubs double as the model
    prompt, so their wording is behavior-bearing.  They are raw strings so
    that ``\section`` / ``\subsection`` are not parsed as invalid escape
    sequences (a SyntaxWarning on Python 3.12+).
    """

    @staticmethod
    @loopgpt.aifunc()
    def write_section(section: str) -> str:
        r"""Writes a latex \section{} command with the section name and then a short section about the given topic without going into specific details
        as there will be future subsections for that. The section should be less than 50 words long and should mention at least 3 subtopics
        that will be covered in future subsections.
        Args:
            section (str): The section to write the introduction for.
        Returns:
            str: The introduction written in LaTeX format.
        """

    @staticmethod
    @loopgpt.aifunc()
    def write_subsection(subsection: str) -> str:
        r"""Writes a latex \subsection{} command with the subsection name and then a detailed subsection about the given topic that should contain as much
        details as possible including reference links. The subsection should be between 500 and 1000 words long and should cover the topic in depth.
        It must be in valid LaTeX with correct escaping.
        Args:
            subsection (str): The subsection to write about.
        Returns:
            str: The section written in LaTeX format.
        """
class ShortLatexWriter(LatexWriter):
    r"""LatexWriter variant that emits much shorter subsections.

    The stub docstring is the model prompt (behavior-bearing); it is a raw
    string so ``\subsection`` is not parsed as an invalid escape sequence
    (a SyntaxWarning on Python 3.12+).
    """

    @staticmethod
    @loopgpt.aifunc()
    def write_subsection(subsection: str) -> str:
        r"""This is a semantic function. It writes a latex subsection command followed by the subsection name and then a very short subsection
        content including links to relevant websites.
        Writes a latex \subsection{} command with the subsection name and then a short subsection about the given topic that should be between 100 and 200
        words long. It must be in valid LaTeX with correct escaping. Includes any relevant links that were found previously where the reader can
        find more information.
        Args:
            subsection (str): The subsection to write about.
        Returns:
            str: The section written in LaTeX format.
        """
from typing import List
import urllib.parse
from .adapter import Adapter
__all__ = ['Wrapper']
class Wrapper:
    """Wrapper class for the Adapter class.

    This class is used to wrap the Adapter class and provide a more
    convenient interface for the user.
    """

    def __init__(self, adapter: Adapter) -> None:
        self._adapter = adapter

    def get_bulk(self, params=None) -> dict:
        """Fetch the bulk-operation endpoint.

        Parameters
        ----------
        params : :class:`dict`
            Parameters to be passed to the API. Defaults to None.

        Returns
        -------
        :class:`dict`
        """
        return self._adapter.get_bulk(params=params)

    def set_bulk(self, jsondata=None, params=None) -> dict:
        """Upload bulk data and return the results of the bulk job.

        Fixes vs. the previous version: leftover debug ``print()`` calls are
        removed, and the successful-records result is fetched once instead of
        issuing an identical third request.

        Parameters
        ----------
        jsondata : :class:`dict`
            The payload to send to the API. Defaults to None.
        params : :class:`dict`
            Parameters to be passed to the API.

        Returns
        -------
        :class:`dict`
        """
        if params is None:
            params = {}
        if jsondata is None:
            jsondata = {}
        data = self._adapter.set_bulk(params=params, jsondata=jsondata)
        # The upload response carries a results URL; the bulk job id lives in
        # its query string.
        bulk_data = {}
        bulk_data['id'] = urllib.parse.parse_qs(urllib.parse.urlparse(data['url']).query)['id'][0]
        # First request (without display_type) retrieves the error report;
        # its result is intentionally discarded here.
        self._adapter.get_bulk_results(bulk_data)
        bulk_data['display_type'] = "success"
        return self._adapter.get_bulk_results(bulk_data)

    def set_bulk_apply(self, params=None) -> dict:
        """Apply a previously uploaded bulk job.

        Parameters
        ----------
        params : :class:`dict`
            Parameters to be passed to the API.

        Returns
        -------
        :class:`dict`
        """
        if params is None:
            params = {}
        return self._adapter.set_bulk_apply(params=params)

    def get_bulk_results(self, params=None) -> dict:
        """Fetch the results of a bulk job.

        Parameters
        ----------
        params : :class:`dict`
            Parameters to be passed to the API.

        Returns
        -------
        :class:`dict`
        """
        if params is None:
            params = {}
        return self._adapter.get_bulk_results(params=params)

    def search_researcher(self, payload=None) -> dict:
        """Search for a researcher in the API.

        Parameters
        ----------
        payload : :class:`dict`
            The payload to send to the API. Defaults to None.

        Returns
        -------
        :class:`dict`
        """
        if payload is None:
            payload = {}
        return self._adapter.search_researcher(payload)

    def usage(self) -> dict:
        """Delegate to the adapter's usage endpoint."""
        return self._adapter.get_usage()
import json
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Union
import jwt
import aiohttp
import datetime
import requests
import re
import urllib.parse
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import pprint
from .errors import (UnsupportedResponseType, UnauthorizedClient, AccessDenied, InvalidClient, InvalidScope,
InvalidGrant, UnsupportedGrantType, InvalidVersion, ParseError, InvalidNonce,
InvalidRequest, InvalidToken, MalformedToken, InsufficientScope, InvalidIP,
Forbidden, NotFound, MethodNotAllowed, MaxSearchResult, DatabaseError,
ServerError, InternalServerError, HTTPException)
__all__ = ['Authentication', 'Auth', 'Adapter', 'RequestsAdapter', 'AiohttpAdapter']
class Authentication(metaclass=ABCMeta):
    """Abstract base for researchmap OAuth2 authentication (JWT bearer grant).

    Concrete subclasses generate/verify the client JWT and exchange it for an
    access token at the (production or trial) token endpoint.
    """

    def __init__(self, client_id, client_secret, scope, *, iat: int = 30, exp: int = 30, sub="0", trial: bool = False):
        # trial selects the api-trial host; everything else configures the
        # JWT bearer grant (RFC 7523) parameters.
        self.trial = trial
        self.endpoint = 'https://api.researchmap.jp/oauth2/token' if not self.trial else 'https://api-trial.researchmap.jp/oauth2/token'
        self.version = "2"
        self.grant_type = "urn:ietf:params:oauth:grant-type:jwt-bearer"
        self.algorithm = "RS256"
        self.client_id = client_id
        self.client_secret = client_secret
        self.scope = scope
        self.iat = iat  # issued-at backdate window in seconds
        self.exp = exp  # expiry window in seconds
        self.sub = sub
        # NOTE(review): captured once at construction time; long-lived
        # instances will generate JWTs with stale iat/exp — confirm intended.
        self.now = datetime.datetime.now(datetime.timezone.utc)

    @abstractmethod
    def gen_jwt(self) -> bytes:
        """Generate the client-assertion JWT."""
        raise NotImplementedError()

    @abstractmethod
    def gen_pubkey(self) -> bytes:
        """Derive the public key from the client secret (private key)."""
        raise NotImplementedError()

    @abstractmethod
    def is_authorization(self, _jwt: str, client_public: str) -> bool:
        """Verify the JWT signature/claims against the public key."""
        raise NotImplementedError()

    @abstractmethod
    def get_access_token_response(self, jwt: str, **kwargs) -> Optional[Union[list, dict]]:
        """POST the JWT to the token endpoint and return the parsed response."""
        raise NotImplementedError()

    @abstractmethod
    def get_access_token(self, *, access_token_response: str) -> str:
        """Extract the bare access token from a token-endpoint response."""
        raise NotImplementedError()

    @abstractmethod
    def get_usage(self) -> dict:
        raise NotImplementedError()

    def _check_status(self, status_code, response, data) -> Union[dict, list]:
        """Map an HTTP status + API error code onto the library's exceptions.

        Returns ``data`` unchanged on 2xx.  Branch order matters: e.g.
        400/405 + ``invalid_request`` must be tested before 405 +
        ``method_not_allowed``.
        """
        if 200 <= status_code < 300:
            return data
        # NOTE(review): assumes ``data`` is a dict (or falsy); a bytes body
        # would raise AttributeError here — confirm against callers.
        error_messages = data.get('error', '') if data else ''
        message = data.get('error_description', '') if data else ''
        # OAuth2 / API error codes, one exception class per (status, error).
        if status_code == 302 and error_messages == 'unsupported_response_type':
            raise UnsupportedResponseType(response, message)
        elif status_code == 400 and error_messages == 'unauthorized_client':
            raise UnauthorizedClient(response, message, error="unauthorized_client")
        elif status_code == 400 and error_messages == 'access_denied':
            raise AccessDenied(response, message, error="access_denied")
        elif status_code == 400 and error_messages == 'invalid_client':
            raise InvalidClient(response, message, error="invalid_client")
        elif status_code == 400 and error_messages == 'invalid_scope':
            raise InvalidScope(response, message, error="invalid_scope")
        elif status_code == 400 and error_messages == 'invalid_grant':
            raise InvalidGrant(response, message, error="invalid_grant")
        elif status_code == 400 and error_messages == 'unsupported_grant_type':
            raise UnsupportedGrantType(response, message, error="unsupported_grant_type")
        elif status_code == 400 and error_messages == 'invalid_version':
            raise InvalidVersion(response, message, error="invalid_version")
        elif status_code == 400 and error_messages == 'parse_error':
            raise ParseError(response, message, error="parse_error")
        elif status_code == 400 and error_messages == 'invalid_nonce':
            raise InvalidNonce(response, message, error="invalid_nonce")
        elif (status_code == 400 or status_code == 405) and error_messages == 'invalid_request':
            raise InvalidRequest(response, message, error="invalid_request")
        elif status_code == 401 and error_messages == 'invalid_token':
            raise InvalidToken(response, message, error="invalid_token")
        elif status_code == 401 and error_messages == 'malformed_token':
            raise MalformedToken(response, message, error="malformed_token")
        elif status_code == 401 and error_messages == 'insufficient_scope':
            raise InsufficientScope(response, message, error="insufficient_scope")
        elif status_code == 401 and error_messages == 'invalid_ip':
            raise InvalidIP(response, message, error="invalid_ip")
        elif status_code == 403:
            raise Forbidden(response, message, error="forbidden")
        elif status_code == 404:
            raise NotFound(response, message, error="not_found")
        elif status_code == 405 and error_messages == 'method_not_allowed':
            raise MethodNotAllowed(response, message, error="method_not_allowed")
        elif status_code == 416 and error_messages == 'max_search_result':
            raise MaxSearchResult(response, message, error="max_search_result")
        elif status_code == 500 and error_messages == 'database_error':
            raise DatabaseError(response, message, error="database_error")
        elif status_code == 500 and error_messages == 'server_error':
            raise ServerError(response, message, error="server_error")
        elif 500 <= status_code < 600:
            # Any other 5xx without a recognized API error code.
            raise InternalServerError(response, message)
        else:
            # Fallback for everything not matched above.
            raise HTTPException(response, message)
class Auth(Authentication):
    """Researchmap authentication interface.

    Parameters
    ----------
    client_id: :class:`str`
        Client ID.
    client_secret: :class:`bytes`
        Client secret key (PEM-encoded RSA private key).

    Keyword Arguments
    -----------------
    iat: :class:`int`
        Issued at [sec].
    exp: :class:`int`
        Expire at [sec].
    sub: :class:`int`
        Subject.
    trial: :class:`bool`
        Trial mode.
    """

    @property
    def is_trial(self) -> bool:
        """Get trial mode.

        Returns
        -------
        :class:`bool`
            Trial mode.
        """
        return self.trial

    @property
    def time_now(self) -> datetime.datetime:
        """Get current time [aware].

        Returns
        -------
        :class:`datetime.datetime`
            Current time of UTC (captured at construction time).
        """
        return self.now

    @property
    def time_iat(self) -> datetime.datetime:
        """Get issued at time [aware].

        Returns
        -------
        :class:`datetime.datetime`
            Issued at time of UTC.
        """
        return self.now - datetime.timedelta(seconds=self.iat)

    @property
    def time_exp(self) -> datetime.datetime:
        """Get expire at time [aware].

        Returns
        -------
        :class:`datetime.datetime`
            Expire at time of UTC.
        """
        return self.now + datetime.timedelta(seconds=self.exp)

    @property
    def token(self) -> str:
        """Get token.

        Returns
        -------
        :class:`str`
            Token.

        Raises
        ------
        :exc:`InvalidToken`
            Invalid token.
        :class:`json.JSONDecodeError`
            JSON decode error.
        :class:`requests.exceptions.HTTPError`
            HTTP error.
        """
        return self.get_access_token()

    def gen_jwt(self, *, exp: int = None, iat: int = None, sub: str = None) -> bytes:
        """Generate JWT.

        Keyword Arguments
        -----------------
        exp: :class:`int`
            Expire at [sec].
        iat: :class:`int`
            Issued at [sec].
        sub: :class:`int`
            Subject.

        Returns
        -------
        :class:`bytes`
            JWT signed with the client's RSA private key (RS256).
        """
        if exp is None:
            exp = self.exp
        if iat is None:
            iat = self.iat
        if sub is None:
            sub = self.sub
        payload = {
            "iss": self.client_id,
            "aud": self.endpoint,
            "sub": sub,
            "iat": self.now - datetime.timedelta(seconds=iat),
            "exp": self.now + datetime.timedelta(seconds=exp),
        }
        _jwt = jwt.encode(payload, self.client_secret,
                          algorithm=self.algorithm)
        return _jwt

    def gen_pubkey(self, *, client_secret: str = None) -> bytes:
        """Generate public key.

        Keyword Arguments
        -----------------
        client_secret: :class:`str`
            Client secret key (PEM-encoded private key).

        Returns
        -------
        :class:`bytes`
            Client public key in PEM (SubjectPublicKeyInfo) format.
        """
        if client_secret is None:
            client_secret = self.client_secret
        privkey = serialization.load_pem_private_key(
            client_secret,
            password=None,
            backend=default_backend()
        )
        pubkey = privkey.public_key()
        client_public = pubkey.public_bytes(
            serialization.Encoding.PEM,
            serialization.PublicFormat.SubjectPublicKeyInfo
        )
        return client_public

    def is_authorization(self, *, _jwt: str = None, client_public: str = None) -> bool:
        """Check authorization.

        Keyword Arguments
        -----------------
        _jwt: :class:`str`
            JWT.
        client_public: :class:`str`
            Client public key.

        Returns
        -------
        :class:`bool`
            True if the JWT verifies and its claims match this client.
        """
        if _jwt is None:
            _jwt = self.gen_jwt()
        if client_public is None:
            client_public = self.gen_pubkey()
        try:
            decoded_jwt = jwt.decode(_jwt, key=client_public,
                                     audience=self.endpoint, algorithms=self.algorithm)
            if decoded_jwt['iss'] == self.client_id and decoded_jwt['sub'] == self.sub and decoded_jwt[
                    'aud'] == self.endpoint:
                return True
        except (jwt.PyJWTError, KeyError):
            # Was a bare ``except:`` which also swallowed SystemExit /
            # KeyboardInterrupt; only signature/claim failures are expected.
            print("The signature of JWT cannot be verified.")
        return False

    def get_access_token_response(self, *, _jwt: bytes = None, **kwargs) -> Optional[Union[list, dict]]:
        """Get access token.

        Keyword Arguments
        ----------
        _jwt: :class:`bytes`
            JWT.

        Returns
        -------
        Optional[Union[:class:`list`, :class:`dict`]]
            Parsed token-endpoint response, or ``None`` if the local JWT
            check fails.

        Raises
        ------
        :exc:`HTTPException`
            An unknown HTTP related error occurred, usually when it isn't 200 or the known incorrect credentials passing status code.
        """
        if _jwt is None:
            _jwt = self.gen_jwt()
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        params = {
            'grant_type': self.grant_type,
            'assertion': _jwt,
            'scope': self.scope,
            'version': self.version
        }
        payload = urllib.parse.urlencode(params)
        if self.is_authorization():
            req_access_token = requests.post(url=self.endpoint, headers=headers, data=payload)
            try:
                data = req_access_token.json()
            except json.JSONDecodeError:
                # Non-JSON (HTML) error page: scrape the <h1> as the error code.
                redata = re.sub('[\r\n]+$', ' ', req_access_token.content.decode(req_access_token.encoding))
                data = {}
                data["error"] = re.search("<h1>(.+)</h1>", redata).groups()[0].lower()
            return self._check_status(req_access_token.status_code, req_access_token, data)
        else:
            print("Access Token is not valid")

    def get_access_token(self, *, access_token_response: Optional[Union[list, dict]] = None) -> str:
        """Get access token.

        Keyword Arguments
        ----------
        access_token_response: Optional[Union[:class:`list`, :class:`dict`]]
            Access token response; fetched on demand when omitted.

        Returns
        -------
        :class:`str`
            Access token.

        Raises
        ------
        :class:`TypeError`
            The type of the argument is not correct.
        :exc:`HTTPException`
            An unknown HTTP related error occurred, usually when it isn't 200 or the known incorrect credentials passing status code.
        :exc:`InvalidToken`
            Invalid token.
        """
        if access_token_response is None:
            access_token_response = self.get_access_token_response()
        return access_token_response['access_token']

    def get_usage(self) -> None:
        # Usage reporting is not implemented for this auth flow.
        return None
class Adapter(metaclass=ABCMeta):
    """Abstract HTTP adapter for the researchmap Web API.

    Concrete subclasses implement the transport (requests / aiohttp); this
    base holds the endpoint configuration and the shared status-to-exception
    mapping.
    """

    def __init__(self, authentication_key: str, trial: bool = False) -> None:
        # trial selects the api-trial host; the URL template is filled per
        # request with permalink / archivement_type / query.
        self.trial = trial
        self.base_url = 'https://api.researchmap.jp/{permalink}/{archivement_type}?{query}' if not self.trial \
            else 'https://api-trial.researchmap.jp/{permalink}/{archivement_type}?{query}'
        self.authentication_key = authentication_key
        self.payload = {}

    @abstractmethod
    def request(self, method: str, permalink: str, *,
                archivement_type: str, payload: dict, **kwargs) -> Optional[Union[list, dict]]:
        """Perform one HTTP request and return the parsed response body."""
        raise NotImplementedError()

    @abstractmethod
    def get_bulk(self, params: dict):
        raise NotImplementedError()

    @abstractmethod
    def set_bulk(self, jsondata: dict, params: dict):
        raise NotImplementedError()

    @abstractmethod
    def set_bulk_apply(self, params):
        raise NotImplementedError()

    @abstractmethod
    def get_bulk_results(self, params: dict):
        raise NotImplementedError()

    @abstractmethod
    def search_researcher(self, payload: dict):
        raise NotImplementedError()

    @abstractmethod
    def get_usage(self) -> dict:
        raise NotImplementedError()

    def _check_status(self, status_code, response, data) -> Union[dict, list]:
        """Map an HTTP status + API error code onto the library's exceptions.

        Returns ``data`` unchanged on 2xx.  Branch order matters (e.g.
        400/405 + ``invalid_request`` before 405 + ``method_not_allowed``).
        Fix vs. the previous version: a leftover debug ``print(data)`` that
        dumped every error body to stdout has been removed.

        NOTE: this duplicates ``Authentication._check_status``; keep the two
        in sync until the mapping is factored out.
        """
        if 200 <= status_code < 300:
            return data
        error_messages = data.get('error', '') if data else ''
        message = data.get('error_description', '') if data else ''
        if status_code == 302 and error_messages == 'unsupported_response_type':
            raise UnsupportedResponseType(response, message)
        elif status_code == 400 and error_messages == 'unauthorized_client':
            raise UnauthorizedClient(response, message, error="unauthorized_client")
        elif status_code == 400 and error_messages == 'access_denied':
            raise AccessDenied(response, message, error="access_denied")
        elif status_code == 400 and error_messages == 'invalid_client':
            raise InvalidClient(response, message, error="invalid_client")
        elif status_code == 400 and error_messages == 'invalid_scope':
            raise InvalidScope(response, message, error="invalid_scope")
        elif status_code == 400 and error_messages == 'invalid_grant':
            raise InvalidGrant(response, message, error="invalid_grant")
        elif status_code == 400 and error_messages == 'unsupported_grant_type':
            raise UnsupportedGrantType(response, message, error="unsupported_grant_type")
        elif status_code == 400 and error_messages == 'invalid_version':
            raise InvalidVersion(response, message, error="invalid_version")
        elif status_code == 400 and error_messages == 'parse_error':
            raise ParseError(response, message, error="parse_error")
        elif status_code == 400 and error_messages == 'invalid_nonce':
            raise InvalidNonce(response, message, error="invalid_nonce")
        elif (status_code == 400 or status_code == 405) and error_messages == 'invalid_request':
            raise InvalidRequest(response, message, error="invalid_request")
        elif status_code == 401 and error_messages == 'invalid_token':
            raise InvalidToken(response, message, error="invalid_token")
        elif status_code == 401 and error_messages == 'malformed_token':
            raise MalformedToken(response, message, error="malformed_token")
        elif status_code == 401 and error_messages == 'insufficient_scope':
            raise InsufficientScope(response, message, error="insufficient_scope")
        elif status_code == 401 and error_messages == 'invalid_ip':
            raise InvalidIP(response, message, error="invalid_ip")
        elif status_code == 403:
            raise Forbidden(response, message, error="forbidden")
        elif status_code == 404:
            raise NotFound(response, message, error="not_found")
        elif status_code == 405 and error_messages == 'method_not_allowed':
            raise MethodNotAllowed(response, message, error="method_not_allowed")
        elif status_code == 416 and error_messages == 'max_search_result':
            raise MaxSearchResult(response, message, error="max_search_result")
        elif status_code == 500 and error_messages == 'database_error':
            raise DatabaseError(response, message, error="database_error")
        elif status_code == 500 and error_messages == 'server_error':
            raise ServerError(response, message, error="server_error")
        elif 500 <= status_code < 600:
            raise InternalServerError(response, message)
        else:
            raise HTTPException(response, message)
class RequestsAdapter(Adapter):
    """Synchronous adapter built on ``requests``."""

    def request(self, method: str, permalink: str, *,
                archivement_type: str = None, query: str = None, params=None, payload=None, jsondata=None, **kwargs) -> Optional[
            Union[list, dict]]:
        """Perform one API request and return the parsed body.

        Fix vs. the previous version: the decode fallback caught
        ``jsondata.JSONDecodeError`` — but ``jsondata`` is a dict, so any
        non-JSON response raised AttributeError instead of being handled.
        It now correctly catches :class:`json.JSONDecodeError`.
        """
        if archivement_type is None:
            archivement_type = ""
        if query is None:
            query = ""
        if payload is None:
            payload = {}
        if params is None:
            params = {}
        if jsondata is None:
            jsondata = {}
        headers = {
            'Authorization': 'Bearer {}'.format(self.authentication_key),
            'Accept': 'application/ld+json,application/json;q=0.1',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        url = self.base_url.format(permalink=permalink, archivement_type=archivement_type, query=query)
        resp = requests.request(method, url, params=params, data=payload, json=jsondata, headers=headers, **kwargs)
        try:
            data = resp.json()
        except json.JSONDecodeError:
            # Non-JSON body (e.g. HTML error page): pass the raw bytes along.
            data = resp.content
        return self._check_status(resp.status_code, resp, data)

    def get_bulk(self, params=None) -> Union[list, dict, None]:
        """
        Get bulk data from the API.

        Parameters
        ----------
        params : :class:`dict`
            A dictionary containing the parameters for the request.

        Returns
        -------
        :class:`dict` or :class:`list`
            A dictionary or list containing the data returned by the API.
        """
        if params is None:
            params = {}
        # (A leftover debug ``print(data)`` was removed here.)
        return self.request('POST', '_bulk', params=params)

    def set_bulk(self, params=None, jsondata=None) -> Union[list, dict, None]:
        """
        Set bulk data to the API.

        Parameters
        ----------
        params : :class:`dict`
            A dictionary containing the request parameters.
        jsondata : :class:`dict`
            JSON body to upload.

        Returns
        -------
        :class:`dict` or :class:`list`
            A dictionary or list containing the data returned by the API.
        """
        if params is None:
            params = {}
        if jsondata is None:
            jsondata = {}
        return self.request('POST', '_bulk', params=params, jsondata=jsondata)

    def set_bulk_apply(self, params=None) -> Union[list, dict, None]:
        """
        Apply a previously uploaded bulk job.

        Parameters
        ----------
        params : :class:`dict`
            A dictionary containing the data to be set.

        Returns
        -------
        :class:`dict` or :class:`list`
            A dictionary or list containing the data returned by the API.
        """
        if params is None:
            params = {}
        return self.request('POST', '_bulk', params=params)

    def get_bulk_results(self, params=None) -> Union[list, dict, None]:
        """
        Get bulk results from the API.

        Parameters
        ----------
        params : :class:`dict`
            A dictionary containing the parameters for the request.

        Returns
        -------
        :class:`dict` or :class:`list`
            A dictionary or list containing the data returned by the API.
        """
        if params is None:
            params = {}
        return self.request('GET', '_bulk_results', params=params)

    def search_researcher(self, payload=None) -> Union[list, dict, None]:
        """ Search for researchers.

        Parameters
        ----------
        payload : :class:`dict`
            A dictionary containing the parameters for the request.

        Returns
        -------
        :class:`dict` or :class:`list`
            A dictionary or list containing the data returned by the API.
        """
        if payload is None:
            payload = {}
        return self.request('GET', 'researchers', payload=payload)

    def get_researcher_profile(self, permalink, payload=None) -> Union[list, dict, None]:
        """ Get a researcher profile.

        Parameters
        ----------
        permalink : :class:`str`
            The permalink of the researcher.
        payload : :class:`dict`
            A dictionary containing the parameters for the request.

        Returns
        -------
        :class:`dict` or :class:`list`
            A dictionary or list containing the data returned by the API.
        """
        if payload is None:
            payload = {}
        return self.request('GET', permalink, archivement_type='profile', payload=payload)

    def get_usage(self) -> None:
        # Usage reporting is not implemented for this adapter.
        return None
class AiohttpAdapter(Adapter):
    """Asynchronous adapter built on ``aiohttp``.

    NOTE(review): unlike RequestsAdapter, this variant sends the auth key in
    the form payload (``auth_key``) rather than a Bearer header — confirm
    this matches the API contract.
    """

    async def request(self, method: str, permalink: str, *,
                      archivement_type: str = "", query: str = "", payload=None, **kwargs) -> Optional[
            Union[list, dict]]:
        # Perform one API request and return the parsed body (or raw bytes
        # when the response is not JSON).
        if payload is None:
            payload = {}
        payload['auth_key'] = self.authentication_key
        url = self.base_url.format(permalink=permalink, archivement_type=archivement_type, query=query)
        async with aiohttp.request(
                method, url, data=payload, **kwargs) as resp:
            try:
                # content_type=None disables aiohttp's content-type check so
                # ld+json bodies are still parsed.
                data = await resp.json(content_type=None)
            except json.JSONDecodeError:
                data = await resp.read()
            status_code = resp.status
        return self._check_status(status_code, resp, data)

    async def get_bulk(self, payload=None) -> Union[list, dict, None]:
        """
        Get bulk data from the API.

        Parameters
        ----------
        payload : :class:`dict`
            A dictionary containing the parameters for the request.

        Returns
        -------
        :class:`dict` or :class:`list`
            A dictionary or list containing the data returned by the API.
        """
        if payload is None:
            payload = {}
        data = await self.request('POST', '/_bulk', payload=payload)
        return data

    async def search_researcher(self, payload=None) -> Union[list, dict, None]:
        """ Search for researchers.

        Parameters
        ----------
        payload : :class:`dict`
            A dictionary containing the parameters for the request.

        Returns
        -------
        :class:`dict` or :class:`list`
            A dictionary or list containing the data returned by the API.
        """
        if payload is None:
            payload = {}
        data = await self.request('POST', '/researchers', payload=payload)
        return data

    async def get_researcher_profile(self, permalink, payload=None) -> Union[list, dict, None]:
        """ Get a researcher profile.

        Parameters
        ----------
        permalink : :class:`str`
            The permalink of the researcher.
        payload : :class:`dict`
            A dictionary containing the parameters for the request.

        Returns
        -------
        :class:`dict` or :class:`list`
            A dictionary or list containing the data returned by the API.
        """
        if payload is None:
            payload = {}
        data = await self.request('POST', permalink, archivement_type='profile', payload=payload)
        return data

    async def get_usage(self) -> None:
        # Usage reporting is not implemented for this adapter.
        return None
from datetime import datetime
from typing import Iterator, List, Literal, NamedTuple
from .base import BaseClient
class Customer(NamedTuple):
    """Immutable record describing one reseller customer account."""
    id: str                   # unique customer identifier
    username: str             # login name
    reseller_id: str          # owning reseller's identifier
    name: str                 # customer's display name
    company: str              # company name
    city: str
    state: str
    country: str
    status: str               # e.g. Active / Suspended / Deleted
    total_receipts: float     # lifetime receipts amount
    phone: str
    phone_country_code: str
    website_count: int        # number of websites on the account
class SearchResponse(NamedTuple):
    """One page of customer-search results with overall record counts."""
    page_records: int
    db_records: int
    customers: List[Customer]

    def __len__(self) -> int:
        # Number of customers on this page (not the total in the database).
        return len(self.customers)

    def __iter__(self) -> Iterator:
        # Iterate over the customers rather than over the tuple's own fields.
        return iter(self.customers)
class CustomersClient(BaseClient):
    """Customers API Client"""

    def search(
        self,
        records: int,
        page: int,
        customers: List[str] | str = None,
        resellers: List[str] | str = None,
        username: str = None,
        name: str = None,
        company: str = None,
        city: str = None,
        state: str = None,
        status: Literal["Active", "Suspended", "Deleted"] = None,
        creation_date_start: datetime = None,
        creation_date_end: datetime = None,
        total_receipt_start: float = None,
        total_receipt_end: float = None,
    ) -> SearchResponse:
        """Gets details of the Customers that match the search criteria

        Args:
            records (int): Number of records to be fetched.
            page (int): Page number for which details are to be fetched
            customers (List[str] | str, optional): Customer ID(s). Defaults to None.
            resellers (List[str] | str, optional): Reseller ID(s) for whom Customer accounts need
                to be searched. Defaults to None.
            username (str, optional): Username of Customer. Should be an email address.
                Defaults to None.
            name (str, optional): Name of Customer. Defaults to None.
            company (str, optional): Company name of Customer. Defaults to None.
            city (str, optional): City. Defaults to None.
            state (str, optional): State. Defaults to None.
            status (Literal["Active", "Suspended", "Deleted"], optional): Status of Customer.
                Defaults to None.
            creation_date_start (datetime, optional): DateTime for listing of Customer accounts
                whose Creation Date is greater than. Defaults to None.
            creation_date_end (datetime, optional): DateTime for listing of Customer accounts whose
                Creation Date is less than. Defaults to None.
            total_receipt_start (float, optional): Total receipts of Customer which is greater than.
                Defaults to None.
            total_receipt_end (float, optional): Total receipts of Customer which is less than.
                Defaults to None.

        Returns:
            SearchResponse: Object containing Customer objects for each client matching
                search criteria
        """
        # Normalize scalar ID arguments to the list form the API expects.
        if isinstance(customers, str):
            customers = [customers]
        if isinstance(resellers, str):
            resellers = [resellers]
        # The API expects Unix timestamps rather than datetime objects.
        if creation_date_start:
            creation_date_start = creation_date_start.timestamp()
        if creation_date_end:
            creation_date_end = creation_date_end.timestamp()
        url = self._urls.customers.get_search_url()
        params = {
            "no-of-records": records,
            "page-no": page,
            "customer-id": customers,
            "reseller-id": resellers,
            "username": username,
            "name": name,
            "company": company,
            "city": city,
            "state": state,
            "status": status,
            "creation-date-start": creation_date_start,
            "creation-date-end": creation_date_end,
            "total-receipt-start": total_receipt_start,
            "total-receipt-end": total_receipt_end,
        }
        data = self._get_data(url, params)
        recsonpage = int(data.get("recsonpage"))
        recsindb = int(data.get("recsindb"))
        # Customer records are keyed by their numeric position on the page.
        # A distinct name is used so the `customers` argument is not shadowed.
        found = [
            self._parse_customer(value) for key, value in data.items() if key.isdigit()
        ]
        return SearchResponse(recsonpage, recsindb, found)

    @staticmethod
    def _parse_customer(record: dict) -> Customer:
        """Build a Customer from one raw API record (keys look like 'customer.<field>')."""
        fields = {k.split(".")[1]: v for k, v in record.items()}
        return Customer(
            id=fields["customerid"],
            username=fields["username"],
            reseller_id=fields["resellerid"],
            name=fields["name"],
            company=fields["company"],
            city=fields["city"],
            state=fields.get("state"),  # "state" may be missing from the record
            country=fields["country"],
            status=fields["customerstatus"],
            total_receipts=float(fields["totalreceipts"]),
            phone=fields["telno"],
            phone_country_code=fields["telnocc"],
            website_count=int(fields["websitecount"]),
        )
from typing import List, NamedTuple
from .base import BaseClient
class Availability(NamedTuple):
    """Domain name availability for TLDs"""
    # Domain name that was checked.
    domain: str
    # Availability status string returned by the API.
    status: str
    # Registry class key returned by the API; not present for every
    # result — TODO confirm semantics against the ResellerClub docs.
    classkey: str = None
class PremiumDomain(NamedTuple):
    """Premium Domain"""
    # Premium domain name.
    domain: str
    # Price in the reseller's selling currency.
    price: float
class Suggestion(NamedTuple):
    """Domain name suggestion"""
    # Suggested domain name.
    domain: str
    # Availability status string returned by the API.
    status: str
    # Parsed from the API's "in_ga" field (a "true"/"false" string).
    in_ga: bool
    # Relevance score returned by the API.
    score: float
    # "spin" value returned by the API — semantics not documented here;
    # verify against the ResellerClub docs.
    spin: str
class DomainsClient(BaseClient):
    """Domains API Client. Methods to Search, Register or Renew domain names, etc."""

    def check_availability(self, domain_names: list, tlds: list) -> List[Availability]:
        """Checks the availability of the specified domain name(s).

        https://manage.resellerclub.com/kb/answer/764

        Args:
            domain_names (list): Domain name(s) that you need to check the availability for
            tlds (list): TLDs for which the domain name availability needs to be checked

        Returns:
            List[Availability]: Returns a list containing domain name availability status for the
                requested TLDs
        """
        params = {"domain-name": domain_names, "tlds": tlds}
        url = self._urls.domains.get_availability_check_url()
        data = self._get_data(url, params)
        # The API may attach an "errors" entry alongside regular results; skip it.
        return [
            Availability(dn, **availability)
            for dn, availability in data.items()
            if dn != "errors"
        ]

    def check_idn_availability(
        self, domain_names: list, tld: str, idn_language_code: str
    ) -> List[Availability]:
        """Checks the availability of the specified Internationalized Domain Name(s) (IDN)

        https://manage.resellerclub.com/kb/answer/1427

        Args:
            domain_names (list): Internationalized Domain Name(s) that you need to check the
                availability for
            tld (str): TLD for which the domain name availability needs to be checked
            idn_language_code (str): While performing check availability for an Internationalized
                Domain Name, you need to provide the corresponding language code

        Returns:
            List[Availability]: List containing domain name availability status for the requested
                TLDs
        """
        params = {
            "domain-name": domain_names,
            "tld": tld,
            "idnLanguageCode": idn_language_code,
        }
        url = self._urls.domains.get_availability_check_url("idn")
        data = self._get_data(url, params)
        return [Availability(dn, **availability) for dn, availability in data.items()]

    def check_premium_domain_availability(
        self,
        keyword: str,
        tlds: list,
        highest_price: int = None,
        lowest_price: int = None,
        max_results: int = None,
    ) -> List[PremiumDomain]:
        """Returns a list of Aftermarket Premium domain names based on the specified keyword.

        This method only returns names available on the secondary market, and not those premium
        names that are offered directly by any Registry for new registration.

        https://manage.resellerclub.com/kb/answer/1948

        Args:
            keyword (str): Word or phrase (please enter the phrase without spaces) for which
                premium search is requested
            tlds (list): Domain name extensions (TLDs) you want to search in
            highest_price (int, optional): Maximum price (in Reseller's Selling Currency) up to
                which domain names must be suggested. Defaults to None.
            lowest_price (int, optional): Minimum price (in Reseller's Selling Currency) for which
                domain names must be suggested. Defaults to None.
            max_results (int, optional): Number of results to be returned. Defaults to None.

        Returns:
            List[PremiumDomain]: List of domain names and prices
        """
        params = {
            "key-word": keyword,
            "tlds": tlds,
            "price-high": highest_price,
            "price-low": lowest_price,
            "no-of-results": max_results,
        }
        url = self._urls.domains.get_availability_check_url("premium")
        data = self._get_data(url, params)
        return [PremiumDomain(domain, float(price)) for domain, price in data.items()]

    def check_third_level_name_availability(
        self, domain_names: list
    ) -> List[Availability]:
        """Checks the availability of the specified 3rd level .NAME domain name(s).

        https://manage.resellerclub.com/kb/node/2931

        Args:
            domain_names (list): Domain name(s) that you need to check the availability for.

        Returns:
            List[Availability]: List containing domain name availability status for the requested
                domain names
        """
        params = {"domain-name": domain_names, "tlds": "*.name"}
        url = self._urls.domains.get_availability_check_url("3rd_level_dotname")
        data = self._get_data(url, params)
        return [Availability(dn, **availability) for dn, availability in data.items()]

    def suggest_names(
        self,
        keyword: str,
        tld_only: str = None,
        exact_match: bool = None,
        adult: bool = None,
    ) -> List[Suggestion]:
        """Returns domain name suggestions for a user-specified keyword.

        https://manage.resellerclub.com/kb/answer/1085

        Args:
            keyword (str): Search term (keyword or phrase) e.g. "search" or "search world"
            tld_only (str, optional): Specific TLD(s) you may want to search for. Defaults to None.
            exact_match (bool, optional): Will return keyword alternatives when set to True.
                Can be set to False to only return TLD alternatives. Defaults to None.
            adult (bool, optional): If set to false, the suggestions will not contain any adult or
                explicit suggestions which contain words like "nude", "porn", etc. Defaults to None.

        Returns:
            List[Suggestion]: List of domain name suggestions.
        """
        params = {
            "keyword": keyword,
            "tld-only": tld_only,
            "exact-match": exact_match,
            "adult": adult,
        }
        url = self._urls.domains.get_name_suggestion_url()
        data = self._get_data(url, params)
        result = []
        for domain, sug in data.items():
            # "in_ga" and "score" arrive as strings; the comparison already
            # yields a bool, so the redundant bool() wrapper was removed.
            in_ga = sug["in_ga"].lower() == "true"
            score = float(sug["score"])
            result.append(Suggestion(domain, sug["status"], in_ga, score, sug["spin"]))
        return result
import random
import time
import requests
class BackoffStrategy:
    """Parameters of an exponential backoff schedule.

    All intervals are expressed in milliseconds (they are divided by 1000
    before being passed to time.sleep by the retry helper).
    """

    initial_interval: int
    max_interval: int
    exponent: float
    max_elapsed_time: int

    def __init__(self, initial_interval: int, max_interval: int, exponent: float, max_elapsed_time: int):
        # Store the schedule parameters verbatim; validation is left to the caller.
        self.max_elapsed_time = max_elapsed_time
        self.exponent = exponent
        self.max_interval = max_interval
        self.initial_interval = initial_interval
class RetryConfig:
    """Configuration for the retry helper.

    Attributes:
        strategy: Retry strategy name; only 'backoff' triggers retries.
        backoff: Backoff schedule used when strategy is 'backoff'.
        retry_connection_errors: Whether connection/timeout errors are retryable.
    """

    strategy: str
    backoff: BackoffStrategy
    retry_connection_errors: bool

    def __init__(self, strategy: str, retry_connection_errors: bool, backoff: BackoffStrategy = None):
        self.strategy = strategy
        self.retry_connection_errors = retry_connection_errors
        # BUG fix: `backoff` was declared but never initialized, so reading it
        # before an external assignment raised AttributeError. Accepting it as
        # an optional argument keeps existing call sites working while making
        # the attribute always present.
        self.backoff = backoff
class Retries:
    """Bundle of a RetryConfig and the HTTP status codes that should be retried."""

    config: RetryConfig
    status_codes: list[str]

    def __init__(self, config: RetryConfig, status_codes: list[str]):
        # Codes may be exact values ("503") or wildcard patterns ("5XX").
        self.status_codes = status_codes
        self.config = config
class TemporaryError(Exception):
    """Raised internally when a response matches a retryable status code."""
    # The response that triggered the retry; handed back to the caller by
    # retry_with_backoff when the retry time budget is exhausted.
    response: requests.Response

    def __init__(self, response: requests.Response):
        self.response = response
class PermanentError(Exception):
    """Wraps an exception that must not be retried; retry_with_backoff re-raises the inner error."""
    # The original, non-retryable exception.
    inner: Exception

    def __init__(self, inner: Exception):
        self.inner = inner
def retry(func, retries: Retries):
    """Execute `func`, retrying with exponential backoff when the response
    status matches one of `retries.status_codes`.

    Args:
        func: Zero-argument callable performing the request; must return an
            object with a `status_code` attribute (e.g. requests.Response).
        retries: Retry policy; retries only happen when strategy is 'backoff'.
    """
    if retries.config.strategy == 'backoff':
        def do_request():
            res: requests.Response
            try:
                res = func()
                for code in retries.status_codes:
                    if "X" in code.upper():
                        # Wildcard such as "5XX": match the whole status class.
                        code_range = int(code[0])
                        status_major = res.status_code / 100
                        if status_major >= code_range and status_major < code_range + 1:
                            raise TemporaryError(res)
                    else:
                        parsed_code = int(code)
                        if res.status_code == parsed_code:
                            raise TemporaryError(res)
            except requests.exceptions.ConnectionError as exception:
                # BUG fix: was `retries.config.config.retry_connection_errors`,
                # which always raised AttributeError (RetryConfig has no
                # `.config`). Also, the flag handling was inverted: a plain
                # re-raise is what makes retry_with_backoff retry the call, so
                # re-raise when retries are wanted and wrap in PermanentError
                # (abort) otherwise.
                if retries.config.retry_connection_errors:
                    raise
                raise PermanentError(exception) from exception
            except requests.exceptions.Timeout as exception:
                if retries.config.retry_connection_errors:
                    raise
                raise PermanentError(exception) from exception
            except TemporaryError:
                raise
            except Exception as exception:
                raise PermanentError(exception) from exception
            return res

        return retry_with_backoff(do_request, retries.config.backoff.initial_interval, retries.config.backoff.max_interval, retries.config.backoff.exponent, retries.config.backoff.max_elapsed_time)

    return func()
def retry_with_backoff(func, initial_interval=500, max_interval=60000, exponent=1.5, max_elapsed_time=3600000):
    """Call `func` repeatedly with exponential backoff until it succeeds, a
    PermanentError is raised, or `max_elapsed_time` milliseconds have elapsed.

    All interval parameters are in milliseconds; the delay grows as
    initial_interval * exponent**attempt plus up to one second of jitter,
    clamped to max_interval.
    """
    started_at = round(time.time() * 1000)
    attempt = 0
    while True:
        try:
            return func()
        except PermanentError as exception:
            # Non-retryable: surface the original error to the caller.
            raise exception.inner
        except Exception as exception:  # pylint: disable=broad-exception-caught
            elapsed = round(time.time() * 1000) - started_at
            if elapsed > max_elapsed_time:
                # Budget exhausted: hand back the last response if we have one.
                if isinstance(exception, TemporaryError):
                    return exception.response
                raise
            delay = (initial_interval / 1000) * exponent ** attempt + random.uniform(0, 1)
            time.sleep(min(delay, max_interval / 1000))
            attempt += 1
import random
import time
import requests
class BackoffStrategy:
    """Exponential backoff schedule parameters (intervals in milliseconds)."""

    initial_interval: int
    max_interval: int
    exponent: float
    max_elapsed_time: int

    def __init__(self, initial_interval: int, max_interval: int, exponent: float, max_elapsed_time: int):
        # Plain value object: copy every parameter onto the instance.
        self.initial_interval, self.max_interval = initial_interval, max_interval
        self.exponent, self.max_elapsed_time = exponent, max_elapsed_time
class RetryConfig:
    """Configuration for the retry helper.

    Attributes:
        strategy: Retry strategy name; only 'backoff' triggers retries.
        backoff: Backoff schedule used when strategy is 'backoff'.
        retry_connection_errors: Whether connection/timeout errors are retryable.
    """

    strategy: str
    backoff: BackoffStrategy
    retry_connection_errors: bool

    def __init__(self, strategy: str, retry_connection_errors: bool, backoff: BackoffStrategy = None):
        self.strategy = strategy
        self.retry_connection_errors = retry_connection_errors
        # BUG fix: `backoff` was declared but never initialized, so reading it
        # before an external assignment raised AttributeError; default to None
        # via a new optional, backward-compatible parameter.
        self.backoff = backoff
class Retries:
    """Pairs a RetryConfig with the retryable HTTP status codes."""

    config: RetryConfig
    status_codes: list[str]

    def __init__(self, config: RetryConfig, status_codes: list[str]):
        # Codes may be exact values ("503") or wildcard patterns ("5XX").
        self.status_codes = status_codes
        self.config = config
class TemporaryError(Exception):
    """Raised internally when a response matches a retryable status code."""
    # Response that triggered the retry; returned to the caller by
    # retry_with_backoff when the retry time budget runs out.
    response: requests.Response

    def __init__(self, response: requests.Response):
        self.response = response
class PermanentError(Exception):
    """Wraps a non-retryable exception; retry_with_backoff re-raises the inner error."""
    # The original, non-retryable exception.
    inner: Exception

    def __init__(self, inner: Exception):
        self.inner = inner
def retry(fn, retries: Retries):
    """Execute `fn`, retrying with exponential backoff when the response status
    matches one of `retries.status_codes`.

    Args:
        fn: Zero-argument callable performing the request; must return an
            object with a `status_code` attribute (e.g. requests.Response).
        retries: Retry policy; retries only happen when strategy is 'backoff'.
    """
    if retries.config.strategy == 'backoff':
        def do_request():
            res: requests.Response
            try:
                res = fn()
                for code in retries.status_codes:
                    if "X" in code.upper():
                        # Wildcard such as "5XX": match the whole status class.
                        codeRange = int(code[0])
                        s = res.status_code / 100
                        if s >= codeRange and s < codeRange + 1:
                            raise TemporaryError(res)
                    else:
                        parsed_code = int(code)
                        if res.status_code == parsed_code:
                            raise TemporaryError(res)
            except requests.exceptions.ConnectionError as e:
                # BUG fix: was `retries.config.config.retry_connection_errors`,
                # which always raised AttributeError (RetryConfig has no
                # `.config`). The flag handling was also inverted: a plain
                # re-raise is what makes retry_with_backoff retry, so re-raise
                # when retries are wanted and abort via PermanentError otherwise.
                if retries.config.retry_connection_errors:
                    raise
                raise PermanentError(e) from e
            except requests.exceptions.Timeout as e:
                if retries.config.retry_connection_errors:
                    raise
                raise PermanentError(e) from e
            except TemporaryError:
                raise
            except Exception as e:
                raise PermanentError(e) from e
            return res
        return retry_with_backoff(do_request, retries.config.backoff.initial_interval, retries.config.backoff.max_interval, retries.config.backoff.exponent, retries.config.backoff.max_elapsed_time)
    # BUG fix: the result of `fn()` was discarded (the function returned None
    # whenever the strategy was not 'backoff'); propagate it to the caller.
    return fn()
def retry_with_backoff(fn, initial_interval=500, max_interval=60000, exponent=1.5, max_elapsed_time=3600000):
    """Repeatedly call `fn` with exponential backoff until it succeeds, a
    PermanentError is raised, or `max_elapsed_time` milliseconds have elapsed.

    Intervals are in milliseconds; each delay is
    initial_interval * exponent**attempt plus up to one second of jitter,
    clamped to max_interval.
    """
    start = round(time.time() * 1000)
    attempt = 0
    while True:
        try:
            return fn()
        except PermanentError as e:
            # Non-retryable: surface the wrapped error to the caller.
            raise e.inner
        except Exception as e:
            elapsed = round(time.time() * 1000) - start
            if elapsed > max_elapsed_time:
                # Budget exhausted: hand back the last response if we have one.
                if isinstance(e, TemporaryError):
                    return e.response
                raise
            delay = (initial_interval / 1000) * exponent ** attempt + random.uniform(0, 1)
            time.sleep(min(delay, max_interval / 1000))
            attempt += 1
<div align="center">
<!-- <img src="https://github.com/reservoirpy/reservoirpy/raw/master/static/rpy_banner_bw.png"><br> !-->
<img src="./static/rpy_banner_bw_small-size.jpg"><br>
</div>

[](https://badge.fury.io/py/reservoirpy)
[](https://reservoirpy.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/reservoirpy/reservoirpy/actions/workflows/test.yml)
[](https://codecov.io/gh/reservoirpy/reservoirpy)
# ReservoirPy (v0.3.6) 🌀🧠
**Simple and flexible code for Reservoir Computing architectures like Echo State Networks (ESN).**
[](https://mybinder.org/v2/gh/reservoirpy/reservoirpy/HEAD)
```python
from reservoirpy.nodes import Reservoir, Ridge, Input
data = Input(input_dim=1)
reservoir = Reservoir(100, lr=0.3, sr=1.1)
readout = Ridge(ridge=1e-6)
esn = data >> reservoir >> readout
forecast = esn.fit(X, y).run(timeseries)
```
ReservoirPy is a simple user-friendly library based on Python scientific modules.
It provides a **flexible interface to implement efficient Reservoir Computing** (RC)
architectures with a particular focus on *Echo State Networks* (ESN).
Advanced features of ReservoirPy allow to improve computation time efficiency
on a simple laptop compared to basic Python implementation, with datasets of
any size.
Some of its features are: **offline and online training**, **parallel implementation**,
**sparse matrix computation**, fast spectral initialization, **advanced learning rules**
(e.g. *Intrinsic Plasticity*) etc. It also makes possible
to **easily create complex architectures with multiple reservoirs** (e.g. *deep reservoirs*),
readouts, and **complex feedback loops**.
Moreover, graphical tools are included to **easily explore hyperparameters**
with the help of the *hyperopt* library.
Finally, it includes several tutorials exploring exotic architectures
and examples of scientific papers reproduction.
This library works for **Python 3.8** and higher.
[Follow @reservoirpy](https://twitter.com/reservoirpy) updates and new releases on Twitter.
## Official documentation 📖
See [the official ReservoirPy's documentation](https://reservoirpy.readthedocs.io/en/latest/?badge=latest)
to learn more about the main features of ReservoirPy, its API and the installation process. Or you can access directly the [User Guide with tutorials](https://reservoirpy.readthedocs.io/en/latest/user_guide/index.html#user-guide).
## Quick example of how to code a deep reservoir

## Installation
```bash
pip install reservoirpy
```
(See below for more advanced installation options)
## Quick try ⚡
### An example of chaotic timeseries prediction (Mackey-Glass)
**Step 1: Load the dataset**
ReservoirPy comes with some handy data generator able to create synthetic timeseries
for well-known tasks such as Mackey-Glass timeseries forecasting.
```python
from reservoirpy.datasets import mackey_glass
X = mackey_glass(n_timesteps=2000)
```
**Step 2: Create an Echo State Network...**
...or any kind of model you wish to use to solve your task. In this simple
use case, we will try out Echo State Networks (ESNs), one of the
most minimal architectures of Reservoir Computing machines.
An ESN is made of
a *reservoir*, a random recurrent network used to encode our
inputs in a high-dimensional (non-linear) space, and a *readout*, a simple
feed-forward layer of neurons in charge of *reading out* the desired output from
the activations of the reservoir.
```python
from reservoirpy.nodes import Reservoir, Ridge
reservoir = Reservoir(units=100, lr=0.3, sr=1.25)
readout = Ridge(output_dim=1, ridge=1e-5)
```
We here obtain a reservoir with 100 neurons, a *spectral radius* of 1.25 and
a *leak rate* of 0.3 (you can learn more about these hyperparameters going through
the tutorial
[Understand and optimize hyperparameters](./tutorials/4-Understand_and_optimize_hyperparameters.ipynb)).
Here, our readout layer is just a single unit, that we will receive connections from (all units of) the reservoir.
Note that only the readout layer connections are trained.
This is one of the cornerstones of all Reservoir Computing techniques. In our
case, we will train these connections using linear regression, with a regularization
coefficient of 10<sup>-5</sup>.
Now, let's connect everything using the `>>` operator.
```python
esn = reservoir >> readout
```
That's it! Next step: fit the readout weights to perform the task we want.
We will train the ESN to make one-step-ahead forecasts of our timeseries.
**Step 3: Fit and run the ESN**
We train our ESN on the first 500 timesteps of the timeseries, with 100 steps used to warm up the reservoir states.
```python
esn.fit(X[:500], X[1:501], warmup=100)
```
Our ESN is now trained and ready to use. Let's run it on the remainder of the timeseries:
```python
predictions = esn.run(X[501:-1])
```
As a shortcut, both operations can be performed in just one line!
```python
predictions = esn.fit(X[:500], X[1:501]).run(X[501:-1])
```
Let's now evaluate its performances.
**Step 4: Evaluate the ESN**
```python
from reservoirpy.observables import rmse, rsquare
print("RMSE:", rmse(X[502:], predictions), "R^2 score:", rsquare(X[502:], predictions))
```
Run and analyse this simple file (in the "tutorials/Simple Examples with Mackey-Glass" folder) to see a complete example of timeseries prediction with ESNs:
- simple_example_MackeyGlass.py (using the ESN class)
```bash
python simple_example_MackeyGlass.py
```
If you have some issues testing some examples, have a look at the [extended packages requirements in readthedocs](https://reservoirpy.readthedocs.io/en/latest/developer_guide/advanced_install.html?highlight=requirements#additional-dependencies-and-requirements).
## More installation options
To install it, use one of the following command:
```bash
pip install reservoirpy
```
or
```bash
pip install reservoirpy==0.3.5
```
If you want to run the Python Notebooks of the _tutorials_ folder, install the packages in requirements file (warning: this may downgrade the version of hyperopt installed):
```bash
pip install -r tutorials/requirements.txt
```
If you want to use the previous version 0.2.4, you can install ReservoirPy using:
```bash
pip install reservoirpy==0.2.4
```
If you want to enable the `hyper` package and its hyperparameter optimization helpers using
[hyperopt](http://hyperopt.github.io/hyperopt/), use:
```bash
pip install reservoirpy[hyper]
```
## More examples and tutorials 🎓
[Go to the tutorial folder](./tutorials/) for tutorials in Jupyter Notebooks.
[Go to the examples folder](./examples/) for examples and papers with codes, also in Jupyter Notebooks.
## Paper with tutorials
Tutorial for ReservoirPy (v0.2) can be found in this [Paper (Trouvain et al. 2020)](https://hal.inria.fr/hal-02595026).
## Explore Hyper-Parameters with Hyperopt
A quick tutorial on how to explore hyperparameters with ReservoirPy and Hyperopt can be found in this [paper (Trouvain et al. 2020)](https://hal.inria.fr/hal-02595026).
Take a look at our **advices and our method to explore hyperparameters** for reservoirs in our [recent paper: (Hinaut et al 2021)](https://hal.inria.fr/hal-03203318/) [HTML](https://link.springer.com/chapter/10.1007/978-3-030-86383-8_7) [HAL](https://hal.inria.fr/hal-03203318)
[Turorial and Jupyter Notebook for hyper-parameter exploration](./tutorials/4-Understand_and_optimize_hyperparameters.ipynb)
More info on hyperopt: [Official website](http://hyperopt.github.io/hyperopt/)
## Papers and projects using ReservoirPy
If you want your paper to appear here, please contact us (see contact link below).
- Chaix-Eichel et al. (2022) From implicit learning to explicit representations. arXiv preprint arXiv:2204.02484. [arXiv](https://arxiv.org/abs/2204.02484) [PDF](https://arxiv.org/pdf/2204.02484)
- Trouvain & Hinaut (2021) Canary Song Decoder: Transduction and Implicit Segmentation with ESNs and LTSMs. ICANN 2021 [HTML](https://link.springer.com/chapter/10.1007/978-3-030-86383-8_6) [HAL](https://hal.inria.fr/hal-03203374) [PDF](https://hal.inria.fr/hal-03203374/document)
- Pagliarini et al. (2021) Canary Vocal Sensorimotor Model with RNN Decoder and Low-dimensional GAN Generator. ICDL 2021. [HTML](https://ieeexplore.ieee.org/abstract/document/9515607?casa_token=QbpNhxjtfFQAAAAA:3klJ9jDfA0EEbckAdPFeyfIwQf5qEicaKS-U94aIIqf2q5xkX74gWJcm3w9zxYy9SYOC49mQt6vF)
- Pagliarini et al. (2021) What does the Canary Say? Low-Dimensional GAN Applied to Birdsong. HAL preprint. [HAL](https://hal.inria.fr/hal-03244723/) [PDF](https://hal.inria.fr/hal-03244723/document)
- Which Hype for My New Task? Hints and Random Search for Echo State Networks Hyperparameters. ICANN 2021 [HTML](https://link.springer.com/chapter/10.1007/978-3-030-86383-8_7) [HAL](https://hal.inria.fr/hal-03203318) [PDF](https://hal.inria.fr/hal-03203318)
## Contact
If you have a question regarding the library, please open an Issue. If you have more general question or feedback you can [contact us on twitter](https://twitter.com/reservoirpy) or by email to xavier dot hinaut the-famous-home-symbol inria dot fr.
## Citing ReservoirPy
Trouvain, N., Pedrelli, L., Dinh, T. T., Hinaut, X. (2020) Reservoirpy: an efficient and user-friendly library to design echo state networks. In International Conference on Artificial Neural Networks (pp. 494-505). Springer, Cham. [HTML](https://link.springer.com/chapter/10.1007/978-3-030-61616-8_40) [HAL](https://hal.inria.fr/hal-02595026) [PDF](https://hal.inria.fr/hal-02595026/document)
If you're using ReservoirPy in your work, please cite our package using the following bibtex entry:
```
@incollection{Trouvain2020,
doi = {10.1007/978-3-030-61616-8_40},
url = {https://doi.org/10.1007/978-3-030-61616-8_40},
year = {2020},
publisher = {Springer International Publishing},
pages = {494--505},
author = {Nathan Trouvain and Luca Pedrelli and Thanh Trung Dinh and Xavier Hinaut},
title = {{ReservoirPy}: An Efficient and User-Friendly Library to Design Echo State Networks},
booktitle = {Artificial Neural Networks and Machine Learning {\textendash} {ICANN} 2020}
}
```
<div align="left">
<img src="./static/inr_logo_rouge.jpg" width=300><br>
</div>
This package is developed and supported by Inria at Bordeaux, France in [Mnemosyne](https://team.inria.fr/mnemosyne/) group. [Inria](https://www.inria.fr/en) is a French Research Institute in Digital Sciences (Computer Science, Mathematics, Robotics, ...).
| /reservoirpy-fbessou-0.3.13.tar.gz/reservoirpy-fbessou-0.3.13/README.md | 0.793386 | 0.991907 | README.md | pypi |
from functools import wraps
from typing import Callable
import numpy as np
def _elementwise(func):
    """Vectorize a function to apply it
    on arrays.
    """
    vectorized = np.vectorize(func)

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Stack the positional arguments into a single array, apply the
        # vectorized function and unwrap the result of the first argument.
        stacked = np.asanyarray(args)
        return vectorized(stacked)[0]

    return wrapper
def get_function(name: str) -> Callable:
    """Return an activation function from name.

    Parameters
    ----------
    name : str
        Name of the activation function.
        Can be one of {'softmax', 'softplus',
        'sigmoid', 'tanh', 'identity', 'relu'} or
        their respective short names {'smax', 'sp',
        'sig', 'id', 're'}.

    Returns
    -------
    callable
        An activation function.
    """
    # Long names first, then their short aliases; merged insertion order is
    # preserved so the error message lists them in the documented order.
    long_names = {
        "softmax": softmax,
        "softplus": softplus,
        "sigmoid": sigmoid,
        "tanh": tanh,
        "identity": identity,
        "relu": relu,
    }
    short_names = {
        "smax": softmax,
        "sp": softplus,
        "sig": sigmoid,
        "id": identity,
        "re": relu,
    }
    index = {**long_names, **short_names}
    if name not in index:
        raise ValueError(f"Function name must be one of {[k for k in index.keys()]}")
    return index[name]
def softmax(x: np.ndarray, beta: float = 1.0) -> np.ndarray:
    """Softmax activation function.

    .. math::

        y_k = \\frac{e^{\\beta x_k}}{\\sum_{i=1}^{n} e^{\\beta x_i}}

    Parameters
    ----------
    x : array
        Input array.
    beta: float, default to 1.0
        Beta parameter of softmax.

    Returns
    -------
    array
        Activated vector.
    """
    _x = beta * np.asarray(x)
    # Subtract the maximum before exponentiating: mathematically a no-op
    # (numerator and denominator share the e^{-max} factor), but it prevents
    # overflow of np.exp for large inputs.
    e = np.exp(_x - _x.max())
    return e / e.sum()
def softplus(x: np.ndarray) -> np.ndarray:
    """Softplus activation function.

    .. math::

        f(x) = \\mathrm{ln}(1 + e^{x})

    Can be used as a smooth version of ReLU.

    Parameters
    ----------
    x : array
        Input array.

    Returns
    -------
    array
        Activated vector.
    """
    _x = np.asarray(x, dtype=float)
    # Stable reformulation: log(1 + e^x) = max(x, 0) + log1p(e^{-|x|}).
    # The naive np.log(1 + np.exp(x)) used previously overflows for x >~ 700
    # under float64; this version is also fully vectorized instead of relying
    # on a per-element np.vectorize wrapper.
    return np.maximum(_x, 0.0) + np.log1p(np.exp(-np.abs(_x)))
def sigmoid(x: np.ndarray) -> np.ndarray:
    """Sigmoid activation function.

    .. math::

        f(x) = \\frac{1}{1 + e^{-x}}

    Parameters
    ----------
    x : array
        Input array.

    Returns
    -------
    array
        Activated vector.
    """
    _x = np.asarray(x, dtype=float)
    # Equivalent closed form: 1/(1+e^{-x}) = (1 + tanh(x/2)) / 2. tanh
    # saturates cleanly, so this never overflows for large |x| — the original
    # code used a per-element Python branch for the same purpose; this version
    # is fully vectorized.
    return 0.5 * (1.0 + np.tanh(0.5 * _x))
def tanh(x: np.ndarray) -> np.ndarray:
    """Hyperbolic tangent activation function.

    .. math::

        f(x) = \\frac{e^x - e^{-x}}{e^x + e^{-x}}

    Parameters
    ----------
    x : array
        Input array.

    Returns
    -------
    array
        Activated vector.
    """
    # Thin wrapper: delegate directly to NumPy's C implementation.
    return np.tanh(x)
def identity(x: np.ndarray) -> np.ndarray:
    """Identity function.

    .. math::

        f(x) = x

    Provided for convenience.

    Parameters
    ----------
    x : array
        Input array.

    Returns
    -------
    array
        Activated vector.
    """
    # The original wrapped this no-op in np.vectorize, paying a Python-level
    # call per element; converting once and returning is equivalent and O(1).
    return np.asarray(x)
def relu(x: np.ndarray) -> np.ndarray:
    """ReLU activation function.

    .. math::

        f(x) = x ~~ \\mathrm{if} ~~ x > 0 ~~ \\mathrm{else} ~~ 0

    Parameters
    ----------
    x : array
        Input array.

    Returns
    -------
    array
        Activated vector.
    """
    # Vectorized equivalent of the original per-element branch; avoids the
    # Python-level loop of np.vectorize.
    return np.maximum(np.asarray(x), 0.0)
from contextlib import contextmanager
from copy import copy, deepcopy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from scipy.sparse import issparse
from ._base import DistantFeedback, _Node, call, check_one_sequence, check_xy, train
from .model import Model
from .type import (
BackwardFn,
Data,
EmptyInitFn,
ForwardFn,
ForwardInitFn,
PartialBackFn,
Shape,
global_dtype,
)
from .utils import progress
from .utils.model_utils import to_ragged_seq_set
from .utils.parallel import clean_tempfile, memmap_buffer
from .utils.validation import check_vector
def _init_with_sequences(node, X, Y=None):
    """Initialize a Node with a sequence of inputs/targets."""
    X = to_ragged_seq_set(X)
    # When no targets are supplied, pair each input sequence with None.
    if Y is None:
        Y = [None] * len(X)
    else:
        Y = to_ragged_seq_set(Y)
    if not node.is_initialized:
        node.initialize(X[0], Y[0])
    return X, Y
def _init_vectors_placeholders(node, x, y):
    """Build placeholder input/target vectors used to initialize a Node.

    Returns a pair ``(x_init, y_init)`` of 2D arrays (or lists of 2D arrays
    for multi-input nodes). When ``x``/``y`` are not given, empty arrays shaped
    from the node's declared dimensions are used instead; either may be None
    when no dimension can be determined.

    Raises
    ------
    RuntimeError
        If ``x`` is None and the node's input_dim is unknown.
    """
    msg = f"Impossible to initialize node {node.name}: "
    in_msg = (
        msg + "input_dim is unknown and no input data x was given "
        "to call/run the node."
    )
    x_init = y_init = None
    if isinstance(x, np.ndarray):
        # Single input: validate and promote to 2D (timesteps, features).
        x_init = np.atleast_2d(check_vector(x, caller=node))
    elif isinstance(x, list):
        # Multiple inputs: validate and promote each one.
        x_init = list()
        for i in range(len(x)):
            x_init.append(np.atleast_2d(check_vector(x[i], caller=node)))
    elif x is None:
        # No data given: fabricate empty placeholders from the declared dims.
        if node.input_dim is not None:
            if hasattr(node.input_dim, "__iter__"):
                # Iterable input_dim means several input slots.
                x_init = [np.empty((1, d)) for d in node.input_dim]
            else:
                x_init = np.empty((1, node.input_dim))
        else:
            raise RuntimeError(in_msg)
    if y is not None:
        y_init = np.atleast_2d(check_vector(y, caller=node))
    elif node.output_dim is not None:
        y_init = np.empty((1, node.output_dim))
    else:
        # check if output dimension can be inferred from a teacher node
        if node._teacher is not None and node._teacher.output_dim is not None:
            y_init = np.empty((1, node._teacher.output_dim))
    return x_init, y_init
def _partial_backward_default(node, X_batch, Y_batch=None):
    """By default, for offline learners, partial_fit simply stores inputs and
    targets, waiting for fit to be called."""
    # Normalize dims to tuples so they can be compared with array shapes.
    input_dim = node.input_dim
    if not hasattr(node.input_dim, "__iter__"):
        input_dim = (input_dim,)
    output_dim = node.output_dim
    if not hasattr(node.output_dim, "__iter__"):
        output_dim = (output_dim,)
    if isinstance(X_batch, np.ndarray):
        if X_batch.shape[1:] == input_dim:
            # Single sequence of shape (timesteps, *input_dim).
            node._X.append(X_batch)
        elif X_batch.shape[2:] == input_dim:
            # Batch of sequences of shape (n_seq, timesteps, *input_dim).
            node._X.append([X_batch[i] for i in range(len(X_batch))])
    else:
        node._X.extend(X_batch)
    if Y_batch is not None:
        if isinstance(Y_batch, np.ndarray):
            # BUG fix: the original compared leading dimensions
            # (`Y_batch.shape[:1]`/`shape[:2]`) against output_dim/input_dim,
            # which could mis-classify batches (e.g. when the number of
            # timesteps equals the output dimension). Mirror the X_batch
            # logic: compare trailing dimensions against output_dim.
            if Y_batch.shape[1:] == output_dim:
                node._Y.append(Y_batch)
            elif Y_batch.shape[2:] == output_dim:
                node._Y.append([Y_batch[i] for i in range(len(Y_batch))])
        else:
            node._Y.extend(Y_batch)
    return
def _initialize_feedback_default(node, fb):
    """Void feedback initializer. Works in any case."""
    # A single array yields a scalar dim; a list of arrays yields a tuple of
    # dims; anything else (e.g. no feedback) leaves the dimension undefined.
    if isinstance(fb, np.ndarray):
        fb_dim = fb.shape[1]
    elif isinstance(fb, list):
        fb_dim = tuple(arr.shape[1] for arr in fb)
    else:
        fb_dim = None
    node.set_feedback_dim(fb_dim)
class Node(_Node):
    """Node base class.

    Parameters
    ----------
    params : dict, optional
        Parameters of the Node. Parameters are mutable, and can be modified
        through learning or by the effect of hyperparameters.
    hypers : dict, optional
        Hyperparameters of the Node. Hyperparameters are immutable, and define
        the architecture and properties of the Node.
    forward : callable, optional
        A function defining the computation performed by the Node on some data
        point :math:`x_t`, and that would update the Node internal state from
        :math:`s_t` to :math:`s_{t+1}`.
    backward : callable, optional
        A function defining an offline learning rule, applied on a whole
        dataset, or on pre-computed values stored in buffers.
    partial_backward : callable, optional
        A function defining an offline learning rule, applied on a single batch
        of data.
    train : callable, optional
        A function defining an online learning, applied on a single step of
        a sequence or of a timeseries.
    initializer : callable, optional
        A function called at first run of the Node, defining the dimensions and
        values of its parameters based on the dimension of input data and its
        hyperparameters.
    fb_initializer : callable, optional
        A function called at first run of the Node, defining the dimensions and
        values of its parameters based on the dimension of data received as
        a feedback from another Node.
    buffers_initializer : callable, optional
        A function called at the beginning of an offline training session to
        create buffers used to store intermediate results, for batch or
        multisequence offline learning.
    input_dim : int
        Input dimension of the Node.
    output_dim : int
        Output dimension of the Node. Dimension of its state.
    feedback_dim :
        Dimension of the feedback signal received by the Node.
    name : str
        Name of the Node. It must be a unique identifier.

    See also
    --------
    Model
        Object used to compose node operations and create computational
        graphs.
    """

    # --- Type declarations of internal attributes ---
    _name: str
    _state: Optional[np.ndarray]
    _state_proxy: Optional[np.ndarray]
    _feedback: Optional[DistantFeedback]
    _teacher: Optional[DistantFeedback]
    _params: Dict[str, Any]
    _hypers: Dict[str, Any]
    _buffers: Dict[str, Any]
    _input_dim: int
    _output_dim: int
    _feedback_dim: int
    _forward: ForwardFn
    _backward: BackwardFn
    _partial_backward: PartialBackFn
    _train: PartialBackFn
    _initializer: ForwardInitFn
    _buffers_initializer: EmptyInitFn
    _feedback_initializer: ForwardInitFn
    _trainable: bool
    _fitted: bool
    _X: List  # For partial_fit default behavior (store first, then fit)
    _Y: List

    def __init__(
        self,
        params: Dict[str, Any] = None,
        hypers: Dict[str, Any] = None,
        forward: ForwardFn = None,
        backward: BackwardFn = None,
        partial_backward: PartialBackFn = _partial_backward_default,
        train: PartialBackFn = None,
        initializer: ForwardInitFn = None,
        fb_initializer: ForwardInitFn = _initialize_feedback_default,
        buffers_initializer: EmptyInitFn = None,
        input_dim: int = None,
        output_dim: int = None,
        feedback_dim: int = None,
        name: str = None,
        dtype: np.dtype = global_dtype,
        *args,
        **kwargs,
    ):
        self._params = dict() if params is None else params
        self._hypers = dict() if hypers is None else hypers
        # buffers are all node state components that should not live
        # outside the node training loop, like partial computations for
        # linear regressions. They can also be shared across multiple processes
        # when needed.
        self._buffers = dict()
        self._forward = forward
        self._backward = backward
        self._partial_backward = partial_backward
        self._train = train
        self._initializer = initializer
        self._feedback_initializer = fb_initializer
        self._buffers_initializer = buffers_initializer
        self._input_dim = input_dim
        self._output_dim = output_dim
        self._feedback_dim = feedback_dim
        self._name = self._get_name(name)
        self._dtype = dtype
        # Parameters and feedback connections are initialized lazily,
        # at first run (see `initialize` and `initialize_feedback`).
        self._is_initialized = False
        self._is_fb_initialized = False
        self._state_proxy = None
        self._feedback = None
        self._teacher = None
        self._fb_flag = True  # flag is used to trigger distant feedback model update
        # A Node is trainable if it defines at least one learning rule.
        self._trainable = self._backward is not None or self._train is not None
        # Offline learners start unfitted; all other nodes are considered fitted.
        self._fitted = False if self.is_trainable and self.is_trained_offline else True
        self._X, self._Y = [], []

    def __lshift__(self, other) -> "_Node":
        """`node << other`: returns a copy of this Node receiving feedback
        from `other` (see :py:meth:`link_feedback`)."""
        return self.link_feedback(other)

    def __ilshift__(self, other) -> "_Node":
        """`node <<= other`: connects feedback from `other` to this Node,
        in place (see :py:meth:`link_feedback`)."""
        return self.link_feedback(other, inplace=True)

    def __iand__(self, other):
        # In-place merge (`&=`) is only defined for Model instances.
        raise TypeError(
            f"Impossible to merge nodes inplace: {self} is not a Model instance."
        )

    def _flag_feedback(self):
        """Toggle the feedback flag, used to trigger an update of the distant
        feedback model."""
        self._fb_flag = not self._fb_flag

    def _unregister_teacher(self):
        """Detach the teacher Node registered for online training."""
        self._teacher = None

    @property
    def input_dim(self):
        """Node input dimension."""
        return self._input_dim

    @property
    def output_dim(self):
        """Node output and internal state dimension."""
        return self._output_dim

    @property
    def feedback_dim(self):
        """Node feedback signal dimension."""
        return self._feedback_dim

    @property
    def is_initialized(self):
        """Returns if the Node is initialized or not."""
        return self._is_initialized

    @property
    def has_feedback(self):
        """Returns if the Node receives feedback or not."""
        return self._feedback is not None

    @property
    def is_trained_offline(self):
        """Returns if the Node can be fitted offline or not."""
        return self.is_trainable and self._backward is not None

    @property
    def is_trained_online(self):
        """Returns if the Node can be trained online or not."""
        return self.is_trainable and self._train is not None

    @property
    def is_trainable(self):
        """Returns if the Node can be trained."""
        return self._trainable

    @is_trainable.setter
    def is_trainable(self, value: bool):
        """Freeze or unfreeze the Node. If set to False,
        learning is stopped."""
        # Only nodes that actually define a learning rule can be toggled.
        if self.is_trained_offline or self.is_trained_online:
            if type(value) is bool:
                self._trainable = value
            else:
                raise TypeError("'is_trainable' must be a boolean.")

    @property
    def fitted(self):
        """Returns if the Node parameters have fitted already, using an
        offline learning rule. If the node is trained online, returns True."""
        return self._fitted

    @property
    def is_fb_initialized(self):
        """Returns if the Node feedback initializer has been called already."""
        return self._is_fb_initialized

    @property
    def dtype(self):
        """Numpy numerical type of node parameters."""
        return self._dtype

    @property
    def unsupervised(self):
        """Returns if the Node is trained without teacher signals.
        Always False for the base Node class."""
        return False

    def state(self) -> Optional[np.ndarray]:
        """Node current internal state.

        Returns
        -------
        array of shape (1, output_dim), optional
            Internal state of the Node.
        """
        # No state exists before initialization (output_dim may be unknown).
        if not self.is_initialized:
            return None
        return self._state

    def state_proxy(self) -> Optional[np.ndarray]:
        """Returns the internal state frozen to be sent to other Nodes,
        connected through a feedback connection. This prevents any change
        occurring on the Node before feedback have reached the other Node to
        propagate to the other Node too early.

        Returns
        -------
        array of shape (1, output_dim), optional
            Internal state of the Node.
        """
        # Fall back to the live state when nothing has been frozen yet.
        if self._state_proxy is None:
            return self._state
        return self._state_proxy

    def feedback(self) -> np.ndarray:
        """State of the Nodes connected to this Node through feedback
        connections.

        Returns
        -------
        array-like of shape ([n_feedbacks], 1, feedback_dim), optional
            State of the feedback Nodes, i.e. the feedback signal.
        """
        if self.has_feedback:
            return self._feedback()
        else:
            raise RuntimeError(
                f"Node {self} is not connected to any feedback Node or Model."
            )

    def set_state_proxy(self, value: np.ndarray = None):
        """Change the frozen state of the Node. Used internally to send
        the current state to feedback receiver Nodes during the next call.

        Parameters
        ----------
        value : array of shape (1, output_dim)
            State to freeze, waiting to be sent to feedback receivers.
        """
        if value is not None:
            if self.is_initialized:
                # Validate shape and cast to the Node dtype before freezing.
                value = check_one_sequence(
                    value, self.output_dim, allow_timespans=False, caller=self
                ).astype(self.dtype)
                self._state_proxy = value
            else:
                raise RuntimeError(f"{self.name} is not intialized yet.")

    def set_input_dim(self, value: int):
        """Set the input dimension of the Node. Can only be called once,
        during Node initialization."""
        if not self._is_initialized:
            if self._input_dim is not None and value != self._input_dim:
                raise ValueError(
                    f"Imposible to use {self.name} with input "
                    f"data of dimension {value}. Node has input "
                    f"dimension {self._input_dim}."
                )
            self._input_dim = value
        else:
            raise TypeError(
                f"Input dimension of {self.name} is immutable after initialization."
            )

    def set_output_dim(self, value: int):
        """Set the output dimension of the Node. Can only be called once,
        during Node initialization."""
        if not self._is_initialized:
            if self._output_dim is not None and value != self._output_dim:
                raise ValueError(
                    f"Imposible to use {self.name} with target "
                    f"data of dimension {value}. Node has output "
                    f"dimension {self._output_dim}."
                )
            self._output_dim = value
        else:
            raise TypeError(
                f"Output dimension of {self.name} is immutable after initialization."
            )

    def set_feedback_dim(self, value: int):
        """Set the feedback dimension of the Node. Can only be called once,
        during Node initialization."""
        if not self.is_fb_initialized:
            self._feedback_dim = value
        else:
            # NOTE(review): message says "Output dimension" but this method
            # concerns the feedback dimension (likely a copy-paste slip).
            raise TypeError(
                f"Output dimension of {self.name} is immutable after initialization."
            )

    def get_param(self, name: str):
        """Get one of the parameters or hyperparameters given its name."""
        if name in self._params:
            return self._params.get(name)
        elif name in self._hypers:
            return self._hypers.get(name)
        else:
            raise AttributeError(f"No attribute named '{name}' found in node {self}")

    def set_param(self, name: str, value: Any):
        """Set the value of a parameter.

        Parameters
        ----------
        name : str
            Parameter name.
        value : array-like
            Parameter new value.
        """
        if name in self._params:
            # Cast array-like values to the Node dtype; sparse matrices are
            # cast in place through their data buffer.
            if hasattr(value, "dtype"):
                if issparse(value):
                    value.data = value.data.astype(self.dtype)
                else:
                    value = value.astype(self.dtype)
            self._params[name] = value
        elif name in self._hypers:
            self._hypers[name] = value
        else:
            raise KeyError(
                f"No param named '{name}' "
                f"in {self.name}. Available params are: "
                f"{list(self._params.keys())}."
            )

    def create_buffer(
        self, name: str, shape: Shape = None, data: np.ndarray = None, as_memmap=True
    ):
        """Create a buffer array on disk, using numpy.memmap. This can be
        used to store transient variables on disk. Typically, called inside
        a `buffers_initializer` function.

        Parameters
        ----------
        name : str
            Name of the buffer array.
        shape : tuple of int, optional
            Shape of the buffer array.
        data : array-like
            Data to store in the buffer array.
        as_memmap : bool, default to True
            If True, store the buffer on disk as a :py:class:`numpy.memmap`.
            Otherwise, keep it in memory as a regular Numpy array.
        """
        if as_memmap:
            self._buffers[name] = memmap_buffer(self, data=data, shape=shape, name=name)
        else:
            if data is not None:
                self._buffers[name] = data
            else:
                self._buffers[name] = np.empty(shape)

    def set_buffer(self, name: str, value: np.ndarray):
        """Dump data in the buffer array.

        Parameters
        ----------
        name : str
            Name of the buffer array.
        value : array-like
            Data to store in the buffer array.
        """
        # In-place assignment so memmap-backed buffers are written through.
        self._buffers[name][:] = value.astype(self.dtype)

    def get_buffer(self, name) -> np.memmap:
        """Get data from a buffer array.

        Parameters
        ----------
        name : str
            Name of the buffer array.

        Returns
        -------
        numpy.memmap
            Data as Numpy memory map.
        """
        if self._buffers.get(name) is None:
            raise AttributeError(f"No buffer named '{name}' in {self}.")
        return self._buffers[name]

    def initialize(self, x: Data = None, y: Data = None) -> "Node":
        """Call the Node initializers on some data points.
        Initializers are functions called at first run of the Node,
        defining the dimensions and values of its parameters based on the
        dimension of some input data and its hyperparameters.

        Data point `x` is used to infer the input dimension of the Node.
        Data point `y` is used to infer the output dimension of the Node.

        Parameters
        ----------
        x : array-like of shape ([n_inputs], 1, input_dim)
            Input data.
        y : array-like of shape (1, output_dim)
            Ground truth data. Used to infer output dimension
            of trainable nodes.

        Returns
        -------
        Node
            Initialized Node.
        """
        # Idempotent: a second call is a no-op.
        if not self.is_initialized:
            x_init, y_init = _init_vectors_placeholders(self, x, y)
            self._initializer(self, x=x_init, y=y_init)
            self.reset()
            self._is_initialized = True
        return self

    def initialize_feedback(self) -> "Node":
        """Call the Node feedback initializer. The feedback initializer will
        determine feedback dimension given some feedback signal, and initialize
        all parameters related to the feedback connection.

        Feedback sender Node must be initialized, as the feedback initializer
        will probably call the :py:meth:`Node.feedback` method to get
        a sample of feedback signal.

        Returns
        -------
        Node
            Initialized Node.
        """
        if self.has_feedback:
            if not self.is_fb_initialized:
                # Make sure the sender side is ready before sampling feedback.
                self._feedback.initialize()
                self._feedback_initializer(self, self.zero_feedback())
                self._is_fb_initialized = True
        return self

    def initialize_buffers(self) -> "Node":
        """Call the Node buffer initializer. The buffer initializer will create
        buffer array on demand to store transient values of the parameters,
        typically during training.

        Returns
        -------
        Node
            Initialized Node.
        """
        if self._buffers_initializer is not None:
            # Only create buffers once per training session.
            if len(self._buffers) == 0:
                self._buffers_initializer(self)
        return self

    def clean_buffers(self):
        """Clean Node's buffer arrays."""
        if len(self._buffers) > 0:
            self._buffers = dict()
            clean_tempfile(self)
        # Empty possibly stored inputs and targets in default buffer.
        self._X = self._Y = []

    def reset(self, to_state: np.ndarray = None) -> "Node":
        """Reset the last state saved to zero or to
        another state value `to_state`.

        Parameters
        ----------
        to_state : array of shape (1, output_dim), optional
            New state value.

        Returns
        -------
        Node
            Reset Node.
        """
        if to_state is None:
            self._state = self.zero_state()
        else:
            self._state = check_one_sequence(
                to_state, self.output_dim, allow_timespans=False, caller=self
            ).astype(self.dtype)
        return self

    @contextmanager
    def with_state(
        self, state: np.ndarray = None, stateful: bool = False, reset: bool = False
    ) -> "Node":
        """Modify the state of the Node using a context manager.
        The modification will have effect only within the context defined,
        before the state returns back to its previous value.

        Parameters
        ----------
        state : array of shape (1, output_dim), optional
            New state value.
        stateful : bool, default to False
            If set to True, then all modifications made in the context manager
            will remain after leaving the context.
        reset : bool, default to False
            If True, the Node will be reset using its :py:meth:`Node.reset`
            method.

        Returns
        -------
        Node
            Modified Node.
        """
        if not self._is_initialized:
            raise RuntimeError(
                f"Impossible to set state of node {self.name}: node"
                f"is not initialized yet."
            )
        # Save the current state so it can be restored on exit.
        current_state = self._state
        if state is None:
            if reset:
                state = self.zero_state()
            else:
                state = current_state
        self.reset(to_state=state)
        yield self
        if not stateful:
            self._state = current_state

    @contextmanager
    def with_feedback(
        self, feedback: np.ndarray = None, stateful=False, reset=False
    ) -> "Node":
        """Modify the feedback received or sent by the Node using
        a context manager.
        The modification will have effect only within the context defined,
        before the feedback returns to its previous state.

        If the Node is receiving feedback, then this function will alter the
        state of the Node connected to it through feedback connections.

        If the Node is sending feedback, then this function will alter the
        state (or state proxy, see :py:meth:`Node.state_proxy`) of the Node.

        Parameters
        ----------
        feedback : array of shape (1, feedback_dim), optional
            New feedback signal.
        stateful : bool, default to False
            If set to True, then all modifications made in the context manager
            will remain after leaving the context.
        reset : bool, default to False
            If True, the feedback will be reset to zero.

        Returns
        -------
        Node
            Modified Node.
        """
        if self.has_feedback:
            # Feedback receiver: clamp the distant sender's visible state.
            if reset:
                feedback = self.zero_feedback()
            if feedback is not None:
                self._feedback.clamp(feedback)
            yield self
        else:  # maybe a feedback sender then ?
            current_state_proxy = self._state_proxy
            if feedback is None:
                if reset:
                    feedback = self.zero_state()
                else:
                    feedback = current_state_proxy
            self.set_state_proxy(feedback)
            yield self
            if not stateful:
                self._state_proxy = current_state_proxy

    def zero_state(self) -> np.ndarray:
        """A null state vector."""
        # Returns None implicitly when output_dim is still unknown.
        if self.output_dim is not None:
            return np.zeros((1, self.output_dim), dtype=self.dtype)

    def zero_feedback(self) -> Optional[Union[List[np.ndarray], np.ndarray]]:
        """A null feedback vector. Returns None if the Node receives
        no feedback."""
        if self._feedback is not None:
            return self._feedback.zero_feedback()
        return None

    def link_feedback(
        self, node: _Node, inplace: bool = False, name: str = None
    ) -> "_Node":
        """Create a feedback connection between the Node and another Node or
        Model.

        Parameters
        ----------
        node : Node or Model
            Feedback sender Node or Model.
        inplace : bool, default to False
            If False, then this function returns a copy of the current Node
            with feedback enabled. If True, feedback is directly added to the
            current Node.
        name : str, optional
            Name of the node copy, if `inplace` is False.

        Returns
        -------
        Node
            A Node with a feedback connection.
        """
        # Imported here to avoid a circular import with the ops module.
        from .ops import link_feedback

        return link_feedback(self, node, inplace=inplace, name=name)

    def call(
        self,
        x: Data,
        from_state: np.ndarray = None,
        stateful: bool = True,
        reset: bool = False,
    ) -> np.ndarray:
        """Call the Node forward function on a single step of data.
        Can update the state of the
        Node.

        Parameters
        ----------
        x : array of shape ([n_inputs], 1, input_dim)
            One single step of input data.
        from_state : array of shape (1, output_dim), optional
            Node state value to use at beginning of computation.
        stateful : bool, default to True
            If True, Node state will be updated by this operation.
        reset : bool, default to False
            If True, Node state will be reset to zero before this operation.

        Returns
        -------
        array of shape (1, output_dim)
            An output vector.
        """
        x, _ = check_xy(
            self,
            x,
            allow_timespans=False,
            allow_n_sequences=False,
        )
        # Lazy initialization on first call.
        if not self._is_initialized:
            self.initialize(x)
        return call(self, x, from_state=from_state, stateful=stateful, reset=reset)

    def run(self, X: np.array, from_state=None, stateful=True, reset=False):
        """Run the Node forward function on a sequence of data.
        Can update the state of the
        Node several times.

        Parameters
        ----------
        X : array-like of shape ([n_inputs], timesteps, input_dim)
            A sequence of data of shape (timesteps, features).
        from_state : array of shape (1, output_dim), optional
            Node state value to use at beginning of computation.
        stateful : bool, default to True
            If True, Node state will be updated by this operation.
        reset : bool, default to False
            If True, Node state will be reset to zero before this operation.

        Returns
        -------
        array of shape (timesteps, output_dim)
            A sequence of output vectors.
        """
        X_, _ = check_xy(
            self,
            X,
            allow_n_sequences=False,
        )
        if isinstance(X_, np.ndarray):
            if not self._is_initialized:
                self.initialize(np.atleast_2d(X_[0]))
            seq_len = X_.shape[0]
        else:  # multiple inputs ?
            if not self._is_initialized:
                self.initialize([np.atleast_2d(x[0]) for x in X_])
            seq_len = X_[0].shape[0]
        with self.with_state(from_state, stateful=stateful, reset=reset):
            states = np.zeros((seq_len, self.output_dim))
            for i in progress(range(seq_len), f"Running {self.name}: "):
                # Feed the Node one timestep at a time.
                if isinstance(X_, (list, tuple)):
                    x = [np.atleast_2d(Xi[i]) for Xi in X_]
                else:
                    x = np.atleast_2d(X_[i])
                s = call(self, x)
                states[i, :] = s
        return states

    def train(
        self,
        X: np.ndarray,
        Y: Union[_Node, np.ndarray] = None,
        force_teachers: bool = True,
        call: bool = True,
        learn_every: int = 1,
        from_state: np.ndarray = None,
        stateful: bool = True,
        reset: bool = False,
    ) -> np.ndarray:
        """Train the Node parameters using an online learning rule, if
        available.

        Parameters
        ----------
        X : array-like of shape ([n_inputs], timesteps, input_dim)
            Input sequence of data.
        Y : array-like of shape (timesteps, output_dim), optional.
            Target sequence of data. If None, the Node will search a feedback
            signal, or train in an unsupervised way, if possible.
        force_teachers : bool, default to True
            If True, this Node will broadcast the available ground truth signal
            to all Nodes using this Node as a feedback sender. Otherwise,
            the real state of this Node will be sent to the feedback receivers.
        call : bool, default to True
            It True, call the Node and update its state before applying the
            learning rule. Otherwise, use the train method
            on the current state.
        learn_every : int, default to 1
            Time interval at which training must occur, when dealing with a
            sequence of input data. By default, the training method is called
            every time the Node receive an input.
        from_state : array of shape (1, output_dim), optional
            Node state value to use at beginning of computation.
        stateful : bool, default to True
            If True, Node state will be updated by this operation.
        reset : bool, default to False
            If True, Node state will be reset to zero before this operation.

        Returns
        -------
        array of shape (timesteps, output_dim)
            All outputs computed during the training. If `call` is False,
            outputs will be the result of :py:meth:`Node.zero_state`.
        """
        if not self.is_trained_online:
            raise TypeError(f"Node {self} has no online learning rule implemented.")
        X_, Y_ = check_xy(
            self,
            X,
            Y,
            allow_n_sequences=False,
            allow_n_inputs=False,
        )
        if not self._is_initialized:
            # Infer dimensions from the first timestep of data.
            x_init = np.atleast_2d(X_[0])
            y_init = None
            if hasattr(Y, "__iter__"):
                y_init = np.atleast_2d(Y_[0])
            self.initialize(x=x_init, y=y_init)
            self.initialize_buffers()
        # `call` parameter shadows the module-level call function here;
        # it is forwarded to the training routine as `call_node`.
        states = train(
            self,
            X_,
            Y_,
            call_node=call,
            force_teachers=force_teachers,
            learn_every=learn_every,
            from_state=from_state,
            stateful=stateful,
            reset=reset,
        )
        self._unregister_teacher()
        return states

    def partial_fit(
        self,
        X_batch: Data,
        Y_batch: Data = None,
        warmup=0,
        **kwargs,
    ) -> "Node":
        """Partial offline fitting method of a Node.
        Can be used to perform batched fitting or to precompute some variables
        used by the fitting method.

        Parameters
        ----------
        X_batch : array-like of shape ([n_inputs], [series], timesteps, input_dim)
            A sequence or a batch of sequence of input data.
        Y_batch : array-like of shape ([series], timesteps, output_dim), optional
            A sequence or a batch of sequence of teacher signals.
        warmup : int, default to 0
            Number of timesteps to consider as warmup and
            discard at the beginning of each timeseries before training.

        Returns
        -------
        Node
            Partially fitted Node.
        """
        if not self.is_trained_offline:
            raise TypeError(f"Node {self} has no offline learning rule implemented.")
        X, Y = check_xy(self, X_batch, Y_batch, allow_n_inputs=False)
        X, Y = _init_with_sequences(self, X, Y)
        self.initialize_buffers()
        for i in range(len(X)):
            X_seq = X[i]
            Y_seq = None
            if Y is not None:
                Y_seq = Y[i]
            if X_seq.shape[0] <= warmup:
                raise ValueError(
                    f"Warmup set to {warmup} timesteps, but one timeseries is only "
                    f"{X_seq.shape[0]} long."
                )
            # Discard warmup timesteps before applying the partial rule.
            if Y_seq is not None:
                self._partial_backward(self, X_seq[warmup:], Y_seq[warmup:], **kwargs)
            else:
                self._partial_backward(self, X_seq[warmup:], **kwargs)
        return self

    def fit(self, X: Data = None, Y: Data = None, warmup=0) -> "Node":
        """Offline fitting method of a Node.

        Parameters
        ----------
        X : array-like of shape ([n_inputs], [series], timesteps, input_dim), optional
            Input sequences dataset. If None, the method will try to fit
            the parameters of the Node using the precomputed values returned
            by previous call of :py:meth:`partial_fit`.
        Y : array-like of shape ([series], timesteps, output_dim), optional
            Teacher signals dataset. If None, the method will try to fit
            the parameters of the Node using the precomputed values returned
            by previous call of :py:meth:`partial_fit`, or to fit the Node in
            an unsupervised way, if possible.
        warmup : int, default to 0
            Number of timesteps to consider as warmup and
            discard at the beginning of each timeseries before training.

        Returns
        -------
        Node
            Node trained offline.
        """
        if not self.is_trained_offline:
            raise TypeError(f"Node {self} has no offline learning rule implemented.")
        self._fitted = False
        # Call the partial backward function on the dataset if it is
        # provided all at once.
        if X is not None:
            if self._partial_backward is not None:
                self.partial_fit(X, Y, warmup=warmup)
        elif not self._is_initialized:
            raise RuntimeError(
                f"Impossible to fit node {self.name}: node"
                f"is not initialized, and fit was called "
                f"without input and teacher data."
            )
        self._backward(self, self._X, self._Y)
        self._fitted = True
        self.clean_buffers()
        return self

    def copy(
        self, name: str = None, copy_feedback: bool = False, shallow: bool = False
    ):
        """Returns a copy of the Node.

        Parameters
        ----------
        name : str
            Name of the Node copy.
        copy_feedback : bool, default to False
            If True, also copy the Node feedback senders.
        shallow : bool, default to False
            If False, performs a deep copy of the Node.

        Returns
        -------
        Node
            A copy of the Node.
        """
        if shallow:
            new_obj = copy(self)
        else:
            if self.has_feedback:
                # store feedback node
                fb = self._feedback
                # temporarily remove it
                self._feedback = None
                # copy and restore feedback, deep copy of feedback depends
                # on the copy_feedback parameter only
                new_obj = deepcopy(self)
                new_obj._feedback = fb
                self._feedback = fb
            else:
                new_obj = deepcopy(self)
        if copy_feedback:
            if self.has_feedback:
                fb_copy = deepcopy(self._feedback)
                new_obj._feedback = fb_copy
        # Give the copy a fresh unique name.
        n = self._get_name(name)
        new_obj._name = n
        return new_obj
class Unsupervised(Node):
    """A Node trained without teacher signals (unsupervised learning rule)."""

    @property
    def unsupervised(self):
        # Overrides Node.unsupervised (which returns False).
        return True
import copy
import warnings
from functools import partial
from typing import Callable, Iterable, Union
import numpy as np
from numpy.random import Generator
from scipy import sparse, stats
from scipy.sparse.linalg import ArpackNoConvergence
from .observables import spectral_radius
from .type import global_dtype
from .utils.random import rand_generator
# Public API of this module: matrix initializer functions and their
# historical aliases.
__all__ = [
    "fast_spectral_initialization",
    "generate_internal_weights",
    "generate_input_weights",
    "random_sparse",
    "uniform",
    "normal",
    "bernoulli",
    "zeros",
    "ones",
]
_epsilon = 1e-8  # used to avoid division by zero when rescaling spectral radius
def _filter_deprecated_kwargs(kwargs):
deprecated = {
"proba": "connectivity",
"typefloat": "dtype",
"N": None,
"dim_input": None,
}
new_kwargs = {}
args = [None, None]
args_order = ["N", "dim_input"]
for depr, repl in deprecated.items():
if depr in kwargs:
depr_argument = kwargs.pop(depr)
msg = f"'{depr}' parameter is deprecated since v0.3.1."
if repl is not None:
msg += f" Consider using '{repl}' instead."
new_kwargs[repl] = depr_argument
else:
args[args_order.index(depr)] = depr_argument
warnings.warn(msg, DeprecationWarning)
args = [a for a in args if a is not None]
kwargs.update(new_kwargs)
return args, kwargs
class Initializer:
    """Base class for initializer functions. Allow updating initializer function
    parameters several times before calling. May perform spectral radius rescaling
    or input scaling as a post-processing to initializer function results.

    Parameters
    ----------
    func : callable
        Initializer function. Should have a `shape` argument and return a Numpy array
        or Scipy sparse matrix.
    autorize_sr : bool, default to True
        Autorize spectral radius rescaling for this initializer.
    autorize_input_scaling : bool, default to True
        Autorize input_scaling for this initializer.
    autorize_rescaling : bool, default to True
        Autorize any kind of rescaling (spectral radius or input scaling) for this
        initializer.

    Example
    -------
    >>> from reservoirpy.mat_gen import random_sparse
    >>> init_func = random_sparse(dist="uniform")
    >>> init_func = init_func(connectivity=0.1)
    >>> init_func(5, 5) # actually creates the matrix
    >>> random_sparse(5, 5, dist="uniform", connectivity=0.1) # also creates the matrix
    """

    def __init__(
        self,
        func,
        autorize_sr=True,
        autorize_input_scaling=True,
        autorize_rescaling=True,
    ):
        self._func = func
        # Keyword parameters accumulated across successive calls; applied to
        # `func` once a shape is finally provided.
        self._kwargs = dict()
        self._autorize_sr = autorize_sr
        self._autorize_input_scaling = autorize_input_scaling
        self._autorize_rescaling = autorize_rescaling
        # Mirror the wrapped function's documentation and annotations so the
        # Initializer presents itself like `func` to users and tooling.
        self.__doc__ = func.__doc__
        self.__annotations__ = func.__annotations__
        if self._autorize_sr:
            self.__annotations__.update({"sr": float})
        if self._autorize_input_scaling:
            self.__annotations__.update(
                {"input_scaling": Union[float, Iterable[float]]}
            )

    def __repr__(self):
        # Inject the wrapped function's name into the default object repr.
        split = super().__repr__().split(" ")
        return split[0] + f" ({self._func.__name__}) " + " ".join(split[1:])

    def __call__(self, *shape, **kwargs):
        """Update the initializer parameters and, if a shape is given,
        create and return the matrix. Without a shape, returns a copy of
        the initializer partially configured with `kwargs`."""
        if "sr" in kwargs and not self._autorize_sr:
            raise ValueError(
                "Spectral radius rescaling is not supported by this initializer."
            )
        if "input_scaling" in kwargs and not self._autorize_input_scaling:
            raise ValueError("Input scaling is not supported by this initializer.")
        # Deprecated 'N'/'dim_input' kwargs may carry the shape.
        new_shape, kwargs = _filter_deprecated_kwargs(kwargs)
        if len(new_shape) > 1:
            shape = new_shape
        elif len(new_shape) > 0:
            # A single 'N' means a square (N, N) matrix.
            shape = (new_shape[0], new_shape[0])
        # Work on a copy so the original (possibly shared) initializer
        # keeps its previous configuration.
        init = copy.deepcopy(self)
        init._kwargs.update(kwargs)
        if len(shape) > 0:
            if init._autorize_rescaling:
                return init._func_post_process(*shape, **init._kwargs)
            else:
                return init._func(*shape, **init._kwargs)
        else:
            if len(kwargs) > 0:
                return init
            else:
                return init._func(**init._kwargs)  # should raise, shape is None

    def _func_post_process(self, *shape, sr=None, input_scaling=None, **kwargs):
        """Post process initializer with spectral radius or input scaling factors."""
        if sr is not None and input_scaling is not None:
            raise ValueError(
                "'sr' and 'input_scaling' parameters are mutually exclusive for a "
                "given matrix."
            )
        if sr is not None:
            return _scale_spectral_radius(self._func, shape, sr, **kwargs)
        elif input_scaling is not None:
            return _scale_inputs(self._func, shape, input_scaling, **kwargs)
        else:
            return self._func(*shape, **kwargs)
def _get_rvs(dist: str, random_state: Generator, **kwargs) -> Callable:
"""Get a scipy.stats random variable generator.
Parameters
----------
dist : str
A scipy.stats distribution.
random_state : Generator
A Numpy random generator.
Returns
-------
scipy.stats.rv_continuous or scipy.stats.rv_discrete
A scipy.stats random variable generator.
"""
if dist == "custom_bernoulli":
return _bernoulli_discrete_rvs(**kwargs, random_state=random_state)
elif dist in dir(stats):
distribution = getattr(stats, dist)
return partial(distribution(**kwargs).rvs, random_state=random_state)
else:
raise ValueError(
f"'{dist}' is not a valid distribution name. "
"See 'scipy.stats' for all available distributions."
)
def _bernoulli_discrete_rvs(
    p=0.5, value: float = 1.0, random_state: Union[Generator, int] = None
) -> Callable:
    """Generator of Bernoulli random variables, equal to +value or -value.

    Parameters
    ----------
    p : float, default to 0.5
        Probability of single success (+value). Single failure (-value)
        probability is (1-p).
    value : float, default to 1.0
        Success value. Failure value is equal to -value.
    random_state : Generator or int, optional
        A Numpy random generator or an integer seed.

    Returns
    -------
    callable
        A random variable generator.
    """
    generator = rand_generator(random_state)
    outcomes = [value, -value]
    weights = [p, 1 - p]

    def rvs(size: int = 1):
        # Draw `size` independent signed-Bernoulli samples.
        return generator.choice(outcomes, p=weights, replace=True, size=size)

    return rvs
def _scale_spectral_radius(w_init, shape, sr, **kwargs):
    """Change the spectral radius of a matrix created with an
    initializer.

    Parameters
    ----------
    w_init : Initializer
        An initializer.
    shape : tuple of int
        Shape of the matrix.
    sr : float
        New spectral radius.
    seed: int or Generator
        A random generator or an integer seed.

    Returns
    -------
    Numpy array or Scipy sparse matrix
        Rescaled matrix.
    """
    convergence = False
    if "seed" in kwargs:
        seed = kwargs.pop("seed")
    else:
        seed = None
    rg = rand_generator(seed)
    w = w_init(*shape, seed=seed, **kwargs)
    while not convergence:
        # make sure the eigenvalues are reachable.
        # (maybe find a better way to do this on day)
        try:
            current_sr = spectral_radius(w)
            if -_epsilon < current_sr < _epsilon:
                current_sr = _epsilon  # avoid div by zero exceptions.
            w *= sr / current_sr
            convergence = True
        except ArpackNoConvergence:  # pragma: no cover
            # Eigenvalue solver failed: draw a fresh seed and regenerate the
            # matrix. Bugfix: the previous retry used rg.integers(1, seed + 1),
            # whose range shrinks at each retry and, once seed reaches 1,
            # always returns 1 (integers' upper bound is exclusive) — getting
            # permanently stuck. Draw from the full range every time instead.
            seed = rg.integers(1, 9999)
            w = w_init(*shape, seed=seed, **kwargs)
    return w
def _scale_inputs(w_init, shape, input_scaling, **kwargs):
"""Rescale a matrix created with an initializer.
Parameters
----------
w_init : Initializer
An initializer.
shape : tuple of int
Shape of the matrix.
input_scaling : float
Scaling parameter.
Returns
-------
Numpy array or Scipy sparse matrix
Rescaled matrix.
"""
w = w_init(*shape, **kwargs)
if sparse.issparse(w):
return w.multiply(input_scaling)
else:
return np.multiply(w, input_scaling)
def _random_sparse(
    *shape: int,
    dist: str,
    connectivity: float = 1.0,
    dtype: np.dtype = global_dtype,
    sparsity_type: str = "csr",
    seed: Union[int, np.random.Generator] = None,
    **kwargs,
):
    """Create a random matrix.

    Parameters
    ----------
    *shape : int, int, ..., optional
        Shape (row, columns, ...) of the matrix.
    dist : str
        A distribution name from :py:mod:`scipy.stats` module, such as "norm"
        or "uniform". Parameters like `loc` and `scale` can be passed to the
        distribution functions as keyword arguments to this function. Can also
        have the value "custom_bernoulli": weights are then drawn from a
        Bernoulli discrete random variable alternating between -1 and 1,
        drawing 1 with probability `p` (default `p` to 0.5).
    connectivity : float, default to 1.0
        Also called density of the sparse matrix. By default, creates dense
        arrays.
    dtype : numpy.dtype, default to numpy.float64
        A Numpy numerical type.
    sparsity_type : {"csr", "csc", "dense"}, default to "csr"
        If connectivity is inferior to 1 and shape is only 2-dimensional, a
        Scipy sparse format ("csr" or "csc") is used. Else, a Numpy array
        ("dense") is used.
    seed : optional
        Random generator seed.
    **kwargs : optional
        Arguments for the scipy.stats distribution.

    Returns
    -------
    Numpy array or Scipy sparse matrix
        The initialized random matrix.

    Raises
    ------
    ValueError
        If `connectivity` is outside of [0, 1].
    """
    # Bug fix: the original chained comparison `0 < connectivity > 1.0`
    # only rejected values above 1, silently accepting negative densities.
    if connectivity < 0 or connectivity > 1.0:
        raise ValueError("'connectivity' must be >0 and <1.")
    rg = rand_generator(seed)
    rvs = _get_rvs(dist, **kwargs, random_state=rg)
    if connectivity >= 1.0 or len(shape) != 2:
        # Dense path: draw a full array, then zero out entries to reach
        # the requested density.
        matrix = rvs(size=shape).astype(dtype)
        if connectivity < 1.0:
            matrix[rg.random(shape) > connectivity] = 0.0
    else:
        matrix = sparse.random(
            shape[0],
            shape[1],
            density=connectivity,
            format=sparsity_type,
            random_state=rg,
            data_rvs=rvs,
            dtype=dtype,
        )
    # sparse.random may return np.matrix if format="dense".
    # Only ndarray are supported though, hence the explicit cast.
    if type(matrix) is np.matrix:
        matrix = np.asarray(matrix)
    return matrix
random_sparse = Initializer(_random_sparse)
def _uniform(
    *shape: int,
    low: float = -1.0,
    high: float = 1.0,
    connectivity: float = 1.0,
    dtype: np.dtype = global_dtype,
    sparsity_type: str = "csr",
    seed: Union[int, np.random.Generator] = None,
):
    """Create an array with values drawn uniformly from [low, high].

    Parameters
    ----------
    *shape : int, int, ..., optional
        Shape (row, columns, ...) of the array.
    low, high : float, float, default to -1, 1
        Boundaries of the uniform distribution.
    connectivity : float, default to 1.0
        Density of the matrix; 1.0 produces a dense array.
    dtype : numpy.dtype, default to numpy.float64
        A Numpy numerical type.
    sparsity_type : {"csr", "csc", "dense"}, default to "csr"
        Sparse format used when connectivity < 1 and shape is 2-dimensional.
    seed : optional
        Random generator seed.

    Returns
    -------
    Numpy array or Scipy sparse matrix
        The initialized matrix.

    Raises
    ------
    ValueError
        If `high` is lower than `low`.
    """
    if high < low:
        raise ValueError("'high' boundary must be > to 'low' boundary.")
    # scipy's "uniform" distribution is parameterized as [loc, loc + scale].
    return _random_sparse(
        *shape,
        dist="uniform",
        loc=low,
        scale=high - low,
        connectivity=connectivity,
        dtype=dtype,
        sparsity_type=sparsity_type,
        seed=seed,
    )
uniform = Initializer(_uniform)
def _normal(
    *shape: int,
    loc: float = 0.0,
    scale: float = 1.0,
    connectivity: float = 1.0,
    dtype: np.dtype = global_dtype,
    sparsity_type: str = "csr",
    seed: Union[int, np.random.Generator] = None,
):
    """Create an array with values drawn from a Gaussian distribution.

    Parameters
    ----------
    *shape : int, int, ..., optional
        Shape (row, columns, ...) of the array.
    loc, scale : float, float, default to 0, 1
        Mean and scale of the Gaussian distribution.
    connectivity : float, default to 1.0
        Density of the matrix; 1.0 produces a dense array.
    dtype : numpy.dtype, default to numpy.float64
        A Numpy numerical type.
    sparsity_type : {"csr", "csc", "dense"}, default to "csr"
        Sparse format used when connectivity < 1 and shape is 2-dimensional.
    seed : optional
        Random generator seed.

    Returns
    -------
    Numpy array or Scipy sparse matrix
        The initialized matrix.
    """
    # Delegate to the generic initializer with scipy's "norm" distribution.
    return _random_sparse(
        *shape,
        dist="norm",
        loc=loc,
        scale=scale,
        connectivity=connectivity,
        dtype=dtype,
        sparsity_type=sparsity_type,
        seed=seed,
    )
normal = Initializer(_normal)
def _bernoulli(
    *shape: int,
    p: float = 0.5,
    connectivity: float = 1.0,
    dtype: np.dtype = global_dtype,
    sparsity_type: str = "csr",
    seed: Union[int, np.random.Generator] = None,
):
    """Create an array with values equal to either 1 or -1. Probability of
    success (to obtain 1) is equal to p.

    Parameters
    ----------
    *shape : int, int, ..., optional
        Shape (row, columns, ...) of the array.
    p : float, default to 0.5
        Probability of success (to obtain 1).
    connectivity : float, default to 1.0
        Density of the matrix; 1.0 produces a dense array.
    dtype : numpy.dtype, default to numpy.float64
        A Numpy numerical type.
    sparsity_type : {"csr", "csc", "dense"}, default to "csr"
        Sparse format used when connectivity < 1 and shape is 2-dimensional.
    seed : optional
        Random generator seed.

    Returns
    -------
    Numpy array or Scipy sparse matrix
        The initialized matrix.

    Raises
    ------
    ValueError
        If `p` is outside of [0, 1].
    """
    # Bug fix: the original check `1 < p < 0` can never be True (chained
    # comparison), so invalid probabilities were silently accepted.
    if p < 0 or p > 1:
        raise ValueError("'p' must be <= 1 and >= 0.")
    return _random_sparse(
        *shape,
        p=p,
        dist="custom_bernoulli",
        connectivity=connectivity,
        dtype=dtype,
        sparsity_type=sparsity_type,
        seed=seed,
    )
bernoulli = Initializer(_bernoulli)
def _ones(*shape: int, dtype: np.dtype = global_dtype, **kwargs):
    """Create an array filled with 1.

    Parameters
    ----------
    *shape : int, int, ..., optional
        Shape (row, columns, ...) of the array.
    dtype : numpy.dtype, default to numpy.float64
        A Numpy numerical type.
    **kwargs : optional
        Unused here; accepted so the signature matches the other
        initializers.

    Returns
    -------
    Numpy array
        An array of ones.
    """
    return np.ones(shape, dtype=dtype)
ones = Initializer(_ones)
def _zeros(*shape: int, dtype: np.dtype = global_dtype, **kwargs):
    """Create an array filled with 0.

    Parameters
    ----------
    *shape : int, int, ..., optional
        Shape (row, columns, ...) of the array.
    dtype : numpy.dtype, default to numpy.float64
        A Numpy numerical type.
    **kwargs : optional
        Unused here; accepted so the signature matches the other
        initializers.

    Returns
    -------
    Numpy array
        An array of zeros.

    Note
    ----
    The `sr` parameter is not available for this initializer: the spectral
    radius of a null matrix cannot be rescaled.
    """
    return np.zeros(shape, dtype=dtype)
zeros = Initializer(_zeros, autorize_sr=False)
def _fast_spectral_initialization(
    N: int,
    *args,
    sr: float = None,
    connectivity: float = 1.0,
    dtype: np.dtype = global_dtype,
    sparsity_type: str = "csr",
    seed: Union[int, np.random.Generator] = None,
):
    """Fast spectral radius (FSI) approach for weights
    initialization [1]_ of square matrices.

    This method is well suited for computation and rescaling of
    very large weights matrices, with a number of neurons typically
    above 500-1000.

    Parameters
    ----------
    N : int, optional
        Shape :math:`N \\times N` of the array.
        This function only builds square matrices.
    sr : float, optional
        If defined, then will rescale the spectral radius of the matrix to
        this value.
    connectivity : float, default to 1.0
        Also called density of the sparse matrix.
    dtype : numpy.dtype, default to numpy.float64
        A Numpy numerical type.
    sparsity_type : {"csr", "csc", "dense"}, default to "csr"
        Sparse format used when connectivity < 1 and shape is 2-dimensional.
    seed : optional
        Random generator seed.

    Returns
    -------
    Numpy array or Scipy sparse matrix
        The initialized matrix.

    Raises
    ------
    ValueError
        If `connectivity` is outside of [0, 1].

    Note
    ----
    This function was designed for initialization of a reservoir's internal
    weights. In consequence, it can only produce square matrices. If more
    than one positional argument of shape are provided, only the first will
    be used.

    References
    -----------
    .. [1] C. Gallicchio, A. Micheli, and L. Pedrelli,
           'Fast Spectral Radius Initialization for Recurrent
           Neural Networks', in Recent Advances in Big Data and
           Deep Learning, Cham, 2020, pp. 380-390,
           doi: 10.1007/978-3-030-16841-4_39.
    """
    # Bug fix: the original chained comparison `0 > connectivity < 1.0`
    # only rejected negative densities; values above 1 slipped through.
    if connectivity < 0 or connectivity > 1.0:
        raise ValueError("'connectivity' must be >0 and <1.")
    if sr is None or connectivity <= 0.0:
        a = 1
    else:
        # FSI scaling coefficient (Gallicchio et al., 2020).
        a = -(6 * sr) / (np.sqrt(12) * np.sqrt((connectivity * N)))
    return _uniform(
        N,
        N,
        low=np.min((a, -a)),
        high=np.max((a, -a)),
        connectivity=connectivity,
        dtype=dtype,
        sparsity_type=sparsity_type,
        seed=seed,
    )
fast_spectral_initialization = Initializer(
    _fast_spectral_initialization,
    autorize_input_scaling=False,
    autorize_rescaling=False,
)
def _generate_internal_weights(
    N: int,
    *args,
    dist="norm",
    connectivity=0.1,
    dtype=global_dtype,
    sparsity_type="csr",
    seed=None,
    **kwargs,
):
    """Generate a square weight matrix for the internal connections of a
    reservoir.

    .. deprecated:: 0.3.1
        Use :py:func:`normal`, :py:func:`uniform` or :py:func:`random_sparse`
        instead.

    Parameters
    ----------
    N : int
        Number of reservoir units; the matrix is :math:`N \\times N`.
    dist : str, default to "norm"
        A distribution name from :py:mod:`scipy.stats`, or
        "custom_bernoulli" for weights alternating between -1 and 1.
    connectivity : float, default to 0.1
        Density of the sparse matrix.
    dtype : numpy.dtype, default to numpy.float64
        A Numpy numerical type.
    sparsity_type : {"csr", "csc", "dense"}, default to "csr"
        Sparse format used when connectivity < 1.
    seed : optional
        Random generator seed.
    **kwargs : optional
        Arguments for the scipy.stats distribution.

    Returns
    -------
    Numpy array or Scipy sparse matrix
        The generated weight matrix.
    """
    warnings.warn(
        "'generate_internal_weights' is deprecated since v0.3.1 and will be removed in "
        "future versions. Consider using 'bernoulli' or 'random_sparse'.",
        DeprecationWarning,
    )
    # Thin deprecated wrapper around the generic random initializer.
    return _random_sparse(
        N,
        N,
        dist=dist,
        connectivity=connectivity,
        dtype=dtype,
        sparsity_type=sparsity_type,
        seed=seed,
        **kwargs,
    )
generate_internal_weights = Initializer(
    _generate_internal_weights, autorize_input_scaling=False
)
def _generate_input_weights(
    N,
    dim_input,
    dist="custom_bernoulli",
    connectivity=1.0,
    dtype=global_dtype,
    sparsity_type="csr",
    seed=None,
    input_bias=False,
    **kwargs,
):
    """Generate input or feedback weights for a reservoir.

    By default, weights are drawn from a discrete Bernoulli random variable,
    i.e. are always equal to 1 or -1.

    .. deprecated:: 0.3.1
        Use :py:func:`bernoulli` or :py:func:`random_sparse` instead.

    Parameters
    ----------
    N : int
        Number of units in the connected reservoir.
    dim_input : int
        Dimension of the inputs connected to the reservoir.
    dist : str, default to "custom_bernoulli"
        A distribution name from :py:mod:`scipy.stats`, or
        "custom_bernoulli" for weights alternating between -1 and 1.
    connectivity : float, default to 1.0
        Density of the sparse matrix.
    dtype : numpy.dtype, default to numpy.float64
        A Numpy numerical type.
    sparsity_type : {"csr", "csc", "dense"}, default to "csr"
        Sparse format used when connectivity < 1.
    seed : optional
        Random generator seed.
    input_bias : bool, optional
        Deprecated. If True, adds a row to the matrix to take into account a
        constant bias added to the input. Bias should now be initialized
        separately from the input matrix.
    **kwargs : optional
        Arguments for the scipy.stats distribution.

    Returns
    -------
    Numpy array or Scipy sparse matrix
        The generated weight matrix.
    """
    warnings.warn(
        "'generate_input_weights' is deprecated since v0.3.1 and will be removed in "
        "future versions. Consider using 'normal', 'uniform' or 'random_sparse'.",
        DeprecationWarning,
    )
    if input_bias:
        warnings.warn(
            "'input_bias' parameter is deprecated. Bias should be initialized "
            "separately from the input matrix.",
            DeprecationWarning,
        )
        # One extra row accounts for the constant bias input.
        dim_input += 1
    return _random_sparse(
        N,
        dim_input,
        dist=dist,
        connectivity=connectivity,
        dtype=dtype,
        sparsity_type=sparsity_type,
        seed=seed,
        **kwargs,
    )
generate_input_weights = Initializer(_generate_input_weights, autorize_sr=False)
import sys
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
if sys.version_info < (3, 8):
from typing_extensions import Protocol
else:
from typing import Protocol
import numpy as np
from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
# Default numerical precision used across the library ("d" is the matching
# single-character type code for C doubles).
global_dtype = np.float64
global_ctype = "d"
# Weight matrices may be dense Numpy arrays or common Scipy sparse formats.
Weights = TypeVar("Weights", np.ndarray, csr_matrix, csc_matrix, coo_matrix)
# A dimension: a single integer or a tuple of integers.
Shape = TypeVar("Shape", int, Tuple[int, ...])
# Timeseries data: a single array or an iterable of arrays.
Data = TypeVar("Data", Iterable[np.ndarray], np.ndarray)
# Data possibly mapped to named entry points of a model (dict keyed by
# string -- presumably node names; confirm against the model API).
MappedData = TypeVar(
    "MappedData",
    Iterable[np.ndarray],
    np.ndarray,
    Dict[str, Iterable[np.ndarray]],
    Dict[str, np.ndarray],
)
class NodeType(Protocol):
    """Node base Protocol class for type checking and interface inheritance."""
    # Identification and configuration.
    name: str
    params: Dict[str, Any]
    hypers: Dict[str, Any]
    is_initialized: bool
    # Input/output dimensions (may be int or tuple of int).
    input_dim: Shape
    output_dim: Shape
    # Training capabilities and status flags.
    is_trained_offline: bool
    is_trained_online: bool
    is_trainable: bool
    fitted: bool
    # Calling a node runs it on data and returns the resulting array.
    def __call__(self, *args, **kwargs) -> np.ndarray:
        ...
    # `>>` chains nodes into a model (left feeds right).
    def __rshift__(self, other: Union["NodeType", Sequence["NodeType"]]) -> "NodeType":
        ...
    def __rrshift__(self, other: Union["NodeType", Sequence["NodeType"]]) -> "NodeType":
        ...
    # `&` merges nodes/models in parallel.
    def __and__(self, other: Union["NodeType", Sequence["NodeType"]]) -> "NodeType":
        ...
    def get_param(self, name: str) -> Any:
        ...
    # Infer dimensions and create parameters from (optional) sample data.
    def initialize(self, x: MappedData = None, y: MappedData = None):
        ...
    def reset(self, to_state: np.ndarray = None) -> "NodeType":
        ...
    # Context managers yielding a node with a temporary state/feedback.
    def with_state(
        self, state=None, stateful=False, reset=False
    ) -> Iterator["NodeType"]:
        ...
    def with_feedback(
        self, feedback=None, stateful=False, reset=False
    ) -> Iterator["NodeType"]:
        ...
# Signature aliases for the functional pieces a node is assembled from.
# An activation maps an array to an array.
Activation = Callable[[np.ndarray], np.ndarray]
# (node, data) -> array: forward pass of a node.
ForwardFn = Callable[[NodeType, Data], np.ndarray]
# (node, x, y) -> None: offline fit over a whole dataset.
BackwardFn = Callable[[NodeType, Optional[Data], Optional[Data]], None]
# (node, x_batch, y_batch) -> None: incremental (partial) fit step.
PartialBackFn = Callable[[NodeType, Data, Optional[Data]], None]
# (node, x, y) -> None: initializer that may use sample data.
ForwardInitFn = Callable[[NodeType, Optional[Data], Optional[Data]], None]
# (node) -> None: initializer that needs no data.
EmptyInitFn = Callable[[NodeType], None]
import sys
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from scipy.sparse.linalg import eigs
from .type import Weights
def _check_arrays(y_true, y_pred):
y_true_array = np.asarray(y_true)
y_pred_array = np.asarray(y_pred)
if not y_true_array.shape == y_pred_array.shape:
raise ValueError(
f"Shape mismatch between y_true and y_pred: "
"{y_true_array.shape} != {y_pred_array.shape}"
)
return y_true_array, y_pred_array
def spectral_radius(W: Weights, maxiter: int = None) -> float:
    """Compute the spectral radius of a matrix `W`, i.e. its maximum
    absolute eigenvalue.

    Parameters
    ----------
    W : array-like (sparse or dense) of shape (N, N)
        Matrix from which the spectral radius will be computed.
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed when `W` is
        sparse. Defaults to `W.shape[0] * 20`.

    Returns
    -------
    float
        Spectral radius of `W`.

    Raises
    ------
    ArpackNoConvergence
        The ARPACK eigenvalue solver used for large sparse matrices may fail
        to converge. Raising `maxiter` can fix this, at the cost of a
        (possibly much) longer computation time.
    """
    if issparse(W):
        # Sparse path: only the dominant eigenvalue is needed (k=1, "LM").
        iterations = W.shape[0] * 20 if maxiter is None else maxiter
        dominant = eigs(
            W, k=1, which="LM", maxiter=iterations, return_eigenvectors=False
        )
        return max(abs(dominant))
    # Dense path: full eigendecomposition.
    return max(abs(linalg.eig(W)[0]))
def mse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Mean squared error metric:

    .. math::

        \\frac{\\sum_{i=0}^{N-1} (y_i - \\hat{y}_i)^2}{N}

    Parameters
    ----------
    y_true : array-like of shape (N, features)
        Ground truth values.
    y_pred : array-like of shape (N, features)
        Predicted values.

    Returns
    -------
    float
        Mean squared error.
    """
    y_true_array, y_pred_array = _check_arrays(y_true, y_pred)
    squared_errors = np.square(y_true_array - y_pred_array)
    return float(squared_errors.mean())
def rmse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Root mean squared error metric:

    .. math::

        \\sqrt{\\frac{\\sum_{i=0}^{N-1} (y_i - \\hat{y}_i)^2}{N}}

    Parameters
    ----------
    y_true : array-like of shape (N, features)
        Ground truth values.
    y_pred : array-like of shape (N, features)
        Predicted values.

    Returns
    -------
    float
        Root mean squared error.
    """
    mean_squared = mse(y_true, y_pred)
    return np.sqrt(mean_squared)
def nrmse(
    y_true: np.ndarray,
    y_pred: np.ndarray,
    norm: Literal["minmax", "var", "mean", "q1q3"] = "minmax",
    norm_value: float = None,
) -> float:
    """Normalized mean squared error metric:

    .. math::

        \\frac{1}{\\lambda} * \\sqrt{\\frac{\\sum_{i=0}^{N-1} (y_i - \\hat{y}_i)^2}{N}}

    where :math:`\\lambda` may be:
        - :math:`\\max y - \\min y` (Peak-to-peak amplitude) if ``norm="minmax"``;
        - :math:`\\mathrm{Var}(y)` (variance over time) if ``norm="var"``;
        - :math:`\\mathbb{E}[y]` (mean over time) if ``norm="mean"``;
        - :math:`Q_{3}(y) - Q_{1}(y)` (quartiles) if ``norm="q1q3"``;
        - or any value passed to ``norm_value``.

    Parameters
    ----------
    y_true : array-like of shape (N, features)
        Ground truth values.
    y_pred : array-like of shape (N, features)
        Predicted values.
    norm : {"minmax", "var", "mean", "q1q3"}, default to "minmax"
        Normalization method.
    norm_value : float, optional
        A normalization factor. If set, will override the ``norm`` parameter.

    Returns
    -------
    float
        Normalized mean squared error.

    Raises
    ------
    ValueError
        If ``norm`` is not one of the supported methods.
    """
    error = rmse(y_true, y_pred)
    if norm_value is not None:
        return error / norm_value

    norms = {
        # np.ptp(y) rather than y.ptp(): the ndarray method was removed in
        # NumPy 2.0; the module-level function is the portable spelling.
        "minmax": lambda y: np.ptp(y),
        "var": lambda y: y.var(),
        "mean": lambda y: y.mean(),
        "q1q3": lambda y: np.quantile(y, 0.75) - np.quantile(y, 0.25),
    }

    if norms.get(norm) is None:
        raise ValueError(
            f"Unknown normalization method. "
            f"Available methods are {list(norms.keys())}."
        )
    return error / norms[norm](np.asarray(y_true))
def rsquare(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Coefficient of determination :math:`R^2`:

    .. math::

        1 - \\frac{\\sum^{N-1}_{i=0} (y - \\hat{y})^2}
        {\\sum^{N-1}_{i=0} (y - \\bar{y})^2}

    where :math:`\\bar{y}` is the mean value of ground truth.

    Parameters
    ----------
    y_true : array-like of shape (N, features)
        Ground truth values.
    y_pred : array-like of shape (N, features)
        Predicted values.

    Returns
    -------
    float
        Coefficient of determination.
    """
    y_true_array, y_pred_array = _check_arrays(y_true, y_pred)
    residual = (y_true_array - y_pred_array) ** 2
    # Bug fix: the total sum of squares must be centered on the mean of the
    # *ground truth* (as the docstring states), not the mean of the
    # predictions.
    total = (y_true_array - y_true_array.mean()) ** 2
    return 1 - np.sum(residual) / np.sum(total)
from functools import partial
import numpy as np
from ..node import Node
from ..nodes.utils import (
_initialize_readout,
_prepare_inputs_for_learning,
readout_forward,
)
def _reset_buffers(step, rTPs, factors):
step[:] = np.zeros_like(step)
rTPs[:] = np.zeros_like(rTPs)
factors[:] = np.zeros_like(factors)
def train(readout: "BatchFORCE", x=None, y=None):
    """One online training step of the batched FORCE (RLS-like) rule.

    Updates ``readout.Wout`` (and ``readout.bias`` when enabled) from a
    single input/target pair. Rank-one corrections to the inverse
    correlation matrix ``P`` are accumulated in the node's buffers and only
    folded into ``P`` once every ``batch_size`` steps.

    Parameters
    ----------
    readout : BatchFORCE
        Node holding parameters (``Wout``, ``bias``, ``P``) and buffers
        (``rTPs``, ``factors``, ``step``).
    x : array, optional
        Input sample. If None, the call is a no-op.
    y : array, optional
        Target sample.
    """
    if x is not None:
        x, y = _prepare_inputs_for_learning(
            x, y, bias=readout.has_bias, allow_reshape=True
        )
        W = readout.Wout
        if readout.has_bias:
            bias = readout.bias
            # Fold the bias column into W so a single update rule applies.
            W = np.c_[bias, W]
        P = readout.P
        r = x.T
        output = readout.state()
        # Per-batch accumulators, cleared every `batch_size` steps.
        factors = readout.get_buffer("factors")
        rTPs = readout.get_buffer("rTPs")
        steps = readout.get_buffer("step")
        step = int(steps[0])
        error = output.T - y.T
        rt = r.T
        # r.T @ P with the not-yet-applied rank-one corrections replayed
        # from the buffers, so P itself is touched only once per batch.
        rTP = (rt @ P) - (rt @ (factors * rTPs)) @ rTPs.T
        factor = float(1.0 / (1.0 + rTP @ r))
        factors[step] = factor
        rTPs[:, step] = rTP
        new_rTP = rTP * (1 - factor * (rTP @ r).item())
        W -= error @ new_rTP
        if readout.has_bias:
            # Split the concatenated matrix back into bias and weights.
            readout.set_param("Wout", W[:, 1:])
            readout.set_param("bias", W[:, :1])
        else:
            readout.set_param("Wout", W)
        step += 1
        if step == readout.batch_size:
            # Apply all accumulated corrections to P at once, then clear
            # the buffers for the next batch.
            P -= (factors * rTPs) @ rTPs.T
            _reset_buffers(steps, rTPs, factors)
def initialize(readout: "BatchFORCE", x=None, y=None, init_func=None, bias=None):
    """Initialize the readout weights and the P matrix.

    Delegates weight/bias creation to `_initialize_readout`, then sets the
    inverse correlation matrix ``P = I / alpha`` (one extra dimension when a
    bias column is used).
    """
    _initialize_readout(readout, x, y, init_func, bias)
    if x is None:
        return
    dim = readout.input_dim
    if readout.has_bias:
        dim += 1  # account for the bias column folded into W during training
    readout.set_param("P", np.asmatrix(np.eye(dim)) / readout.alpha)
def initialize_buffers(readout: "BatchFORCE"):
    """Allocate the per-batch accumulation buffers used by `train`."""
    extra = 1 if readout.has_bias else 0
    # One column per step of the batch; one extra row for the bias term.
    readout.create_buffer("rTPs", (readout.input_dim + extra, readout.batch_size))
    readout.create_buffer("factors", (readout.batch_size,))
    readout.create_buffer("step", (1,))
class BatchFORCE(Node):
    """Single-layer readout trained online with a batched FORCE rule.

    Parameters
    ----------
    output_dim : int, optional
        Output dimension. Can be inferred at first call.
    alpha : float, default to 1e-6
        Factor used to initialize the inverse correlation matrix
        (``P = I / alpha``).
    batch_size : int, default to 1
        Number of update steps accumulated in buffers before the P matrix
        is refreshed.
    Wout_init : callable, default to np.zeros
        Initializer for the output weight matrix.
    bias : bool, default to True
        Whether a bias term is learned alongside the weights.
    name : str, optional
        Node name.
    """

    # A special thanks to Lionel Eyraud-Dubois and
    # Olivier Beaumont for their improvement of this method.
    def __init__(
        self,
        output_dim=None,
        alpha=1e-6,
        batch_size=1,
        Wout_init=np.zeros,
        bias=True,
        name=None,
    ):
        # Wire the module-level train/initialize functions into the generic
        # Node machinery; `Wout_init` and `bias` are bound into the
        # initializer via partial.
        super(BatchFORCE, self).__init__(
            params={"Wout": None, "bias": None, "P": None},
            hypers={"alpha": alpha, "batch_size": batch_size, "has_bias": bias},
            forward=readout_forward,
            train=train,
            initializer=partial(initialize, init_func=Wout_init, bias=bias),
            buffers_initializer=initialize_buffers,
            output_dim=output_dim,
            name=name,
        )
from functools import partial
from ..activationsfunc import get_function
from ..node import Node
def forward(node: Node, x, **kwargs):
    """Apply the node's activation function ``f`` to the input."""
    return node.f(x, **kwargs)
def initialize(node: Node, x=None, *args, **kwargs):
    """Infer the node's dimensions from the first input seen.

    Activation nodes are elementwise, so output dim equals input dim.
    """
    if x is None:
        return
    dim = x.shape[1]
    node.set_input_dim(dim)
    node.set_output_dim(dim)
class Softmax(Node):
    """Softmax activation function.

    .. math::

        y_k = \\frac{e^{\\beta x_k}}{\\sum_{i=1}^{n} e^{\\beta x_i}}

    Parameters
    ----------
    beta : float, default to 1.0
        Beta parameter of softmax, stored in ``hypers``.
    input_dim : int, optional
        Input dimension. Can be inferred at first call.
    name : str, optional
        Node name.
    dtype : Numpy dtype, default to np.float64
        Numerical type for node parameters.
    """

    def __init__(self, beta=1.0, **kwargs):
        # `beta` is both stored as a hyperparameter and bound into the
        # forward function, which forwards it to the softmax call.
        super().__init__(
            hypers={"f": get_function("softmax"), "beta": beta},
            forward=partial(forward, beta=beta),
            initializer=initialize,
            **kwargs,
        )
class Softplus(Node):
    """Softplus activation function.

    .. math::

        f(x) = \\mathrm{ln}(1 + e^{x})

    Parameters
    ----------
    input_dim : int, optional
        Input dimension. Can be inferred at first call.
    name : str, optional
        Node name.
    dtype : Numpy dtype, default to np.float64
        Numerical type for node parameters.
    """

    def __init__(self, **kwargs):
        # Stateless node: its only hyperparameter is the activation itself.
        super().__init__(
            hypers={"f": get_function("softplus")},
            forward=forward,
            initializer=initialize,
            **kwargs,
        )
class Sigmoid(Node):
    """Sigmoid activation function.

    .. math::

        f(x) = \\frac{1}{1 + e^{-x}}

    Parameters
    ----------
    input_dim : int, optional
        Input dimension. Can be inferred at first call.
    name : str, optional
        Node name.
    dtype : Numpy dtype, default to np.float64
        Numerical type for node parameters.
    """

    def __init__(self, **kwargs):
        # Stateless node: its only hyperparameter is the activation itself.
        super().__init__(
            hypers={"f": get_function("sigmoid")},
            forward=forward,
            initializer=initialize,
            **kwargs,
        )
class Tanh(Node):
    """Hyperbolic tangent activation function.

    .. math::

        f(x) = \\frac{e^x - e^{-x}}{e^x + e^{-x}}

    Parameters
    ----------
    input_dim : int, optional
        Input dimension. Can be inferred at first call.
    name : str, optional
        Node name.
    dtype : Numpy dtype, default to np.float64
        Numerical type for node parameters.
    """

    def __init__(self, **kwargs):
        # Stateless node: its only hyperparameter is the activation itself.
        super().__init__(
            hypers={"f": get_function("tanh")},
            forward=forward,
            initializer=initialize,
            **kwargs,
        )
class Identity(Node):
    """Identity function.

    .. math::

        f(x) = x

    Provided for convenience.

    Parameters
    ----------
    input_dim : int, optional
        Input dimension. Can be inferred at first call.
    name : str, optional
        Node name.
    dtype : Numpy dtype, default to np.float64
        Numerical type for node parameters.
    """

    def __init__(self, **kwargs):
        # Stateless pass-through node.
        super().__init__(
            hypers={"f": get_function("identity")},
            forward=forward,
            initializer=initialize,
            **kwargs,
        )
class ReLU(Node):
    """ReLU activation function.
    .. math::
        f(x) = x ~~ \\mathrm{if} ~~ x > 0 ~~ \\mathrm{else} ~~ 0
    :py:attr:`ReLU.hypers` **list**
    ============= ======================================================================
    ``f``         Activation function (:py:func:`reservoir.activationsfunc.relu`).
    ============= ======================================================================
    Parameters
    ----------
    input_dim : int, optional
        Input dimension. Can be inferred at first call.
    name : str, optional
        Node name.
    dtype : Numpy dtype, default to np.float64
        Numerical type for node parameters.
    """
    def __init__(self, **kwargs):
        # The activation is stored as the ``f`` hyperparameter; the shared
        # module-level `forward`/`initialize` helpers apply it element-wise.
        super(ReLU, self).__init__(
            hypers={"f": get_function("relu")},
            forward=forward,
            initializer=initialize,
            **kwargs,
        ) | /reservoirpy-fbessou-0.3.13.tar.gz/reservoirpy-fbessou-0.3.13/reservoirpy/nodes/activations.py | 0.899822 | 0.531574 | activations.py | pypi |
from ..node import Node
def _io_initialize(io_node: "Node", x=None, **kwargs):
if x is not None:
if io_node.input_dim is None:
io_node.set_input_dim(x.shape[1])
io_node.set_output_dim(x.shape[1])
def _input_forward(inp_node: "Input", x):
    # I/O nodes are pass-through: forward the data unchanged.
    return x
class Input(Node):
    """Node feeding input data to other nodes in the models.

    Allow creating an input source and connecting it to several nodes at once.

    This node has no parameters and no hyperparameters.

    Parameters
    ----------
    input_dim : int
        Input dimension. Can be inferred at first call.
    name : str
        Node name.

    Example
    -------
    An input source feeding three different nodes in parallel.

    >>> from reservoirpy.nodes import Reservoir, Input
    >>> source = Input()
    >>> res1, res2, res3 = Reservoir(100), Reservoir(100), Reservoir(100)
    >>> model = source >> [res1, res2, res3]

    A model with different input sources. Use names to identify each source at runtime.

    >>> from reservoirpy.nodes import Reservoir, Input
    >>> source1, source2 = Input(name="s1"), Input(name="s2")
    >>> res1, res2 = Reservoir(100), Reservoir(100)
    >>> model = source1 >> [res1, res2] & source2 >> [res1, res2]
    >>> outputs = model.run({"s1": np.ones((10, 5)), "s2": np.ones((10, 3))})
    """

    def __init__(self, input_dim=None, name=None, **kwargs):
        # Pass-through node: output dimension mirrors the input dimension.
        super().__init__(
            forward=_input_forward,
            initializer=_io_initialize,
            input_dim=input_dim,
            output_dim=input_dim,
            name=name,
            **kwargs,
        )
class Output(Node):
    """Convenience node which can be used to add an output to a model.
    For instance, this node can be connected to a reservoir within a model to inspect
    its states.
    Parameters
    ----------
    name : str
        Node name.
    Example
    -------
    We can use the :py:class:`Output` node to probe the hidden states of Reservoir
    in an Echo State Network:
    >>> from reservoirpy.nodes import Reservoir, Ridge, Output
    >>> reservoir = Reservoir(100)
    >>> readout = Ridge()
    >>> probe = Output(name="reservoir-states")
    >>> esn = reservoir >> readout & reservoir >> probe
    When running the model, states can then be retrieved as an output:
    >>> data = np.ones((10, 5))
    >>> outputs = esn.run(data)
    >>> states = outputs["reservoir-states"]
    """
    def __init__(self, name=None, **kwargs):
        # Output nodes are pass-through: they reuse the identity forward
        # function and the dimension-inferring initializer of Input nodes.
        super(Output, self).__init__(
            forward=_input_forward, initializer=_io_initialize, name=name, **kwargs
        ) | /reservoirpy-fbessou-0.3.13.tar.gz/reservoirpy-fbessou-0.3.13/reservoirpy/nodes/io.py | 0.845369 | 0.60903 | io.py | pypi |
import warnings
from functools import partial
from numbers import Number
from typing import Iterable
from ...mat_gen import zeros
from ...node import Node
from .base import readout_forward
from .lms import initialize as initialize_lms
from .lms import train as lms_like_train
from .rls import initialize as initialize_rls
from .rls import train as rls_like_train
# Online learning rules supported by the (deprecated) FORCE readout.
RULES = ("lms", "rls")
class FORCE(Node):
    """Single layer of neurons learning connections through online learning rules.
    Warning
    -------
    This class is deprecated since v0.3.4 and will be removed in future versions.
    Please use :py:class:`~reservoirpy.LMS` or :py:class:`~reservoirpy.RLS` instead.
    The learning rules involved are similar to Recursive Least Squares (``rls`` rule)
    as described in [1]_ or Least Mean Squares (``lms`` rule, similar to Hebbian
    learning) as described in [2]_.
    "FORCE" name refers to the training paradigm described in [1]_.
    :py:attr:`FORCE.params` **list**
    ================== =================================================================
    ``Wout``           Learned output weights (:math:`\\mathbf{W}_{out}`).
    ``bias``           Learned bias (:math:`\\mathbf{b}`).
    ``P``              Matrix :math:`\\mathbf{P}` of RLS rule (optional).
    ================== =================================================================
    :py:attr:`FORCE.hypers` **list**
    ================== =================================================================
    ``alpha``          Learning rate (:math:`\\alpha`) (:math:`1\\cdot 10^{-6}` by
                       default).
    ``input_bias``     If True, learn a bias term (True by default).
    ``rule``           One of RLS or LMS rule ("rls" by default).
    ================== =================================================================
    Parameters
    ----------
    output_dim : int, optional
        Number of units in the readout, can be inferred at first call.
    alpha : float or Python generator or iterable, default to 1e-6
        Learning rate. If an iterable or a generator is provided and the learning
        rule is "lms", then the learning rate can be changed at each timestep of
        training. A new learning rate will be drawn from the iterable or generator
        at each timestep.
    rule : {"rls", "lms"}, default to "rls"
        Learning rule applied for online training.
    Wout : callable or array-like of shape (units, targets), default to
        :py:func:`~reservoirpy.mat_gen.zeros`
        Output weights matrix or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the shape of
        the returned weight matrix.
    bias : callable or array-like of shape (units, 1), default to
        :py:func:`~reservoirpy.mat_gen.zeros`
        Bias weights vector or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the shape of
        the returned weight matrix.
    input_bias : bool, default to True
        If True, then a bias parameter will be learned along with output weights.
    name : str, optional
        Node name.
    References
    ----------
    .. [1] Sussillo, D., & Abbott, L. F. (2009). Generating Coherent Patterns of
           Activity from Chaotic Neural Networks. Neuron, 63(4), 544–557.
           https://doi.org/10.1016/j.neuron.2009.07.018
    .. [2] Hoerzer, G. M., Legenstein, R., & Maass, W. (2014). Emergence of Complex
           Computational Structures From Chaotic Neural Networks Through
           Reward-Modulated Hebbian Learning. Cerebral Cortex, 24(3), 677–690.
           https://doi.org/10.1093/cercor/bhs348
    """
    def __init__(
        self,
        output_dim=None,
        alpha=1e-6,
        rule="rls",
        Wout=zeros,
        bias=zeros,
        input_bias=True,
        name=None,
    ):
        warnings.warn(
            "'FORCE' is deprecated since v0.3.4 and will be removed "
            "in "
            "future versions. Consider using 'RLS' or 'LMS'.",
            DeprecationWarning,
        )
        params = {"Wout": None, "bias": None}
        if rule not in RULES:
            # Bug fix: this message previously interpolated the non-existent
            # attribute `self._rules`, so the intended ValueError was masked
            # by an AttributeError. Use the module-level RULES constant.
            raise ValueError(
                f"Unknown rule for FORCE learning. " f"Available rules are {RULES}."
            )
        if rule == "lms":
            train = lms_like_train
            initialize = initialize_lms
        else:
            # RLS additionally maintains the inverse correlation matrix P.
            train = rls_like_train
            initialize = initialize_rls
            params["P"] = None
        if isinstance(alpha, Number):
            # Wrap a constant learning rate into an infinite generator so that
            # the training code can always call next() on it.
            def _alpha_gen():
                while True:
                    yield alpha

            alpha_gen = _alpha_gen()
        elif isinstance(alpha, Iterable):
            alpha_gen = alpha
        else:
            raise TypeError(
                "'alpha' parameter should be a float or an iterable yielding floats."
            )
        super(FORCE, self).__init__(
            params=params,
            hypers={
                "alpha": alpha,
                "_alpha_gen": alpha_gen,
                "input_bias": input_bias,
                "rule": rule,
            },
            forward=readout_forward,
            train=train,
            initializer=partial(
                initialize, init_func=Wout, bias_init=bias, bias=input_bias
            ),
            output_dim=output_dim,
            name=name,
        )
from functools import partial
import numpy as np
from scipy import linalg
from ...mat_gen import zeros
from ...node import Node
from ...type import global_dtype
from .base import _initialize_readout, _prepare_inputs_for_learning, readout_forward
def _solve_ridge(XXT, YXT, ridge):
"""Solve Tikhonov regression."""
return linalg.solve(XXT + ridge, YXT.T, assume_a="sym")
def _accumulate(readout, xxt, yxt):
"""Aggregate Xi.Xi^T and Yi.Xi^T matrices from a state sequence i."""
XXT = readout.get_buffer("XXT")
YXT = readout.get_buffer("YXT")
XXT += xxt
YXT += yxt
def partial_backward(readout: Node, X_batch, Y_batch=None, lock=None):
    """Accumulate X.X^T and Y.X^T for one batch before the final ridge fit."""
    X, Y = _prepare_inputs_for_learning(
        X_batch,
        Y_batch,
        bias=readout.input_bias,
        allow_reshape=True,
    )
    xxt = X.T.dot(X)
    yxt = Y.T.dot(X)
    # Buffers may be Numpy memmaps shared across processes, which are not
    # thread-safe: guard writes with the lock when one is provided
    # (see ESN object).
    if lock is None:
        _accumulate(readout, xxt, yxt)
    else:
        with lock:
            _accumulate(readout, xxt, yxt)
def backward(readout: Node, *args, **kwargs):
    """Solve the ridge regression from the accumulated buffers and store
    the resulting output weights (and bias) on the readout."""
    XXT = readout.get_buffer("XXT")
    YXT = readout.get_buffer("YXT")
    dim = readout.input_dim
    if readout.input_bias:
        dim += 1
    regularizer = readout.ridge * np.eye(dim, dtype=global_dtype)
    raw_Wout = _solve_ridge(XXT, YXT, regularizer)
    if readout.input_bias:
        # The first row of the solution corresponds to the bias term.
        readout.set_param("Wout", raw_Wout[1:, :])
        readout.set_param("bias", raw_Wout[0, :][np.newaxis, :])
    else:
        readout.set_param("Wout", raw_Wout)
def initialize(readout: Node, x=None, y=None, bias_init=None, Wout_init=None):
    # Delegate to the shared readout initializer; input/output dimensions
    # are inferred from the x/y samples when available.
    _initialize_readout(
        readout, x, y, bias=readout.input_bias, init_func=Wout_init, bias_init=bias_init
    )
def initialize_buffers(readout):
    """create memmaped buffers for matrices X.X^T and Y.X^T pre-computed
    in parallel for ridge regression
    ! only memmap can be used ! Impossible to share Numpy arrays with
    different processes in r/w mode otherwise (with proper locking)
    """
    # One extra input column is needed when a bias term is learned.
    in_dim = readout.input_dim + 1 if readout.input_bias else readout.input_dim
    out_dim = readout.output_dim
    readout.create_buffer("XXT", (in_dim, in_dim))
    readout.create_buffer("YXT", (out_dim, in_dim))
class Ridge(Node):
    """A single layer of neurons learning with Tikhonov linear regression.
    Output weights of the layer are computed following:
    .. math::
        \\hat{\\mathbf{W}}_{out} = \\mathbf{YX}^\\top ~ (\\mathbf{XX}^\\top +
        \\lambda\\mathbf{Id})^{-1}
    Outputs :math:`\\mathbf{y}` of the node are the result of:
    .. math::
        \\mathbf{y} = \\mathbf{W}_{out}^\\top \\mathbf{x} + \\mathbf{b}
    where:
    - :math:`\\mathbf{X}` is the accumulation of all inputs during training;
    - :math:`\\mathbf{Y}` is the accumulation of all targets during training;
    - :math:`\\mathbf{b}` is the first row of :math:`\\hat{\\mathbf{W}}_{out}`;
    - :math:`\\mathbf{W}_{out}` is the rest of :math:`\\hat{\\mathbf{W}}_{out}`.
    If ``input_bias`` is True, then :math:`\\mathbf{b}` is non-zero, and a constant
    term is added to :math:`\\mathbf{X}` to compute it.
    :py:attr:`Ridge.params` **list**
    ================== =================================================================
    ``Wout``           Learned output weights (:math:`\\mathbf{W}_{out}`).
    ``bias``           Learned bias (:math:`\\mathbf{b}`).
    ================== =================================================================
    :py:attr:`Ridge.hypers` **list**
    ================== =================================================================
    ``ridge``          Regularization parameter (:math:`\\lambda`) (0.0 by default).
    ``input_bias``     If True, learn a bias term (True by default).
    ================== =================================================================
    Parameters
    ----------
    output_dim : int, optional
        Number of units in the readout, can be inferred at first call.
    ridge: float, default to 0.0
        L2 regularization parameter.
    Wout : callable or array-like of shape (units, targets), default to :py:func:`~reservoirpy.mat_gen.zeros`
        Output weights matrix or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the shape of
        the returned weight matrix.
    bias : callable or array-like of shape (units, 1), default to :py:func:`~reservoirpy.mat_gen.zeros`
        Bias weights vector or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the shape of
        the returned weight matrix.
    input_bias : bool, default to True
        If True, then a bias parameter will be learned along with output weights.
    name : str, optional
        Node name.
    """
    def __init__(
        self,
        output_dim=None,
        ridge=0.0,
        Wout=zeros,
        bias=zeros,
        input_bias=True,
        name=None,
    ):
        # Training is two-phased: `partial_backward` accumulates the XXT/YXT
        # buffers (possibly in parallel), then `backward` solves the
        # regularized linear system once all batches have been seen.
        super(Ridge, self).__init__(
            params={"Wout": None, "bias": None},
            hypers={"ridge": ridge, "input_bias": input_bias},
            forward=readout_forward,
            partial_backward=partial_backward,
            backward=backward,
            output_dim=output_dim,
            initializer=partial(initialize, Wout_init=Wout, bias_init=bias),
            buffers_initializer=initialize_buffers,
            name=name,
        ) | /reservoirpy-fbessou-0.3.13.tar.gz/reservoirpy-fbessou-0.3.13/reservoirpy/nodes/readouts/ridge.py | 0.840259 | 0.377914 | ridge.py | pypi |
import numpy as np
from ...node import Node
from ...utils.validation import add_bias, check_vector
def _initialize_readout(
readout, x=None, y=None, init_func=None, bias_init=None, bias=True
):
if x is not None:
in_dim = x.shape[1]
if readout.output_dim is not None:
out_dim = readout.output_dim
elif y is not None:
out_dim = y.shape[1]
else:
raise RuntimeError(
f"Impossible to initialize {readout.name}: "
f"output dimension was not specified at "
f"creation, and no teacher vector was given."
)
readout.set_input_dim(in_dim)
readout.set_output_dim(out_dim)
if callable(init_func):
W = init_func(in_dim, out_dim, dtype=readout.dtype)
elif isinstance(init_func, np.ndarray):
W = (
check_vector(init_func, caller=readout)
.reshape(readout.input_dim, readout.output_dim)
.astype(readout.dtype)
)
else:
raise ValueError(
f"Data type {type(init_func)} not "
f"understood for matrix initializer "
f"'Wout'. It should be an array or "
f"a callable returning an array."
)
if bias:
if callable(bias_init):
bias = bias_init(1, out_dim, dtype=readout.dtype)
elif isinstance(bias_init, np.ndarray):
bias = (
check_vector(bias_init)
.reshape(1, readout.output_dim)
.astype(readout.dtype)
)
else:
raise ValueError(
f"Data type {type(bias_init)} not "
f"understood for matrix initializer "
f"'bias'. It should be an array or "
f"a callable returning an array."
)
else:
bias = np.zeros((1, out_dim), dtype=readout.dtype)
readout.set_param("Wout", W)
readout.set_param("bias", bias)
def _prepare_inputs_for_learning(X=None, Y=None, bias=True, allow_reshape=False):
    """Format training inputs and targets: stack sequence lists into a single
    matrix, optionally prepend a bias column to X, and validate shapes."""

    def _as_matrix(arr):
        # Lists of sequences are stacked vertically; arrays pass through.
        if isinstance(arr, np.ndarray):
            return arr
        return np.vstack(arr)

    if X is not None:
        if bias:
            X = add_bias(X)
        X = check_vector(_as_matrix(X), allow_reshape=allow_reshape)
    if Y is not None:
        Y = check_vector(_as_matrix(Y), allow_reshape=allow_reshape)
    return X, Y
def readout_forward(node: Node, x):
return (node.Wout.T @ x.reshape(-1, 1) + node.bias.T).T
def _assemble_wout(Wout, bias, has_bias=True):
wo = Wout
if has_bias:
wo = np.r_[bias, wo]
return wo
def _split_and_save_wout(node, wo):
if node.input_bias:
Wout, bias = wo[1:, :], wo[0, :][np.newaxis, :]
node.set_param("Wout", Wout)
node.set_param("bias", bias)
else:
node.set_param("Wout", wo)
def _compute_error(node, x, y=None):
    """Error between target and prediction."""
    # The forward pass has already run: node.state() holds the prediction.
    prediction = node.state()
    error = prediction - y
    # x.T is the column-vector input reused by the LMS/RLS learning rules.
    return error, x.T | /reservoirpy-fbessou-0.3.13.tar.gz/reservoirpy-fbessou-0.3.13/reservoirpy/nodes/readouts/base.py | 0.637482 | 0.388415 | base.py | pypi |
from functools import partial
import numpy as np
from ...mat_gen import zeros
from ...node import Node
from .base import (
_assemble_wout,
_compute_error,
_initialize_readout,
_prepare_inputs_for_learning,
_split_and_save_wout,
readout_forward,
)
def _rls(P, r, e):
"""Recursive Least Squares learning rule."""
k = np.dot(P, r)
rPr = np.dot(r.T, k)
c = float(1.0 / (1.0 + rPr))
P = P - c * np.outer(k, k)
dw = -c * np.outer(e, k)
return dw, P
def train(node: "RLS", x, y=None):
    """Train a readout using RLS learning rule."""
    x, y = _prepare_inputs_for_learning(x, y, bias=node.input_bias, allow_reshape=True)
    error, r = _compute_error(node, x, y)
    dw, P = _rls(node.P, r, error)
    # Apply the weight delta to the assembled [bias; Wout] matrix, then
    # split it back into the node parameters.
    updated = _assemble_wout(node.Wout, node.bias, node.input_bias) + dw.T
    _split_and_save_wout(node, updated)
    node.set_param("P", P)
def initialize(
    readout: "RLS", x=None, y=None, init_func=None, bias_init=None, bias=None
):
    """Initialize an RLS readout: Wout/bias parameters and the P matrix."""
    _initialize_readout(readout, x, y, init_func, bias_init, bias)
    if x is None:
        return
    dim = readout.input_dim + 1 if readout.input_bias else readout.input_dim
    # P starts as a scaled identity; alpha controls its diagonal magnitude.
    readout.set_param("P", np.eye(dim) / readout.alpha)
class RLS(Node):
    """Single layer of neurons learning connections using Recursive Least Squares
    algorithm.
    The learning rules is well described in [1]_.
    :py:attr:`RLS.params` **list**
    ================== =================================================================
    ``Wout``           Learned output weights (:math:`\\mathbf{W}_{out}`).
    ``bias``           Learned bias (:math:`\\mathbf{b}`).
    ``P``              Matrix :math:`\\mathbf{P}` of RLS rule.
    ================== =================================================================
    :py:attr:`RLS.hypers` **list**
    ================== =================================================================
    ``alpha``          Diagonal value of matrix P (:math:`\\alpha`) (:math:`1\\cdot 10^{-6}` by default).
    ``input_bias``     If True, learn a bias term (True by default).
    ================== =================================================================
    Parameters
    ----------
    output_dim : int, optional
        Number of units in the readout, can be inferred at first call.
    alpha : float, default to 1e-6
        Diagonal value of matrix P at initialization.
    Wout : callable or array-like of shape (units, targets), default to :py:func:`~reservoirpy.mat_gen.zeros`
        Output weights matrix or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the shape of
        the returned weight matrix.
    bias : callable or array-like of shape (units, 1), default to :py:func:`~reservoirpy.mat_gen.zeros`
        Bias weights vector or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the shape of
        the returned weight matrix.
    input_bias : bool, default to True
        If True, then a bias parameter will be learned along with output weights.
    name : str, optional
        Node name.
    References
    ----------
    .. [1] Sussillo, D., & Abbott, L. F. (2009). Generating Coherent Patterns of
           Activity from Chaotic Neural Networks. Neuron, 63(4), 544–557.
           https://doi.org/10.1016/j.neuron.2009.07.018
    """
    def __init__(
        self,
        output_dim=None,
        alpha=1e-6,
        Wout=zeros,
        bias=zeros,
        input_bias=True,
        name=None,
    ):
        # P is created lazily by `initialize` once the input dimension is
        # known; `train` then updates Wout/bias/P at each timestep.
        super(RLS, self).__init__(
            params={"Wout": None, "bias": None, "P": None},
            hypers={
                "alpha": alpha,
                "input_bias": input_bias,
            },
            forward=readout_forward,
            train=train,
            initializer=partial(
                initialize, init_func=Wout, bias_init=bias, bias=input_bias
            ),
            output_dim=output_dim,
            name=name,
        ) | /reservoirpy-fbessou-0.3.13.tar.gz/reservoirpy-fbessou-0.3.13/reservoirpy/nodes/readouts/rls.py | 0.904445 | 0.478346 | rls.py | pypi |
from functools import partial
from numbers import Number
from typing import Iterable
import numpy as np
from ...mat_gen import zeros
from ...node import Node
from .base import (
_assemble_wout,
_compute_error,
_initialize_readout,
_prepare_inputs_for_learning,
_split_and_save_wout,
readout_forward,
)
def _lms(alpha, r, e):
"""Least Mean Squares learning rule."""
# learning rate is a generator to allow scheduling
dw = -next(alpha) * np.outer(e, r)
return dw
def train(node: "LMS", x, y=None):
    """Train a readout using LMS learning rule."""
    x, y = _prepare_inputs_for_learning(x, y, bias=node.input_bias, allow_reshape=True)
    error, r = _compute_error(node, x, y)
    dw = _lms(node._alpha_gen, r, error)
    # Apply the weight delta to the assembled [bias; Wout] matrix, then
    # split it back into the node parameters.
    updated = _assemble_wout(node.Wout, node.bias, node.input_bias) + dw.T
    _split_and_save_wout(node, updated)
def initialize(
    readout: "LMS", x=None, y=None, init_func=None, bias_init=None, bias=None
):
    # LMS keeps no extra state: only Wout/bias need initialization.
    _initialize_readout(readout, x, y, init_func, bias_init, bias)
class LMS(Node):
    """Single layer of neurons learning connections using Least Mean Squares
    algorithm.
    The learning rules is well described in [1]_.
    :py:attr:`LMS.params` **list**
    ================== =================================================================
    ``Wout``           Learned output weights (:math:`\\mathbf{W}_{out}`).
    ``bias``           Learned bias (:math:`\\mathbf{b}`).
    ================== =================================================================
    :py:attr:`LMS.hypers` **list**
    ================== =================================================================
    ``alpha``          Learning rate (:math:`\\alpha`) (:math:`1\\cdot 10^{-6}` by default).
    ``input_bias``     If True, learn a bias term (True by default).
    ================== =================================================================
    Parameters
    ----------
    output_dim : int, optional
        Number of units in the readout, can be inferred at first call.
    alpha : float or Python generator or iterable, default to 1e-6
        Learning rate. If an iterable or a generator is provided, the learning rate can
        be changed at each timestep of training. A new learning rate will be drawn from
        the iterable or generator at each timestep.
    Wout : callable or array-like of shape (units, targets), default to :py:func:`~reservoirpy.mat_gen.zeros`
        Output weights matrix or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the shape of
        the returned weight matrix.
    bias : callable or array-like of shape (units, 1), default to :py:func:`~reservoirpy.mat_gen.zeros`
        Bias weights vector or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the shape of
        the returned weight matrix.
    input_bias : bool, default to True
        If True, then a bias parameter will be learned along with output weights.
    name : str, optional
        Node name.
    References
    ----------
    .. [1] Sussillo, D., & Abbott, L. F. (2009). Generating Coherent Patterns of
           Activity from Chaotic Neural Networks. Neuron, 63(4), 544–557.
           https://doi.org/10.1016/j.neuron.2009.07.018
    """
    def __init__(
        self,
        output_dim=None,
        alpha=1e-6,
        Wout=zeros,
        bias=zeros,
        input_bias=True,
        name=None,
    ):
        # A constant learning rate is wrapped into an infinite generator so
        # the training rule can always draw the next rate with next().
        if isinstance(alpha, Number):

            def _alpha_gen():
                while True:
                    yield alpha

            alpha_gen = _alpha_gen()
        elif isinstance(alpha, Iterable):
            alpha_gen = alpha
        else:
            raise TypeError(
                "'alpha' parameter should be a float or an iterable yielding floats."
            )
        super(LMS, self).__init__(
            params={"Wout": None, "bias": None},
            hypers={
                "alpha": alpha,
                "_alpha_gen": alpha_gen,
                "input_bias": input_bias,
            },
            forward=readout_forward,
            train=train,
            initializer=partial(
                initialize, init_func=Wout, bias_init=bias, bias=input_bias
            ),
            output_dim=output_dim,
            name=name,
        ) | /reservoirpy-fbessou-0.3.13.tar.gz/reservoirpy-fbessou-0.3.13/reservoirpy/nodes/readouts/lms.py | 0.944164 | 0.500732 | lms.py | pypi |
import sys
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
from functools import partial
from typing import Callable, Optional, Sequence, Union
from ...activationsfunc import get_function, identity, tanh
from ...mat_gen import bernoulli, normal
from ...node import Node
from ...type import Weights
from ...utils.random import noise
from ...utils.validation import is_array
from .base import forward_external, forward_internal, initialize, initialize_feedback
class Reservoir(Node):
    """Pool of leaky-integrator neurons with random recurrent connections.
Reservoir neurons states, gathered in a vector :math:`\\mathbf{x}`, may follow
one of the two update rules below:
- **1.** Activation function is part of the neuron internal state
(equation called ``internal``):
.. math::
\\mathbf{x}[t+1] = (1 - \\mathrm{lr}) * \\mathbf{x}[t] + \\mathrm{lr}
* (\\mathbf{W}_{in} \\cdot (\\mathbf{u}[t+1]+c_{in}*\\xi)
+ \\mathbf{W} \\cdot \\mathbf{x}[t]
+ \\mathbf{W}_{fb} \\cdot (g(\\mathbf{y}[t])+c_{fb}*\\xi) + \\mathbf{b})
+ c * \\xi
- **2.** Activation function is applied on emitted internal states
(equation called ``external``):
.. math::
\\mathbf{r}[t+1] = (1 - \\mathrm{lr}) * \\mathbf{r}[t] + \\mathrm{lr}
* (\\mathbf{W}_{in} \\cdot (\\mathbf{u}[t+1]+c_{in}*\\xi)
+ \\mathbf{W} \\cdot \\mathbf{x}[t]
+ \\mathbf{W}_{fb} \\cdot (g(\\mathbf{y}[t])+c_{fb}*\\xi) + \\mathbf{b})
.. math::
\\mathbf{x}[t+1] = f(\\mathbf{r}[t+1]) + c * \\xi
where:
- :math:`\\mathbf{x}` is the output activation vector of the reservoir;
- :math:`\\mathbf{r}` is the (optional) internal activation vector of the reservoir;
- :math:`\\mathbf{u}` is the input timeseries;
- :math:`\\mathbf{y}` is a feedback vector;
- :math:`\\xi` is a random noise;
- :math:`f` and :math:`g` are activation functions.
:py:attr:`Reservoir.params` **list:**
================== ===================================================================
``W`` Recurrent weights matrix (:math:`\\mathbf{W}`).
``Win`` Input weights matrix (:math:`\\mathbf{W}_{in}`).
``Wfb`` Feedback weights matrix (:math:`\\mathbf{W}_{fb}`).
``bias`` Input bias vector (:math:`\\mathbf{b}`).
    ``internal_state`` Internal state used with equation="external" (:math:`\\mathbf{r}`).
================== ===================================================================
:py:attr:`Reservoir.hypers` **list:**
======================= ========================================================
``lr`` Leaking rate (1.0 by default) (:math:`\\mathrm{lr}`).
``sr`` Spectral radius of ``W`` (optional).
``input_scaling`` Input scaling (float or array) (1.0 by default).
``fb_scaling`` Feedback scaling (float or array) (1.0 by default).
``rc_connectivity`` Connectivity (or density) of ``W`` (0.1 by default).
``input_connectivity`` Connectivity (or density) of ``Win`` (0.1 by default).
``fb_connectivity`` Connectivity (or density) of ``Wfb`` (0.1 by default).
``noise_in`` Input noise gain (0 by default) (:math:`c_{in} * \\xi`).
``noise_rc`` Reservoir state noise gain (0 by default) (:math:`c * \\xi`).
``noise_fb`` Feedback noise gain (0 by default) (:math:`c_{fb} * \\xi`).
``noise_type`` Distribution of noise (normal by default) (:math:`\\xi \\sim \\mathrm{Noise~type}`).
``activation`` Activation of the reservoir units (tanh by default) (:math:`f`).
``fb_activation`` Activation of the feedback units (identity by default) (:math:`g`).
``units`` Number of neuronal units in the reservoir.
``noise_generator`` A random state generator.
======================= ========================================================
Parameters
----------
units : int, optional
        Number of reservoir units. If None, the number of units will be inferred from
the ``W`` matrix shape.
lr : float, default to 1.0
Neurons leak rate. Must be in :math:`[0, 1]`.
sr : float, optional
Spectral radius of recurrent weight matrix.
input_bias : bool, default to True
If False, no bias is added to inputs.
noise_rc : float, default to 0.0
Gain of noise applied to reservoir activations.
noise_in : float, default to 0.0
Gain of noise applied to input inputs.
noise_fb : float, default to 0.0
Gain of noise applied to feedback signal.
noise_type : str, default to "normal"
Distribution of noise. Must be a Numpy random variable generator
distribution (see :py:class:`numpy.random.Generator`).
input_scaling : float or array-like of shape (features,), default to 1.0.
Input gain. An array of the same dimension as the inputs can be used to
set up different input scaling for each feature.
bias_scaling: float, default to 1.0
Bias gain.
fb_scaling : float or array-like of shape (features,), default to 1.0
Feedback gain. An array of the same dimension as the feedback can be used to
set up different feedback scaling for each feature.
input_connectivity : float, default to 0.1
Connectivity of input neurons, i.e. ratio of input neurons connected
to reservoir neurons. Must be in :math:`]0, 1]`.
rc_connectivity : float, default to 0.1
Connectivity of recurrent weight matrix, i.e. ratio of reservoir
neurons connected to other reservoir neurons, including themselves.
Must be in :math:`]0, 1]`.
fb_connectivity : float, default to 0.1
        Connectivity of feedback neurons, i.e. ratio of feedback neurons
connected to reservoir neurons. Must be in :math:`]0, 1]`.
Win : callable or array-like of shape (units, features), default to :py:func:`~reservoirpy.mat_gen.bernoulli`
Input weights matrix or initializer. If a callable (like a function) is used,
then this function should accept any keywords
parameters and at least two parameters that will be used to define the shape of
the returned weight matrix.
W : callable or array-like of shape (units, units), default to :py:func:`~reservoirpy.mat_gen.normal`
Recurrent weights matrix or initializer. If a callable (like a function) is
used, then this function should accept any keywords
parameters and at least two parameters that will be used to define the shape of
the returned weight matrix.
bias : callable or array-like of shape (units, 1), default to :py:func:`~reservoirpy.mat_gen.bernoulli`
Bias weights vector or initializer. If a callable (like a function) is
used, then this function should accept any keywords
parameters and at least two parameters that will be used to define the shape of
the returned weight matrix.
Wfb : callable or array-like of shape (units, feedback), default to :py:func:`~reservoirpy.mat_gen.bernoulli`
Feedback weights matrix or initializer. If a callable (like a function) is
used, then this function should accept any keywords
parameters and at least two parameters that will be used to define the shape of
the returned weight matrix.
fb_activation : str or callable, default to :py:func:`~reservoirpy.activationsfunc.identity`
Feedback activation function.
- If a str, should be a :py:mod:`~reservoirpy.activationsfunc`
function name.
- If a callable, should be an element-wise operator on arrays.
activation : str or callable, default to :py:func:`~reservoirpy.activationsfunc.tanh`
Reservoir units activation function.
- If a str, should be a :py:mod:`~reservoirpy.activationsfunc`
function name.
- If a callable, should be an element-wise operator on arrays.
equation : {"internal", "external"}, default to "internal"
If "internal", will use equation defined in equation 1 to update the state of
reservoir units. If "external", will use the equation defined in equation 2
(see above).
feedback_dim : int, optional
Feedback dimension. Can be inferred at first call.
input_dim : int, optional
Input dimension. Can be inferred at first call.
name : str, optional
Node name.
dtype : Numpy dtype, default to np.float64
Numerical type for node parameters.
seed : int or :py:class:`numpy.random.Generator`, optional
A random state seed, for noise generation.
Note
----
If W, Win, bias or Wfb are initialized with an array-like matrix, then all
    initializers parameters such as spectral radius (``sr``) or input scaling
(``input_scaling``) are ignored.
See :py:mod:`~reservoirpy.mat_gen` for more information.
Example
-------
>>> from reservoirpy.nodes import Reservoir
>>> reservoir = Reservoir(100, lr=0.2, sr=0.8) # a 100 neurons reservoir
Using the :py:func:`~reservoirpy.datasets.mackey_glass` timeseries:
>>> from reservoirpy.datasets import mackey_glass
>>> x = mackey_glass(200)
>>> states = reservoir.run(x)
.. plot::
from reservoirpy.nodes import Reservoir
reservoir = Reservoir(100, lr=0.2, sr=0.8)
from reservoirpy.datasets import mackey_glass
x = mackey_glass(200)
states = reservoir.run(x)
fig, ax = plt.subplots(6, 1, figsize=(7, 10), sharex=True)
ax[0].plot(x)
ax[0].grid()
ax[0].set_title("Input (Mackey-Glass)")
for i in range(1, 6):
ax[i].plot(states[:, i], label=f"Neuron {i}")
ax[i].legend()
ax[i].grid()
ax[-1].set_xlabel("Timesteps")
"""
    def __init__(
        self,
        units: int = None,
        lr: float = 1.0,
        sr: Optional[float] = None,
        input_bias: bool = True,
        noise_rc: float = 0.0,
        noise_in: float = 0.0,
        noise_fb: float = 0.0,
        noise_type: str = "normal",
        input_scaling: Union[float, Sequence] = 1.0,
        bias_scaling: float = 1.0,
        fb_scaling: Union[float, Sequence] = 1.0,
        input_connectivity: float = 0.1,
        rc_connectivity: float = 0.1,
        fb_connectivity: float = 0.1,
        Win: Union[Weights, Callable] = bernoulli,
        W: Union[Weights, Callable] = normal,
        Wfb: Union[Weights, Callable] = bernoulli,
        bias: Union[Weights, Callable] = bernoulli,
        fb_activation: Union[str, Callable] = identity,
        activation: Union[str, Callable] = tanh,
        equation: Literal["internal", "external"] = "internal",
        input_dim: Optional[int] = None,
        feedback_dim: Optional[int] = None,
        seed=None,
        **kwargs,
    ):
        # The reservoir size must be known: either given explicitly through
        # 'units', or deduced later from an explicit weight matrix 'W'.
        if units is None and not is_array(W):
            raise ValueError(
                "'units' parameter must not be None if 'W' parameter is not "
                "a matrix."
            )

        # Select the state-update rule (see class docstring): "internal"
        # applies the leaky integration before the activation, "external"
        # after it.
        if equation == "internal":
            forward = forward_internal
        elif equation == "external":
            forward = forward_external
        else:
            raise ValueError(
                "'equation' parameter must be either 'internal' or 'external'."
            )

        # Activation functions may be given by name; resolve them to callables.
        if type(activation) is str:
            activation = get_function(activation)
        if type(fb_activation) is str:
            fb_activation = get_function(fb_activation)

        super(Reservoir, self).__init__(
            # Feedback weights are initialized lazily, once the feedback
            # dimension is known (at first call or teacher connection).
            fb_initializer=partial(
                initialize_feedback,
                Wfb_init=Wfb,
                fb_scaling=fb_scaling,
                fb_connectivity=fb_connectivity,
                seed=seed,
            ),
            # Learnable/stateful parameters, filled in by the initializers.
            params={
                "W": None,
                "Win": None,
                "Wfb": None,
                "bias": None,
                "internal_state": None,
            },
            hypers={
                "lr": lr,
                "sr": sr,
                "input_scaling": input_scaling,
                "bias_scaling": bias_scaling,
                "fb_scaling": fb_scaling,
                "rc_connectivity": rc_connectivity,
                "input_connectivity": input_connectivity,
                "fb_connectivity": fb_connectivity,
                "noise_in": noise_in,
                "noise_rc": noise_rc,
                # Note: the feedback noise gain is stored under 'noise_out'.
                "noise_out": noise_fb,
                "noise_type": noise_type,
                "activation": activation,
                "fb_activation": fb_activation,
                "units": units,
                "noise_generator": partial(noise, seed=seed),
            },
            forward=forward,
            initializer=partial(
                initialize,
                sr=sr,
                input_scaling=input_scaling,
                bias_scaling=bias_scaling,
                input_connectivity=input_connectivity,
                rc_connectivity=rc_connectivity,
                W_init=W,
                Win_init=Win,
                bias_init=bias,
                input_bias=input_bias,
                seed=seed,
            ),
            output_dim=units,
            feedback_dim=feedback_dim,
            input_dim=input_dim,
            **kwargs,
        )
import sys
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
from functools import partial
from typing import Callable, Optional, Sequence, Union
import numpy as np
from ..._base import check_xy
from ...activationsfunc import get_function, identity
from ...mat_gen import bernoulli, uniform
from ...node import Unsupervised, _init_with_sequences
from ...type import Weights
from ...utils.random import noise
from ...utils.validation import is_array
from .base import forward_external
from .base import initialize as initialize_base
from .base import initialize_feedback
def gaussian_gradients(x, y, a, mu, sigma, eta):
    """Gradients of the KL loss for tanh neurons, driving the output
    distribution toward Normal(mu, sigma).

    Returns the ``(delta_a, delta_b)`` updates for the gain and bias of the
    intrinsic plasticity transform.
    """
    var = sigma**2
    grad_b = -eta * (-(mu / var) + (y / var) * (2 * var + 1 - y**2 + mu * y))
    grad_a = (eta / a) + grad_b * x
    return grad_a, grad_b
def exp_gradients(x, y, a, mu, eta):
    """Gradients of the KL loss for sigmoid neurons, driving the output
    distribution toward Exponential(lambda = 1/mu).

    Returns the ``(delta_a, delta_b)`` updates for the gain and bias of the
    intrinsic plasticity transform.
    """
    grad_b = eta * (1 - (2 + (1 / mu)) * y + (y**2) / mu)
    grad_a = eta / a + grad_b * x
    return grad_a, grad_b
def apply_gradients(a, b, delta_a, delta_b):
    """Return the intrinsic-plasticity parameters updated by their gradients:
    ``(a + delta_a, b + delta_b)``."""
    return a + delta_a, b + delta_b
def ip(reservoir, pre_state, post_state):
    """Perform one intrinsic-plasticity step.

    Adjusts the gain ``a`` and bias ``b`` so that
    ``post_state = f(a * pre_state + b)`` approaches the target output
    distribution: Normal(mu, sigma) for tanh units, Exponential(1/mu)
    for sigmoid units.
    """
    a, b = reservoir.a, reservoir.b
    mu, eta = reservoir.mu, reservoir.learning_rate

    if reservoir.activation_type == "tanh":
        delta_a, delta_b = gaussian_gradients(
            x=pre_state.T,
            y=post_state.T,
            a=a,
            mu=mu,
            sigma=reservoir.sigma,
            eta=eta,
        )
    else:  # sigmoid activation
        delta_a, delta_b = exp_gradients(
            x=pre_state.T, y=post_state.T, a=a, mu=mu, eta=eta
        )

    return apply_gradients(a=a, b=b, delta_a=delta_a, delta_b=delta_b)
def ip_activation(state, *, reservoir, f):
    """Activation with intrinsic plasticity applied: ``f(a * state + b)``,
    where ``a`` and ``b`` are read from the reservoir."""
    return f(reservoir.a * state + reservoir.b)
def backward(reservoir: "IPReservoir", X=None, *args, **kwargs):
    """Unsupervised training loop: apply one intrinsic-plasticity update per
    timestep of every sequence, repeated for ``reservoir.epochs`` epochs."""
    for _ in range(reservoir.epochs):
        for sequence in X:
            for timestep in sequence:
                post_state = reservoir.call(timestep.reshape(1, -1))
                pre_state = reservoir.internal_state
                new_a, new_b = ip(reservoir, pre_state, post_state)
                reservoir.set_param("a", new_a)
                reservoir.set_param("b", new_b)
def initialize(reservoir, *args, **kwargs):
    """Initialize reservoir weights, then set the intrinsic-plasticity
    parameters to the identity transform (gain a=1, bias b=0)."""
    initialize_base(reservoir, *args, **kwargs)
    dim = reservoir.output_dim
    reservoir.set_param("a", np.ones((dim, 1)))
    reservoir.set_param("b", np.zeros((dim, 1)))
class IPReservoir(Unsupervised):
    """Pool of neurons with random recurrent connections, tuned using Intrinsic
    Plasticity.

    Intrinsic Plasticity is applied as described in [1]_ and [2]_.

    Reservoir neurons states, gathered in a vector :math:`\\mathbf{x}`, follow
    the update rule below:

    .. math::

        \\mathbf{r}[t+1] = (1 - \\mathrm{lr}) * \\mathbf{r}[t] + \\mathrm{lr}
        * (\\mathbf{W}_{in} \\cdot (\\mathbf{u}[t+1]+c_{in}*\\xi)
        + \\mathbf{W} \\cdot \\mathbf{x}[t]
        + \\mathbf{W}_{fb} \\cdot (g(\\mathbf{y}[t])+c_{fb}*\\xi) + \\mathbf{b}_{in})

    .. math::

        \\mathbf{x}[t+1] = f(\\mathbf{a}*\\mathbf{r}[t+1]+\\mathbf{b}) + c * \\xi

    Parameters :math:`\\mathbf{a}` and :math:`\\mathbf{b}` are updated following two
    different rules:

    - **1.** Neuron activation is tanh:

      In that case, output distribution should be a Gaussian distribution of parameters
      (:math:`\\mu`, :math:`\\sigma`). The learning rule to obtain this output
      distribution is described in [2]_.

    - **2.** Neuron activation is sigmoid:

      In that case, output distribution should be an exponential distribution of
      parameter :math:`\\mu = \\frac{1}{\\lambda}`.
      The learning rule to obtain this output distribution is described in [1]_ and [2]_.

    where:

    - :math:`\\mathbf{x}` is the output activation vector of the reservoir;
    - :math:`\\mathbf{r}` is the internal activation vector of the reservoir;
    - :math:`\\mathbf{u}` is the input timeseries;
    - :math:`\\mathbf{y}` is a feedback vector;
    - :math:`\\xi` is a random noise;
    - :math:`f` and :math:`g` are activation functions.

    :py:attr:`IPReservoir.params` **list:**

    ================== =================================================================
    ``W``              Recurrent weights matrix (:math:`\\mathbf{W}`).
    ``Win``            Input weights matrix (:math:`\\mathbf{W}_{in}`).
    ``Wfb``            Feedback weights matrix (:math:`\\mathbf{W}_{fb}`).
    ``bias``           Input bias vector (:math:`\\mathbf{b}_{in}`).
    ``internal_state`` Internal state (:math:`\\mathbf{r}`).
    ``a``              Gain of reservoir activation (:math:`\\mathbf{a}`).
    ``b``              Bias of reservoir activation (:math:`\\mathbf{b}`).
    ================== =================================================================

    :py:attr:`IPReservoir.hypers` **list:**

    ======================= ========================================================
    ``lr``                  Leaking rate (1.0 by default) (:math:`\\mathrm{lr}`).
    ``sr``                  Spectral radius of ``W`` (optional).
    ``mu``                  Mean of the target distribution (0.0 by default) (:math:`\\mu`).
    ``sigma``               Variance of the target distribution (1.0 by default) (:math:`\\sigma`).
    ``learning_rate``       Learning rate (5e-4 by default).
    ``epochs``              Number of epochs for training (1 by default).
    ``input_scaling``       Input scaling (float or array) (1.0 by default).
    ``fb_scaling``          Feedback scaling (float or array) (1.0 by default).
    ``rc_connectivity``     Connectivity (or density) of ``W`` (0.1 by default).
    ``input_connectivity``  Connectivity (or density) of ``Win`` (0.1 by default).
    ``fb_connectivity``     Connectivity (or density) of ``Wfb`` (0.1 by default).
    ``noise_in``            Input noise gain (0 by default) (:math:`c_{in} * \\xi`).
    ``noise_rc``            Reservoir state noise gain (0 by default) (:math:`c*\\xi`).
    ``noise_fb``            Feedback noise gain (0 by default) (:math:`c_{fb}*\\xi`).
    ``noise_type``          Distribution of noise (normal by default) (:math:`\\xi\\sim\\mathrm{Noise~type}`).
    ``activation``          Activation of the reservoir units (tanh by default) (:math:`f`).
    ``fb_activation``       Activation of the feedback units (identity by default) (:math:`g`).
    ``units``               Number of neuronal units in the reservoir.
    ``noise_generator``     A random state generator.
    ======================= ========================================================

    Parameters
    ----------
    units : int, optional
        Number of reservoir units. If None, the number of units will be inferred from
        the ``W`` matrix shape.
    lr : float, default to 1.0
        Neurons leak rate. Must be in :math:`[0, 1]`.
    sr : float, optional
        Spectral radius of recurrent weight matrix.
    mu : float, default to 0.0
        Mean of the target distribution.
    sigma : float, default to 1.0
        Variance of the target distribution.
    learning_rate : float, default to 5e-4
        Learning rate.
    epochs : int, default to 1
        Number of training iterations.
    input_bias : bool, default to True
        If False, no bias is added to inputs.
    noise_rc : float, default to 0.0
        Gain of noise applied to reservoir activations.
    noise_in : float, default to 0.0
        Gain of noise applied to input inputs.
    noise_fb : float, default to 0.0
        Gain of noise applied to feedback signal.
    noise_type : str, default to "normal"
        Distribution of noise. Must be a Numpy random variable generator
        distribution (see :py:class:`numpy.random.Generator`).
    input_scaling : float or array-like of shape (features,), default to 1.0.
        Input gain. An array of the same dimension as the inputs can be used to
        set up different input scaling for each feature.
    bias_scaling: float, default to 1.0
        Bias gain.
    fb_scaling : float or array-like of shape (features,), default to 1.0
        Feedback gain. An array of the same dimension as the feedback can be used to
        set up different feedback scaling for each feature.
    input_connectivity : float, default to 0.1
        Connectivity of input neurons, i.e. ratio of input neurons connected
        to reservoir neurons. Must be in :math:`]0, 1]`.
    rc_connectivity : float, default to 0.1
        Connectivity of recurrent weight matrix, i.e. ratio of reservoir
        neurons connected to other reservoir neurons, including themselves.
        Must be in :math:`]0, 1]`.
    fb_connectivity : float, default to 0.1
        Connectivity of feedback neurons, i.e. ratio of feedback neurons
        connected to reservoir neurons. Must be in :math:`]0, 1]`.
    Win : callable or array-like of shape (units, features), default to :py:func:`~reservoirpy.mat_gen.bernoulli`
        Input weights matrix or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the
        shape of
        the returned weight matrix.
    W : callable or array-like of shape (units, units), default to :py:func:`~reservoirpy.mat_gen.uniform`
        Recurrent weights matrix or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the
        shape of
        the returned weight matrix.
    bias : callable or array-like of shape (units, 1), default to :py:func:`~reservoirpy.mat_gen.bernoulli`
        Bias weights vector or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the
        shape of
        the returned weight matrix.
    Wfb : callable or array-like of shape (units, feedback), default to :py:func:`~reservoirpy.mat_gen.bernoulli`
        Feedback weights matrix or initializer. If a callable (like a function) is
        used, then this function should accept any keywords
        parameters and at least two parameters that will be used to define the
        shape of
        the returned weight matrix.
    fb_activation : str or callable, default to :py:func:`~reservoirpy.activationsfunc.identity`
        Feedback activation function.
        - If a str, should be a :py:mod:`~reservoirpy.activationsfunc`
        function name.
        - If a callable, should be an element-wise operator on arrays.
    activation : {"tanh", "sigmoid"}, default to "tanh"
        Reservoir units activation function.
    feedback_dim : int, optional
        Feedback dimension. Can be inferred at first call.
    input_dim : int, optional
        Input dimension. Can be inferred at first call.
    name : str, optional
        Node name.
    dtype : Numpy dtype, default to np.float64
        Numerical type for node parameters.
    seed : int or :py:class:`numpy.random.Generator`, optional
        A random state seed, for noise generation.

    References
    ----------
    .. [1] Triesch, J. (2005). A Gradient Rule for the Plasticity of a
           Neuron’s Intrinsic Excitability. In W. Duch, J. Kacprzyk,
           E. Oja, & S. Zadrożny (Eds.), Artificial Neural Networks:
           Biological Inspirations – ICANN 2005 (pp. 65–70).
           Springer. https://doi.org/10.1007/11550822_11

    .. [2] Schrauwen, B., Wardermann, M., Verstraeten, D., Steil, J. J.,
           & Stroobandt, D. (2008). Improving reservoirs using intrinsic
           plasticity. Neurocomputing, 71(7), 1159–1171.
           https://doi.org/10.1016/j.neucom.2007.12.020

    Example
    -------
    >>> from reservoirpy.nodes import IPReservoir
    >>> reservoir = IPReservoir(
    ...                 100, mu=0.0, sigma=0.1, sr=0.95, activation="tanh", epochs=10)

    We can fit the intrinsic plasticity parameters to reach a normal distribution
    of the reservoir activations.
    Using the :py:func:`~reservoirpy.datasets.narma` timeseries:

    >>> from reservoirpy.datasets import narma
    >>> x = narma(1000)
    >>> reservoir.fit(x, warmup=100)
    >>> states = reservoir.run(x)

    .. plot:: ./api/generated/intrinsic_plasticity_example.py
    """

    def __init__(
        self,
        units: int = None,
        sr: Optional[float] = None,
        lr: float = 1.0,
        mu: float = 0.0,
        sigma: float = 1.0,
        learning_rate: float = 5e-4,
        epochs: int = 1,
        input_bias: bool = True,
        noise_rc: float = 0.0,
        noise_in: float = 0.0,
        noise_fb: float = 0.0,
        noise_type: str = "normal",
        input_scaling: Union[float, Sequence] = 1.0,
        bias_scaling: float = 1.0,
        fb_scaling: Union[float, Sequence] = 1.0,
        input_connectivity: Optional[float] = 0.1,
        rc_connectivity: Optional[float] = 0.1,
        fb_connectivity: Optional[float] = 0.1,
        Win: Union[Weights, Callable] = bernoulli,
        W: Union[Weights, Callable] = uniform,
        Wfb: Union[Weights, Callable] = bernoulli,
        bias: Union[Weights, Callable] = bernoulli,
        feedback_dim: int = None,
        fb_activation: Union[str, Callable] = identity,
        activation: Literal["tanh", "sigmoid"] = "tanh",
        name=None,
        seed=None,
        **kwargs,
    ):
        # The reservoir size must be known: either given explicitly through
        # 'units', or deduced later from an explicit weight matrix 'W'.
        if units is None and not is_array(W):
            raise ValueError(
                "'units' parameter must not be None if 'W' parameter is not "
                "a matrix."
            )

        # Intrinsic plasticity gradients are only defined for tanh (Gaussian
        # target) and sigmoid (exponential target) activations.
        if activation not in ["tanh", "sigmoid"]:
            raise ValueError(
                f"Activation '{activation}' must be 'tanh' or 'sigmoid' when "
                "appliying intrinsic plasticity."
            )

        super(IPReservoir, self).__init__(
            # Feedback weights are initialized lazily, once the feedback
            # dimension is known.
            fb_initializer=partial(
                initialize_feedback,
                Wfb_init=Wfb,
                fb_scaling=fb_scaling,
                fb_connectivity=fb_connectivity,
                seed=seed,
            ),
            # 'a' and 'b' are the intrinsic-plasticity gain and bias, trained
            # by the 'backward' rule; weights are filled by the initializer.
            params={
                "W": None,
                "Win": None,
                "Wfb": None,
                "bias": None,
                "a": None,
                "b": None,
                "internal_state": None,
            },
            hypers={
                "sr": sr,
                "lr": lr,
                "mu": mu,
                "sigma": sigma,
                "learning_rate": learning_rate,
                "epochs": epochs,
                "input_bias": input_bias,
                "input_scaling": input_scaling,
                "fb_scaling": fb_scaling,
                "rc_connectivity": rc_connectivity,
                "input_connectivity": input_connectivity,
                "fb_connectivity": fb_connectivity,
                "noise_in": noise_in,
                "noise_rc": noise_rc,
                # Note: the feedback noise gain is stored under 'noise_out'.
                "noise_out": noise_fb,
                "noise_type": noise_type,
                "activation_type": activation,
                # The effective activation wraps the base function with the
                # learned affine transform: f(a*x + b).
                "activation": partial(
                    ip_activation, reservoir=self, f=get_function(activation)
                ),
                "fb_activation": fb_activation,
                "units": units,
                "noise_generator": partial(noise, seed=seed),
            },
            forward=forward_external,
            initializer=partial(
                initialize,
                input_bias=input_bias,
                bias_scaling=bias_scaling,
                sr=sr,
                input_scaling=input_scaling,
                input_connectivity=input_connectivity,
                rc_connectivity=rc_connectivity,
                W_init=W,
                Win_init=Win,
                bias_init=bias,
                seed=seed,
            ),
            backward=backward,
            output_dim=units,
            feedback_dim=feedback_dim,
            name=name,
            **kwargs,
        )

    # TODO: handle unsupervised learners with a specific attribute
    @property
    def fitted(self):
        # Unsupervised, incremental training: the node is always usable.
        return True

    def partial_fit(self, X_batch, Y_batch=None, warmup=0, **kwargs) -> "Node":
        """Partial offline fitting method of a Node.

        Can be used to perform batched fitting or to precompute some variables
        used by the fitting method.

        Parameters
        ----------
        X_batch : array-like of shape ([series], timesteps, features)
            A sequence or a batch of sequence of input data.
        Y_batch : array-like of shape ([series], timesteps, features), optional
            A sequence or a batch of sequence of teacher signals.
        warmup : int, default to 0
            Number of timesteps to consider as warmup and
            discard at the beginning of each timeseries before training.

        Returns
        -------
        Node
            Partially fitted Node.
        """
        X, _ = check_xy(self, X_batch, allow_n_inputs=False)
        X, _ = _init_with_sequences(self, X)

        self.initialize_buffers()

        for i in range(len(X)):
            X_seq = X[i]

            if X_seq.shape[0] <= warmup:
                raise ValueError(
                    f"Warmup set to {warmup} timesteps, but one timeseries is only "
                    f"{X_seq.shape[0]} long."
                )

            # Run through the warmup steps to settle the state without training.
            if warmup > 0:
                self.run(X_seq[:warmup])

            # NOTE(review): '_partial_backward' appears to hold a plain
            # function (not a bound method), hence 'self' is passed
            # explicitly — confirm against the Node base class.
            self._partial_backward(self, X_seq[warmup:])

        return self
import json
import os
import time
import warnings
from functools import partial
from glob import glob
from os import path
import numpy as np
def _get_conf_from_json(confpath):
    """Load a JSON training configuration file and parse it for hyperopt.

    Raises FileNotFoundError if the file does not exist.
    """
    if not path.isfile(confpath):
        raise FileNotFoundError(f"Training conf '{confpath}' not found.")
    with open(confpath, "r") as f:
        config = json.load(f)
    return _parse_config(config)
def _parse_config(config):
    """Validate a hyperopt experiment configuration and build its search space.

    Checks required keys, resolves the search algorithm name to a hyperopt
    ``suggest`` callable, and converts every entry of ``hp_space`` into a
    hyperopt parameter expression.
    """
    import hyperopt as hopt

    # All of these keys must be present and non-None.
    for required in ["exp", "hp_max_evals", "hp_method", "hp_space"]:
        if config.get(required) is None:
            raise ValueError(f"No {required} argument found in config file.")

    method = config["hp_method"]
    if method not in ["tpe", "random"]:
        raise ValueError(
            f"Unknow hyperopt algorithm: {method}. "
            "Available algorithms: 'random', 'tpe'."
        )
    if method == "random":
        config["hp_method"] = partial(hopt.rand.suggest)
    elif method == "tpe":
        config["hp_method"] = partial(hopt.tpe.suggest)

    config["hp_space"] = {
        name: _parse_hyperopt_searchspace(name, spec)
        for name, spec in config["hp_space"].items()
    }
    return config
def _parse_hyperopt_searchspace(arg, specs):
    """Convert a JSON distribution spec ``[dist, *params]`` into a hyperopt
    parameter expression.

    Log-based distributions receive their bounds in natural space in the
    config file and are converted with ``np.log`` here.

    Raises
    ------
    ValueError
        If the distribution name is not recognized (previously this function
        silently returned ``None``).
    """
    import hyperopt as hopt

    dist, params = specs[0], specs[1:]
    if dist == "choice":
        return hopt.hp.choice(arg, params)
    if dist == "randint":
        return hopt.hp.randint(arg, *params)
    if dist == "uniform":
        return hopt.hp.uniform(arg, *params)
    if dist == "quniform":
        return hopt.hp.quniform(arg, *params)
    if dist == "loguniform":
        return hopt.hp.loguniform(arg, np.log(params[0]), np.log(params[1]))
    if dist == "qloguniform":
        return hopt.hp.qloguniform(
            arg, np.log(params[0]), np.log(params[1]), params[2]
        )
    if dist == "normal":
        return hopt.hp.normal(arg, *params)
    if dist == "qnormal":
        return hopt.hp.qnormal(arg, *params)
    if dist == "lognormal":
        return hopt.hp.lognormal(arg, np.log(params[0]), np.log(params[1]))
    if dist == "qlognormal":
        # Bug fix: the original code indexed the function ('np.log[...]')
        # instead of calling it ('np.log(...)'), raising a TypeError.
        return hopt.hp.qlognormal(
            arg, np.log(params[0]), np.log(params[1]), params[2]
        )
    raise ValueError(f"Unknown hyperopt distribution: '{dist}'.")
def _get_report_path(exp_name, base_path=None):
base_path = "." if base_path is None else base_path
report_path = path.join(base_path, exp_name, "results")
if not (path.isdir(base_path)):
os.mkdir(base_path)
if not (path.isdir(path.join(base_path, exp_name))):
os.mkdir(path.join(base_path, exp_name))
if not (path.isdir(report_path)):
os.mkdir(report_path)
return report_path
def research(objective, dataset, config_path, report_path=None):
    """
    Wrapper for hyperopt fmin function. Will run hyperopt fmin on the
    objective function passed as argument, on the data stored in the
    dataset argument.

    Note
    ----
    Installation of :mod:`hyperopt` is required to use this function.

    Parameters
    ----------
    objective : Callable
        Objective function defining the function to
        optimize. Must be able to receive the dataset argument and
        all parameters sampled by hyperopt during the search. These
        parameters must be keyword arguments only without default value
        (this can be achieved by separating them from the other arguments
        with an empty starred expression. See examples for more info.)
    dataset : tuple or lists or arrays of data
        Argument used to pass data to the objective function during
        the hyperopt run. It will be passed as is to the objective
        function : it can be in whatever format.
    config_path : str or Path
        Path to the hyperopt experimentation configuration file used to
        define this run.
    report_path : str, optional
        Path to the directory where to store the results of the run. By default,
        this directory is set to be {name of the experiment}/results/.

    Returns
    -------
    tuple
        ``(best, trials)``: the best parameter set found by hyperopt and the
        full :py:class:`hyperopt.Trials` history.
    """
    import hyperopt as hopt

    config = _get_conf_from_json(config_path)
    report_path = _get_report_path(config["exp"], report_path)

    def objective_wrapper(kwargs):
        try:
            start = time.time()
            returned_dict = objective(dataset, config, **kwargs)
            end = time.time()
            duration = end - start

            returned_dict["status"] = hopt.STATUS_OK
            returned_dict["start_time"] = start
            returned_dict["duration"] = duration

            save_file = f"{returned_dict['loss']:.7f}_hyperopt_results"
        except Exception:
            # Any failure in the objective aborts the whole search and is
            # reported to the user. (The original code contained an
            # unreachable STATUS_FAIL fallback after this raise; it has been
            # removed as dead code.)
            raise

        try:
            json_dict = {"returned_dict": returned_dict, "current_params": kwargs}
            save_file = path.join(report_path, save_file)
            # Several trials can reach the same loss: suffix with a call count
            # to avoid overwriting previous reports.
            nb_save_file_with_same_loss = len(glob(f"{save_file}*"))
            save_file = f"{save_file}_{nb_save_file_with_same_loss+1}call.json"
            with open(save_file, "w+") as f:
                json.dump(json_dict, f)
        except Exception as e:
            # Saving the report is best-effort: warn but do not fail the trial.
            warnings.warn(
                "Results of current simulation were NOT saved "
                "correctly to JSON file."
            )
            warnings.warn(str(e))

        return returned_dict

    search_space = config["hp_space"]

    trials = hopt.Trials()

    if config.get("seed") is None:
        rs = np.random.default_rng()
    else:
        rs = np.random.default_rng(config["seed"])

    best = hopt.fmin(
        objective_wrapper,
        space=search_space,
        algo=config["hp_method"],
        max_evals=config["hp_max_evals"],
        trials=trials,
        rstate=rs,
    )

    return best, trials
# Author: Nathan Trouvain at 2020 <nathan.trouvain@inria.fr>
# Licence: MIT License
# Copyright: Xavier Hinaut (2018) <xavier.hinaut@inria.fr>
import collections
import os
from typing import Union
import numpy as np
from joblib import Memory
from numpy.random import Generator, RandomState
from scipy.fft import fft, ifft
from scipy.integrate import solve_ivp
from .. import _TEMPDIR
from ..utils.random import rand_generator
from ..utils.validation import check_vector
from ._seed import get_seed
# On-disk joblib cache for generated datasets, stored under the reservoirpy
# temporary directory. NOTE(review): not used in this part of the module —
# presumably consumed by decorated loaders elsewhere in the file.
memory = Memory(os.path.join(_TEMPDIR, "datasets"), verbose=0)
def _mg_eq(xt, xtau, a=0.2, b=0.1, n=10):
"""
Mackey-Glass time delay diffential equation, at values x(t) and x(t-tau).
"""
return -b * xt + a * xtau / (1 + xtau**n)
def _mg_rk4(xt, xtau, a, b, n, h=1.0):
    """Advance the Mackey-Glass equation by one step of size ``h`` with a
    fourth-order Runge-Kutta scheme.

    NOTE(review): the delayed value ``xtau`` is held constant over the RK4
    sub-steps rather than interpolated at t + h/2 — confirm this
    approximation is intended if accuracy matters.
    """
    s1 = h * _mg_eq(xt, xtau, a, b, n)
    s2 = h * _mg_eq(xt + 0.5 * s1, xtau, a, b, n)
    s3 = h * _mg_eq(xt + 0.5 * s2, xtau, a, b, n)
    s4 = h * _mg_eq(xt + s3, xtau, a, b, n)
    return xt + s1 / 6 + s2 / 3 + s3 / 3 + s4 / 6
def henon_map(
    n_timesteps: int,
    a: float = 1.4,
    b: float = 0.3,
    x0: Union[list, np.ndarray] = [0.0, 0.0],
) -> np.ndarray:
    """Hénon map discrete timeseries [2]_ [3]_.

    .. math::

        x(n+1) &= 1 - ax(n)^2 + y(n)\\\\
        y(n+1) &= bx(n)

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to generate.
    a : float, default to 1.4
        :math:`a` parameter of the system.
    b : float, default to 0.3
        :math:`b` parameter of the system.
    x0 : array-like of shape (2,), default to [0.0, 0.0]
        Initial conditions of the system.

    Returns
    -------
    array of shape (n_timesteps, 2)
        Hénon map discrete timeseries.

    References
    ----------
    .. [2] M. Hénon, ‘A two-dimensional mapping with a strange
           attractor’, Comm. Math. Phys., vol. 50, no. 1, pp. 69–77, 1976.

    .. [3] `Hénon map <https://en.wikipedia.org/wiki/H%C3%A9non_map>`_
           on Wikipédia
    """
    trajectory = np.zeros((n_timesteps, 2))
    trajectory[0] = np.asarray(x0)

    # Iterate the map: each point depends only on the previous one.
    for t in range(1, n_timesteps):
        x_prev, y_prev = trajectory[t - 1]
        trajectory[t][0] = 1 - a * x_prev**2 + y_prev
        trajectory[t][1] = b * x_prev

    return trajectory
def logistic_map(n_timesteps: int, r: float = 3.9, x0: float = 0.5) -> np.ndarray:
    """Logistic map discrete timeseries [4]_ [5]_.

    .. math::

        x(n+1) = rx(n)(1-x(n))

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to generate.
    r : float, default to 3.9
        :math:`r` parameter of the system. Must be positive.
    x0 : float, default to 0.5
        Initial condition of the system. Must be in :math:`]0, 1[`.

    Returns
    -------
    array of shape (n_timesteps, 1)
        Logistic map discrete timeseries.

    References
    ----------
    .. [4] R. M. May, ‘Simple mathematical models with very
           complicated dynamics’, Nature, vol. 261, no. 5560,
           Art. no. 5560, Jun. 1976, doi: 10.1038/261459a0.

    .. [5] `Logistic map <https://en.wikipedia.org/wiki/Logistic_map>`_
           on Wikipédia
    """
    # Guard clauses: reject invalid parameters up front ('r' checked first,
    # matching the original precedence when both are invalid).
    if r <= 0:
        raise ValueError("r should be positive.")
    if not 0 < x0 < 1:
        raise ValueError("Initial condition x0 should be in ]0;1[.")

    series = np.zeros(n_timesteps)
    series[0] = x0
    for t in range(1, n_timesteps):
        series[t] = r * series[t - 1] * (1 - series[t - 1])

    return series.reshape(-1, 1)
def lorenz(
    n_timesteps: int,
    rho: float = 28.0,
    sigma: float = 10.0,
    beta: float = 8.0 / 3.0,
    x0: Union[list, np.ndarray] = [1.0, 1.0, 1.0],
    h: float = 0.03,
    **kwargs,
) -> np.ndarray:
    """Lorenz attractor timeseries as defined by Lorenz in 1963 [6]_ [7]_.

    .. math::

        \\frac{\\mathrm{d}x}{\\mathrm{d}t} &= \\sigma (y-x) \\\\
        \\frac{\\mathrm{d}y}{\\mathrm{d}t} &= x(\\rho - z) - y \\\\
        \\frac{\\mathrm{d}z}{\\mathrm{d}t} &= xy - \\beta z

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to generate.
    rho : float, default to 28.0
        :math:`\\rho` parameter of the system.
    sigma : float, default to 10.0
        :math:`\\sigma` parameter of the system.
    beta : float, default to 8/3
        :math:`\\beta` parameter of the system.
    x0 : array-like of shape (3,), default to [1.0, 1.0, 1.0]
        Initial conditions of the system.
    h : float, default to 0.03
        Time delta between two discrete timesteps.
    **kwargs:
        Other parameters to pass to the `scipy.integrate.solve_ivp`
        solver.

    Returns
    -------
    array of shape (n_timesteps, 3)
        Lorenz attractor timeseries.

    References
    ----------
    .. [6] E. N. Lorenz, ‘Deterministic Nonperiodic Flow’,
           Journal of the Atmospheric Sciences, vol. 20, no. 2,
           pp. 130–141, Mar. 1963,
           doi: 10.1175/1520-0469(1963)020<0130:DNF>2.0.CO;2.

    .. [7] `Lorenz system <https://en.wikipedia.org/wiki/Lorenz_system>`_
           on Wikipedia.
    """

    def _derivatives(t, state):
        x, y, z = state
        return sigma * (y - x), x * (rho - z) - y, x * y - beta * z

    t_max = n_timesteps * h
    t_eval = np.arange(0.0, t_max, h)
    solution = solve_ivp(
        _derivatives, y0=x0, t_span=(0.0, t_max), t_eval=t_eval, **kwargs
    )
    return solution.y.T
def mackey_glass(
    n_timesteps: int,
    tau: int = 17,
    a: float = 0.2,
    b: float = 0.1,
    n: int = 10,
    x0: float = 1.2,
    h: float = 1.0,
    seed: Union[int, RandomState, Generator] = None,
) -> np.ndarray:
    """Mackey-Glass timeseries [8]_ [9]_, computed from the Mackey-Glass
    delayed differential equation.

    .. math::

        \\frac{x}{t} = \\frac{ax(t-\\tau)}{1+x(t-\\tau)^n} - bx(t)

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to compute.
    tau : int, default to 17
        Time delay :math:`\\tau` of Mackey-Glass equation.
        By defaults, equals to 17. Other values can
        change the chaotic behaviour of the timeseries.
    a : float, default to 0.2
        :math:`a` parameter of the equation.
    b : float, default to 0.1
        :math:`b` parameter of the equation.
    n : int, default to 10
        :math:`n` parameter of the equation.
    x0 : float, optional, default to 1.2
        Initial condition of the timeseries.
    h : float, default to 1.0
        Time delta between two discrete timesteps.
    seed : int or :py:class:`numpy.random.Generator`, optional
        Random state seed for reproducibility.

    Returns
    -------
    array of shape (n_timesteps, 1)
        Mackey-Glass timeseries.

    Note
    ----
    As Mackey-Glass is defined by delayed time differential equations,
    the first timesteps of the timeseries can't be initialized at 0
    (otherwise, the first steps of computation involving these
    not-computed-yet-timesteps would yield inconsistent results).
    A random number generator is therefore used to produce random
    initial timesteps based on the value of the initial condition
    passed as parameter. A default seed is hard-coded to ensure
    reproducibility in any case. It can be changed with the
    :py:func:`set_seed` function.

    References
    ----------
    .. [8] M. C. Mackey and L. Glass, ‘Oscillation and chaos in
           physiological
           control systems’, Science, vol. 197, no. 4300, pp. 287–289,
           Jul. 1977,
           doi: 10.1126/science.267326.

    .. [9] `Mackey-Glass equations
            <https://en.wikipedia.org/wiki/Mackey-Glass_equations>`_
            on Wikipedia.
    """
    # a random state is needed as the method used to discretize
    # the timeseries needs to use randomly generated initial steps
    # based on the initial condition passed as parameter.
    if seed is None:
        seed = get_seed()
    rs = rand_generator(seed)

    # generate random first step based on the value
    # of the initial condition
    history_length = int(np.floor(tau / h))
    history = collections.deque(
        x0 * np.ones(history_length) + 0.2 * (rs.random(history_length) - 0.5)
    )
    xt = x0

    X = np.zeros(n_timesteps)

    for i in range(0, n_timesteps):
        X[i] = xt
        # With no delay, the delayed term vanishes; otherwise pull the value
        # x(t - tau) from the history buffer and push the current value.
        if tau == 0:
            xtau = 0.0
        else:
            xtau = history.popleft()
            history.append(xt)
        # NOTE(review): the step size 'h' is not forwarded to _mg_rk4, which
        # therefore integrates with its default h=1.0 — confirm whether this
        # is intended when h != 1.
        xth = _mg_rk4(xt, xtau, a=a, b=b, n=n)
        xt = xth

    return X.reshape(-1, 1)
def multiscroll(
    n_timesteps: int,
    a: float = 40.0,
    b: float = 3.0,
    c: float = 28.0,
    x0: Union[list, np.ndarray] = [-0.1, 0.5, -0.6],
    h: float = 0.01,
) -> np.ndarray:
    """Double scroll attractor timeseries [10]_ [11]_,
    a particular case of multiscroll attractor timeseries.

    .. math::

        \\frac{\\mathrm{d}x}{\\mathrm{d}t} &= a(y - x) \\\\
        \\frac{\\mathrm{d}y}{\\mathrm{d}t} &= (c - a)x - xz + cy \\\\
        \\frac{\\mathrm{d}z}{\\mathrm{d}t} &= xy - bz

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to generate.
    a : float, default to 40.0
        :math:`a` parameter of the system.
    b : float, default to 3.0
        :math:`b` parameter of the system.
    c : float, default to 28.0
        :math:`c` parameter of the system.
    x0 : array-like of shape (3,), default to [-0.1, 0.5, -0.6]
        Initial conditions of the system.
    h : float, default to 0.01
        Time delta between two discrete timesteps.

    Returns
    -------
    array of shape (n_timesteps, 3)
        Multiscroll attractor timeseries.

    References
    ----------
    .. [10] G. Chen and T. Ueta, ‘Yet another chaotic attractor’,
           Int. J. Bifurcation Chaos, vol. 09, no. 07, pp. 1465–1466,
           Jul. 1999, doi: 10.1142/S0218127499001024.

    .. [11] `Chen double scroll attractor
           <https://en.wikipedia.org/wiki/Multiscroll_attractor
           #Chen_attractor>`_
           on Wikipedia.
    """

    def _derivatives(t, state):
        x, y, z = state
        return a * (y - x), (c - a) * x - x * z + c * y, x * y - b * z

    # The solution is obtained through dense interpolation, then sampled on a
    # regular grid of step h (matching the original implementation).
    t_max = n_timesteps * h
    timepoints = np.arange(0.0, t_max, h)
    solution = solve_ivp(
        _derivatives, y0=x0, t_span=(0.0, t_max), dense_output=True
    )
    return solution.sol(timepoints).T
def doublescroll(
    n_timesteps: int,
    r1: float = 1.2,
    r2: float = 3.44,
    r4: float = 0.193,
    ir: float = 2 * 2.25e-5,
    beta: float = 11.6,
    x0: Union[list, np.ndarray] = (0.37926545, 0.058339, -0.08167691),
    h: float = 0.25,
    **kwargs,
) -> np.ndarray:
    """Double scroll attractor timeseries [10]_ [11]_,
    a particular case of multiscroll attractor timeseries.

    .. math::

        \\frac{\\mathrm{d}V_1}{\\mathrm{d}t} &= \\frac{V_1}{R_1} - \\frac{\\Delta V}{R_2} -
        2I_r \\sinh(\\beta\\Delta V) \\\\
        \\frac{\\mathrm{d}V_2}{\\mathrm{d}t} &= \\frac{\\Delta V}{R_2} +2I_r \\sinh(\\beta\\Delta V) - I\\\\
        \\frac{\\mathrm{d}I}{\\mathrm{d}t} &= V_2 - R_4 I

    where :math:`\\Delta V = V_1 - V_2`.

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to generate.
    r1 : float, default to 1.2
        :math:`R_1` parameter of the system.
    r2 : float, default to 3.44
        :math:`R_2` parameter of the system.
    r4 : float, default to 0.193
        :math:`R_4` parameter of the system.
    ir : float, default to 2 * 2.25e-5
        :math:`I_r` parameter of the system.
    beta : float, default to 11.6
        :math:`\\beta` parameter of the system.
    x0 : array-like of shape (3,), default to (0.37926545, 0.058339, -0.08167691)
        Initial conditions of the system.
    h : float, default to 0.25
        Time delta between two discrete timesteps.
    **kwargs:
        Other parameters to pass to the `scipy.integrate.solve_ivp`
        solver.

    Returns
    -------
    array of shape (n_timesteps, 3)
        Multiscroll attractor timeseries.

    References
    ----------
    .. [10] G. Chen and T. Ueta, ‘Yet another chaotic attractor’,
            Int. J. Bifurcation Chaos, vol. 09, no. 07, pp. 1465–1466,
            Jul. 1999, doi: 10.1142/S0218127499001024.
    .. [11] `Chen double scroll attractor
            <https://en.wikipedia.org/wiki/Multiscroll_attractor
            #Chen_attractor>`_
            on Wikipedia.
    """

    # Renamed from `doublescroll` to avoid shadowing the enclosing function.
    def doublescroll_diff(t, state):
        V1, V2, i = state
        dV = V1 - V2
        # Current through the R2 branch plus the nonlinear (sinh) element.
        factor = (dV / r2) + ir * np.sinh(beta * dV)
        dV1 = (V1 / r1) - factor
        dV2 = factor - i
        dI = V2 - r4 * i
        return dV1, dV2, dI

    t_eval = np.arange(0.0, n_timesteps * h, h)
    sol = solve_ivp(
        doublescroll_diff, y0=x0, t_span=(0.0, n_timesteps * h), t_eval=t_eval, **kwargs
    )
    return sol.y.T
def rabinovich_fabrikant(
    n_timesteps: int,
    alpha: float = 1.1,
    gamma: float = 0.89,
    x0: Union[list, np.ndarray] = [-1, 0, 0.5],
    h: float = 0.05,
    **kwargs,
) -> np.ndarray:
    """Rabinovitch-Fabrikant system [12]_ [13]_ timeseries.

    Integrates the three coupled ODEs

    .. math::

        \\frac{\\mathrm{d}x}{\\mathrm{d}t} &= y(z - 1 + x^2) + \\gamma x \\\\
        \\frac{\\mathrm{d}y}{\\mathrm{d}t} &= x(3z + 1 - x^2) + \\gamma y \\\\
        \\frac{\\mathrm{d}z}{\\mathrm{d}t} &= -2z(\\alpha + xy)

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to generate.
    alpha : float, default to 1.1
        :math:`\\alpha` parameter of the system.
    gamma : float, default to 0.89
        :math:`\\gamma` parameter of the system.
    x0 : array-like of shape (3,), default to [-1, 0, 0.5]
        Initial conditions of the system.
    h : float, default to 0.05
        Time delta between two discrete timesteps.
    **kwargs:
        Other parameters to pass to the `scipy.integrate.solve_ivp`
        solver.

    Returns
    -------
    array of shape (n_timesteps, 3)
        Rabinovitch-Fabrikant system timeseries.

    References
    ----------
    .. [12] M. I. Rabinovich and A. L. Fabrikant,
            ‘Stochastic self-modulation of waves in
            nonequilibrium media’, p. 8, 1979.
    .. [13] `Rabinovich-Fabrikant equations
            <https://en.wikipedia.org/wiki/Rabinovich%E2%80
            %93Fabrikant_equations>`_
            on Wikipedia.
    """

    def _rf_equations(t, state):
        # Right-hand side of the Rabinovich-Fabrikant ODE system.
        u, v, w = state
        du = v * (w - 1 + u**2) + gamma * u
        dv = u * (3 * w + 1 - u**2) + gamma * v
        dw = -2 * w * (alpha + u * v)
        return du, dv, dw

    end_time = n_timesteps * h
    evaluation_times = np.arange(0.0, end_time, h)
    solution = solve_ivp(
        _rf_equations,
        y0=x0,
        t_span=(0.0, end_time),
        t_eval=evaluation_times,
        **kwargs,
    )
    return solution.y.T
def narma(
    n_timesteps: int,
    order: int = 30,
    a1: float = 0.2,
    a2: float = 0.04,
    b: float = 1.5,
    c: float = 0.001,
    x0: Union[list, np.ndarray] = [0.0],
    seed: Union[int, RandomState] = None,
) -> np.ndarray:
    """Non-linear Autoregressive Moving Average (NARMA) timeseries,
    as first defined in [14]_, and as used in [15]_.
    NARMA n-th order dynamical system is defined by the recurrent relation:
    .. math::
        y[t+1] = a_1 y[t] + a_2 y[t] (\\sum_{i=0}^{n-1} y[t-i]) + b u[t-(
        n-1)]u[t] + c
    where :math:`u[t]` are sampled following a uniform distribution in
    :math:`[0, 0.5]`.
    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to generate.
    order: int, default to 30
        Order of the system.
    a1 : float, default to 0.2
        :math:`a_1` parameter of the system.
    a2 : float, default to 0.04
        :math:`a_2` parameter of the system.
    b : float, default to 1.5
        :math:`b` parameter of the system.
    c : float, default to 0.001
        :math:`c` parameter of the system.
    x0 : array-like of shape (init_steps,), default to [0.0]
        Initial conditions of the system.
    seed : int or :py:class:`numpy.random.Generator`, optional
        Random state seed for reproducibility.
    Returns
    -------
    array of shape (n_timesteps, 1)
        NARMA timeseries.
    References
    ----------
    .. [14] A. F. Atiya and A. G. Parlos, ‘New results on recurrent
            network training: unifying the algorithms and accelerating
            convergence,‘ in IEEE Transactions on Neural Networks,
            vol. 11, no. 3, pp. 697-709, May 2000,
            doi: 10.1109/72.846741.
    .. [15] B.Schrauwen, M. Wardermann, D. Verstraeten, J. Steil,
            D. Stroobandt, ‘Improving reservoirs using intrinsic
            plasticity‘,
            Neurocomputing, 71. 1159-1171, 2008,
            doi: 10.1016/j.neucom.2007.12.020.
    """
    if seed is None:
        seed = get_seed()
    rs = rand_generator(seed)
    # `y` holds `order` warm-up values (seeded from x0, zeros elsewhere)
    # followed by the generated series; only the last n_timesteps are returned.
    y = np.zeros((n_timesteps + order, 1))
    x0 = check_vector(np.atleast_2d(np.asarray(x0)))
    y[: x0.shape[0], :] = x0
    # u[t] input noise, uniform in [0, 0.5] as stated in the docstring.
    noise = rs.uniform(0, 0.5, size=(n_timesteps + order, 1))
    for t in range(order, n_timesteps + order - 1):
        # NOTE(review): the moving-average term uses noise[t - order],
        # while the docstring formula reads u[t-(n-1)]; similarly the sum
        # covers y[t-order:t] (excluding y[t]) where the formula includes
        # y[t]. Possible off-by-one w.r.t. Atiya & Parlos — confirm before
        # changing, as existing users may rely on this exact recurrence.
        y[t + 1] = (
            a1 * y[t]
            + a2 * y[t] * np.sum(y[t - order : t])
            + b * noise[t - order] * noise[t]
            + c
        )
    # Drop the warm-up segment.
    return y[order:, :]
def lorenz96(
    n_timesteps: int,
    warmup: int = 0,
    N: int = 36,
    F: float = 8.0,
    dF: float = 0.01,
    h: float = 0.01,
    x0: Union[list, np.ndarray] = None,
    **kwargs,
) -> np.ndarray:
    """Lorenz96 attractor timeseries as defined by Lorenz in 1996 [17]_.

    .. math::

        \\frac{\\mathrm{d}x_i}{\\mathrm{d} t} = (x_{i+1} - x_{i-2}) x_{i-1} - x_i + F

    where :math:`i = 1, \\dots, N` and :math:`x_{-1} = x_{N-1}`
    and :math:`x_{N+1} = x_1` and :math:`N \\geq 4`.

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to generate.
    warmup : int, default to 0
        Number of timesteps to discard at the beginning of the signal, to remove
        transient states.
    N: int, default to 36
        Dimension of the system.
    F : float, default to 8.0
        :math:`F` parameter of the system.
    dF : float, default to 0.01
        Perturbation applied to initial condition if x0 is None.
    h : float, default to 0.01
        Time delta between two discrete timesteps.
    x0 : array-like of shape (N,), default to None
        Initial conditions of the system. If None, the array is initialized to
        an array of shape (N, ) with value F, except for the first value of the
        array that takes the value F + dF.
    **kwargs:
        Other parameters to pass to the `scipy.integrate.solve_ivp`
        solver.

    Returns
    -------
    array of shape (n_timesteps - warmup, N)
        Lorenz96 timeseries.

    Raises
    ------
    ValueError
        If ``N < 4`` or if ``x0`` does not have shape ``(N,)``.

    References
    ----------
    .. [17] Lorenz, E. N. (1996, September).
            Predictability: A problem partly solved. In Proc.
            Seminar on predictability (Vol. 1, No. 1).
    """
    if N < 4:
        raise ValueError("N must be >= 4.")
    if x0 is None:
        # Perturb the first variable of the x = F equilibrium to trigger chaos.
        x0 = F * np.ones(N)
        x0[0] = F + dF
    if len(x0) != N:
        raise ValueError(
            f"x0 should have shape ({N},), but have shape {np.asarray(x0).shape}"
        )

    def lorenz96_diff(t, state):
        # Vectorized cyclic coupling, equivalent to the element-wise form
        # ds[i] = (state[(i+1) % N] - state[i-2]) * state[i-1] - state[i] + F:
        # np.roll(state, -1)[i] == state[(i+1) % N], np.roll(state, k)[i]
        # == state[i-k] for k > 0.
        return (np.roll(state, -1) - np.roll(state, 2)) * np.roll(state, 1) - state + F

    t_eval = np.arange(0.0, (warmup + n_timesteps) * h, h)
    sol = solve_ivp(
        lorenz96_diff,
        y0=x0,
        t_span=(0.0, (warmup + n_timesteps) * h),
        t_eval=t_eval,
        **kwargs,
    )
    # Discard the transient warm-up timesteps.
    return sol.y.T[warmup:]
def rossler(
    n_timesteps: int,
    a: float = 0.2,
    b: float = 0.2,
    c: float = 5.7,
    x0: Union[list, np.ndarray] = [-0.1, 0.0, 0.02],
    h: float = 0.1,
    **kwargs,
) -> np.ndarray:
    """Rössler attractor timeseries [18]_.

    Integrates the three coupled ODEs

    .. math::

        \\frac{\\mathrm{d}x}{\\mathrm{d}t} &= -y - z \\\\
        \\frac{\\mathrm{d}y}{\\mathrm{d}t} &= x + a y \\\\
        \\frac{\\mathrm{d}z}{\\mathrm{d}t} &= b + z (x - c)

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to generate.
    a : float, default to 0.2
        :math:`a` parameter of the system.
    b : float, default to 0.2
        :math:`b` parameter of the system.
    c : float, default to 5.7
        :math:`c` parameter of the system.
    x0 : array-like of shape (3,), default to [-0.1, 0.0, 0.02]
        Initial conditions of the system.
    h : float, default to 0.1
        Time delta between two discrete timesteps.
    **kwargs:
        Other parameters to pass to the `scipy.integrate.solve_ivp`
        solver.

    Returns
    -------
    array of shape (n_timesteps, 3)
        Rössler attractor timeseries.

    References
    ----------
    .. [18] O.E. Rössler, "An equation for continuous chaos", Physics Letters A,
            vol 57, Issue 5, Pages 397-398, ISSN 0375-9601, 1976,
            https://doi.org/10.1016/0375-9601(76)90101-8.
    """
    if len(x0) != 3:
        raise ValueError(
            f"x0 should have shape (3,), but have shape {np.asarray(x0).shape}"
        )

    def _rossler_equations(t, state):
        # Right-hand side of the Rössler system.
        u, v, w = state
        return (-v - w, u + a * v, b + w * (u - c))

    end_time = n_timesteps * h
    solution = solve_ivp(
        _rossler_equations,
        y0=x0,
        t_span=(0.0, end_time),
        t_eval=np.arange(0.0, end_time, h),
        **kwargs,
    )
    return solution.y.T
def _kuramoto_sivashinsky_etdrk4(v, *, g, E, E2, Q, f1, f2, f3):
"""A single step of EDTRK4 to solve Kuramoto-Sivashinsky equation.
Kassam, A. K., & Trefethen, L. N. (2005). Fourth-order time-stepping for stiff PDEs.
SIAM Journal on Scientific Computing, 26(4), 1214-1233.
"""
Nv = g * fft(np.real(ifft(v)) ** 2)
a = E2 * v + Q * Nv
Na = g * fft(np.real(ifft(a)) ** 2)
b = E2 * v + Q * Na
Nb = g * fft(np.real(ifft(b)) ** 2)
c = E2 * a + Q * (2 * Nb - Nv)
Nc = g * fft(np.real(ifft(c)) ** 2)
v = E * v + Nv * f1 + 2 * (Na + Nb) * f2 + Nc * f3
return v
@memory.cache
def _kuramoto_sivashinsky(n_timesteps, *, warmup, N, M, x0, h):
    """Integrate the Kuramoto-Sivashinsky PDE in Fourier space using ETDRK4.

    Cached through ``memory.cache`` (presumably a joblib ``Memory`` defined
    at module level — results are memoized on disk for identical arguments).
    Returns the real part of the inverse FFT of the Fourier states, with the
    first ``warmup`` timesteps discarded.
    """
    # initial conditions, transformed to Fourier space
    v0 = fft(x0)
    # ETDRK4 scalars (Kassam & Trefethen 2005): wavenumbers k, linear
    # operator L = k^2 - k^4, and its exponentials over a full/half step.
    k = np.conj(np.r_[np.arange(0, N / 2), [0], np.arange(-N / 2 + 1, 0)]) / M
    L = k**2 - k**4
    E = np.exp(h * L)
    E2 = np.exp(h * L / 2)
    # M roots of unity on the upper half circle, used to evaluate the
    # phi-functions by complex contour averaging (avoids cancellation).
    r = np.exp(1j * np.pi * (np.arange(1, M + 1) - 0.5) / M)
    LR = h * np.transpose(np.repeat([L], M, axis=0)) + np.repeat([r], N, axis=0)
    Q = h * np.real(np.mean((np.exp(LR / 2) - 1) / LR, axis=1))
    f1 = (-4 - LR + np.exp(LR) * (4 - 3 * LR + LR**2)) / LR**3
    f1 = h * np.real(np.mean(f1, axis=1))
    f2 = (2 + LR + np.exp(LR) * (-2 + LR)) / LR**3
    f2 = h * np.real(np.mean(f2, axis=1))
    f3 = (-4 - 3 * LR - LR**2 + np.exp(LR) * (4 - LR)) / LR**3
    f3 = h * np.real(np.mean(f3, axis=1))
    # Fourier-space factor of the nonlinear term -y*y_x.
    g = -0.5j * k
    # integration using ETDRK4 method, one row of `v` per timestep
    v = np.zeros((n_timesteps, N), dtype=complex)
    v[0] = v0
    for n in range(1, n_timesteps):
        v[n] = _kuramoto_sivashinsky_etdrk4(
            v[n - 1], g=g, E=E, E2=E2, Q=Q, f1=f1, f2=f2, f3=f3
        )
    # Back to physical space, dropping the warm-up transient.
    return np.real(ifft(v[warmup:]))
def kuramoto_sivashinsky(
    n_timesteps: int,
    warmup: int = 0,
    N: int = 128,
    M: float = 16,
    x0: Union[list, np.ndarray] = None,
    h: float = 0.25,
) -> np.ndarray:
    """Kuramoto-Sivashinsky oscillators [19]_ [20]_ [21]_.

    .. math::

        y_t = -yy_x - y_{xx} - y_{xxxx}, ~~ x \\in [0, 32\\pi]

    This 1D partial differential equation is solved using ETDRK4
    (Exponential Time-Differencing 4th order Runge-Kutta) method, as described in [22]_.

    Parameters
    ----------
    n_timesteps : int
        Number of timesteps to generate.
    warmup : int, default to 0
        Number of timesteps to discard at the beginning of the signal, to remove
        transient states.
    N : int, default to 128
        Dimension of the system.
    M : float, default to 16
        Number of points for complex means. Modifies behaviour of the resulting
        multivariate timeseries.
    x0 : array-like of shape (N,), default to None.
        Initial conditions of the system. If None, x0 is equal to
        :math:`\\cos (\\frac{y}{M}) * (1 + \\sin(\\frac{y}{M}))`
        with :math:`y = 2M\\pi x / N, ~~ x \\in [1, N]`.
    h : float, default to 0.25
        Time delta between two discrete timesteps.

    Returns
    -------
    array of shape (n_timesteps - warmup, N)
        Kuramoto-Sivashinsky equation solution.

    Raises
    ------
    ValueError
        If ``x0`` is provided and does not have shape ``(N,)``.

    References
    ----------
    .. [19] Kuramoto, Y. (1978). Diffusion-Induced Chaos in Reaction Systems.
            Progress of Theoretical Physics Supplement, 64, 346–367.
            https://doi.org/10.1143/PTPS.64.346
    .. [20] Sivashinsky, G. I. (1977). Nonlinear analysis of hydrodynamic instability
            in laminar flames—I. Derivation of basic equations.
            Acta Astronautica, 4(11), 1177–1206.
            https://doi.org/10.1016/0094-5765(77)90096-0
    .. [21] Sivashinsky, G. I. (1980). On Flame Propagation Under Conditions
            of Stoichiometry. SIAM Journal on Applied Mathematics, 39(1), 67–82.
            https://doi.org/10.1137/0139007
    .. [22] Kassam, A. K., & Trefethen, L. N. (2005).
            Fourth-order time-stepping for stiff PDEs.
            SIAM Journal on Scientific Computing, 26(4), 1214-1233.
    """
    if x0 is None:
        # Default initial profile: smooth periodic bump over the domain.
        x = 2 * M * np.pi * np.arange(1, N + 1) / N
        x0 = np.cos(x / M) * (1 + np.sin(x / M))
    else:
        x0 = np.asarray(x0)
        if x0.shape[0] != N:
            raise ValueError(
                f"Initial condition x0 should be of shape {N} (= N) but "
                f"has shape {x0.shape}"
            )
    return _kuramoto_sivashinsky(n_timesteps, warmup=warmup, N=N, M=M, x0=x0, h=h)
from typing import Union
import numpy as np
from ._chaos import (
doublescroll,
henon_map,
kuramoto_sivashinsky,
logistic_map,
lorenz,
lorenz96,
mackey_glass,
multiscroll,
narma,
rabinovich_fabrikant,
rossler,
)
from ._japanese_vowels import japanese_vowels
from ._seed import get_seed, set_seed
# Public API of the datasets module: chaotic timeseries generators, the
# Japanese vowels dataset, seed utilities and the `to_forecasting` helper.
__all__ = [
    "henon_map",
    "logistic_map",
    "lorenz",
    "mackey_glass",
    "multiscroll",
    "rabinovich_fabrikant",
    "narma",
    "doublescroll",
    "japanese_vowels",
    "lorenz96",
    "rossler",
    "kuramoto_sivashinsky",
    "set_seed",
    "get_seed",
    "to_forecasting",
]
def to_forecasting(
    timeseries: np.ndarray,
    forecast: int = 1,
    axis: int = 0,
    test_size: Union[int, float] = None,
):
    """Split a timeseries for forecasting tasks.

    Transform a timeseries :math:`X` into a series of
    input values :math:`X_t` and a series of output values
    :math:`X_{t+\\mathrm{forecast}}`.

    It is also possible to split the timeseries between training
    timesteps and testing timesteps.

    Parameters
    ----------
    timeseries : np.ndarray
        Timeseries to split.
    forecast : int, optional
        Number of time lag steps between
        the timeseries :math:`X_t` and the timeseries
        :math:`X_{t+\\mathrm{forecast}}`, by default 1,
        i.e. returns two timeseries with a time difference
        of 1 timesteps.
    axis : int, optional
        Time axis of the timeseries, by default 0
    test_size : int or float, optional
        If set, will also split the timeseries
        into a training phase and a testing phase of
        ``test_size`` timesteps. Can also be specified
        as a float ratio, by default None

    Returns
    -------
    tuple of numpy.ndarray
        :math:`X_t` and :math:`X_{t+\\mathrm{forecast}}`.
        If ``test_size`` is specified, will return:
        :math:`X_t`, :math:`X_t^{test}`,
        :math:`X_{t+\\mathrm{forecast}}`, :math:`X_{t+\\mathrm{forecast}}^{test}`.
        The size of the returned timeseries is therefore the size of
        :math:`X` minus the forecasting length ``forecast``.

    Raises
    ------
    ValueError
        If ``forecast`` is lower than 1, or if ``test_size`` is neither an
        integer nor a float in [0, 1[.
    """
    # forecast = 0 would silently produce empty arrays through `[:-0]`.
    if forecast < 1:
        raise ValueError(
            f"invalid forecast argument: forecast must be >= 1, but is {forecast}."
        )
    # Move the time axis first so all slicing below is on axis 0.
    series_ = np.moveaxis(timeseries.view(), axis, 0)
    time_len = series_.shape[0]
    if test_size is not None:
        if isinstance(test_size, float) and 0 <= test_size < 1:
            test_len = round(time_len * test_size)
        elif isinstance(test_size, int):
            test_len = test_size
        else:
            raise ValueError(
                "invalid test_size argument: "
                "test_size can be an integer or a float "
                f"in [0, 1[, but is {test_size}."
            )
    else:
        test_len = 0
    # X is the input series; y is the same series shifted `forecast` steps.
    X = series_[:-forecast]
    y = series_[forecast:]
    if test_len > 0:
        X_t = X[-test_len:]
        y_t = y[-test_len:]
        X = X[:-test_len]
        y = y[:-test_len]
        # Restore the original time axis before returning.
        X = np.moveaxis(X, 0, axis)
        X_t = np.moveaxis(X_t, 0, axis)
        y = np.moveaxis(y, 0, axis)
        y_t = np.moveaxis(y_t, 0, axis)
        return X, X_t, y, y_t
    return np.moveaxis(X, 0, axis), np.moveaxis(y, 0, axis)
from collections import defaultdict
from typing import Iterable
from uuid import uuid4
import numpy as np
from .._base import check_xy
from .validation import is_mapping, is_sequence_set
def build_forward_sumodels(nodes, edges, already_trained):
    """Separate unfitted offline nodes from fitted nodes and gather all fitted
    nodes in submodels."""
    from ..model import Model

    # Offline-trainable nodes that still need fitting are excluded from the
    # forward submodel.
    pending_offline = [
        node
        for node in nodes
        if node.is_trained_offline and node not in already_trained
    ]
    runnable_nodes = list(set(nodes) - set(pending_offline))
    # Keep only edges whose destination can already run forward.
    runnable_edges = [edge for edge in edges if edge[1] not in pending_offline]
    submodel = Model(runnable_nodes, runnable_edges, name=f"SubModel-{uuid4()}")
    submodel.already_trained = already_trained
    return submodel, pending_offline
def dist_states_to_next_subgraph(states, relations):
    """Map submodel output state vectors to input nodes of next submodel.

    Edges between first and second submodel are stored in 'relations'.
    """
    mapped = {}
    for source, targets in relations.items():
        if len(targets) > 1:
            # Fan-out: the source state is gathered into a list per target.
            for target in targets:
                if mapped.get(target) is None:
                    mapped[target] = list()
                mapped[target].append(states[source])
        else:
            # Single successor: pass the state through unchanged.
            mapped[targets[0]] = states[source]
    return mapped
def allocate_returned_states(model, inputs, return_states=None):
    """Allocate output states matrices.

    One zero-filled matrix of shape (sequence length, node output dim) is
    created per requested node.
    """
    # All input sequences share the same length; read it from any of them.
    seq_len = inputs[next(iter(inputs))].shape[0]
    if return_states == "all":
        targets = model.nodes
    elif isinstance(return_states, Iterable):
        targets = [model[name] for name in return_states]
    else:
        targets = model.output_nodes
    return {node.name: np.zeros((seq_len, node.output_dim)) for node in targets}
def to_ragged_seq_set(data):
    """Convert dataset from mapping/array of sequences
    to lists of mappings of sequences."""
    if is_mapping(data):
        converted = {}
        for name, value in data.items():
            if is_sequence_set(value):
                converted[name] = value
            else:
                # all sequences must at least be 2D (seq length, num features)
                # 1D sequences are converted to (1, num features) by default.
                converted[name] = [np.atleast_2d(value)]
        return converted
    # data is an array or a list
    if is_sequence_set(data):
        return data
    if data.ndim < 3:
        return [np.atleast_2d(data)]
    return data
def build_mapping(nodes, data, io_type="input"):
    """Map input/target data to input/trainable nodes in the model."""
    data = to_ragged_seq_set(data)
    if is_mapping(data):
        # Already a node-name -> sequences mapping; keep a shallow copy.
        return data.copy()
    if io_type == "input":
        return {n.name: data for n in nodes}
    if io_type == "target":
        # Remove unsupervised or already fitted nodes from the mapping
        return {n.name: data for n in nodes if not n.unsupervised}
    raise ValueError(
        f"Unknown io_type: '{io_type}'. "
        f"Accepted io_types are 'input' and 'target'."
    )
def unfold_mapping(data_map):
    """Convert a mapping of sequence lists into a list of sequence to nodes mappings."""
    lengths = {n: len(data_map[n]) for n in data_map.keys()}
    if len(set(lengths.values())) > 1:
        # Every node must provide the same number of sequences.
        seq_numbers = lengths
        raise ValueError(
            f"Found dataset with inconsistent number of sequences for each node. "
            f"Current number of sequences per node: {seq_numbers}"
        )
    # select an input dataset and check
    n_sequences = len(data_map[list(data_map.keys())[0]])
    return [
        {name: seqs[i] for name, seqs in data_map.items()}
        for i in range(n_sequences)
    ]
def fold_mapping(model, states, return_states):
    """Convert a list of sequence to nodes mappings into a mapping of lists or a
    simple array if possible."""
    if len(states) == 1:
        folded = states[0]
    else:
        # Several sequences: gather them per node name.
        folded = defaultdict(list)
        for seq_states in states:
            for node_name, seq in seq_states.items():
                folded[node_name] += [seq]
    # A single unrequested output node is returned directly, unwrapped.
    if len(folded) == 1 and return_states is None:
        return folded[model.output_nodes[0].name]
    return folded
def to_data_mapping(model, X, Y=None):
    """Map dataset to input/target nodes in the model."""
    X_map = build_mapping(model.input_nodes, X, io_type="input")
    Y_map = (
        build_mapping(model.trainable_nodes, Y, io_type="target")
        if Y is not None
        else None
    )
    # Validate shapes and names against the model.
    X_map, Y_map = check_xy(model, x=X_map, y=Y_map)
    X_sequences = unfold_mapping(X_map)
    if Y_map is not None:
        Y_sequences = unfold_mapping(Y_map)
    else:
        # No targets: align a None placeholder with each input sequence.
        Y_sequences = [None] * len(X_sequences)
    return X_sequences, Y_sequences
import json
import pathlib
import re
from typing import Union
import dill
import numpy as np
from scipy import sparse
from ..activationsfunc import identity
from ..mat_gen import zeros
from ..nodes import ESN as ESN_v3
from ..nodes import Reservoir, Ridge
from . import regression_models
from ._esn import ESN
from ._esn_online import ESNOnline
from .utils.save import load
def _load_files_from_v2(dirpath):
    """Collect matrices, feedback function and config saved by rpy <= 0.2.4.

    Parameters
    ----------
    dirpath : str or Path
        Directory produced by an old ESN save.

    Returns
    -------
    (dict, dict, dict)
        Weight matrices keyed by canonical name (leading underscore
        stripped), loaded callables (currently only ``"fbfunc"``) and the
        raw JSON configuration.
    """
    dirpath = pathlib.Path(dirpath)
    matrices = dict()
    config = dict()
    for filename in dirpath.iterdir():
        ext = filename.suffix
        # all rpy <= 0.2.4 versions have file names starting with esn
        is_esn_obj = "esn" in filename.name[:3]
        if not is_esn_obj:
            continue
        if ext in (".npy", ".npz"):
            matrix_names = ("Win", "W", "Wfb", "Wout", "_W", "_Win", "_Wfb", "_Wout")
            # Matrix names are embedded in the file name, before a dash.
            match = re.findall("_?W.*?(?=-)", filename.name)
            for name in matrix_names:
                if name in match:
                    # Strip the private-attribute underscore prefix.
                    key = name[1:] if name.startswith("_") else name
                    matrices[key] = _load_matrix_v2(filename)
        elif ext == ".json":
            with filename.open(mode="r+") as fp:
                config = json.load(fp)
    fns = dict()
    for attr, value in config.items():
        if attr == "fbfunc" and value is not None:
            fn_file = pathlib.Path(value)
            if fn_file.exists():
                # Bug fix: dill.load expects an open file object, not a
                # pathlib.Path — passing the path raised at load time.
                with fn_file.open(mode="rb") as fp:
                    fns["fbfunc"] = dill.load(fp)
    return matrices, fns, config
def _load_matrix_v2(filename):
    """Load a single matrix file saved by reservoirpy <= 0.2.4.

    ``.npy`` files hold a dense array. ``.npz`` files may hold either a
    scipy sparse matrix or a plain compressed numpy archive containing a
    single array; the sparse loader is tried first.
    """
    ext = filename.suffix
    mat = None
    if ext == ".npy":
        mat = np.load(str(filename))
    elif ext == ".npz":  # maybe a scipy sparse array
        try:
            mat = sparse.load_npz(str(filename))
        except Exception as e:
            # Not loadable as sparse: fall back to a plain .npz archive.
            mat = np.load(str(filename))
            keys = list(mat.keys())
            sparse_keys = ("indices", "indptr", "format", "shape", "data")
            if any([k in sparse_keys for k in keys]):
                # The archive looks like a sparse matrix after all, so the
                # original sparse-loading error is the meaningful one.
                raise e
            elif len(keys) == 1:  # Only one array per file
                mat = mat[keys[0]]
            else:
                # NOTE(review): "(unknown)" looks like a placeholder where
                # the file name was probably meant to appear — confirm.
                raise TypeError("Unknown array format in file (unknown).")
    return mat
def load_compat(directory: Union[str, pathlib.Path]) -> ESN_v3:
    """Load a ReservoirPy v0.2.4 and lower ESN model as a
    ReservoirPy v0.3 model.

    .. warning::

        Models and Nodes should now
        be saved using Python serialization utilities
        `pickle`.

    Parameters
    ----------
    directory : str or Path
        Directory containing the matrices and JSON configuration saved
        by a v0.2 ESN.

    Returns
    -------
    reservoirpy.nodes.ESN
        A ReservoirPy v0.3 ESN instance.

    Raises
    ------
    NotADirectoryError
        If ``directory`` does not exist.
    TypeError
        If the saved readout cannot be converted (scikit-learn model, or
        a regression model that is neither "ridge" nor "pinv").
    """
    dirpath = pathlib.Path(directory)
    if not dirpath.exists():
        raise NotADirectoryError(f"'{directory}' not found.")
    matrices, fns, config = _load_files_from_v2(dirpath)
    # Attributes are stored either under an "attr" key or at the top level
    # of the config — presumably depending on the saving version; confirm
    # against the v0.2 save format if this is ever changed.
    attr = config.get("attr", config)
    version = config.get("version")
    msg = "Impossible to load ESN from version {} of reservoirpy: unknown model {}"
    ridge = 0.0
    # Only ridge/pinv readouts can be converted to a v0.3 Ridge node.
    if attr.get("sklearn_model") is not None:
        raise TypeError(msg.format(version, attr["sklearn_model"]))
    elif attr.get("_ridge") is not None:
        ridge = attr["_ridge"]
    if attr.get("reg_model") is not None:
        reg_model = attr["reg_model"]
        if reg_model["type"] not in ("ridge", "pinv"):
            raise TypeError(msg.format(version, attr["type"]))
        elif reg_model["type"] == "ridge":
            ridge = reg_model.get("coef", 0.0)
    # Feedback connections exist iff a Wfb matrix was saved.
    feedback = False
    if matrices.get("Wfb") is not None:
        feedback = True
    output_dim = attr.get("dim_out", attr.get("_dim_out"))
    # Rebuild the reservoir; attribute names changed between versions, so
    # each lookup tries both the public and the underscore-prefixed key.
    reservoir = Reservoir(
        units=attr.get("N", attr.get("_N")),
        lr=attr["lr"],
        input_bias=attr.get("in_bias", attr.get("_input_bias")),
        W=matrices["W"],
        Win=matrices["Win"],
        Wfb=matrices.get("Wfb"),
        fb_activation=fns.get("fbfunc", identity),
        noise_in=attr.get("noise_in", 0.0),
        noise_rc=attr.get("noise_rc", 0.0),
        noise_fb=attr.get("noise_out", 0.0),
        noise_type="uniform",
        seed=attr.get("seed"),
    )
    # v0.2 concatenated the readout bias as the first column of Wout;
    # split it back into separate Wout and bias matrices.
    W = matrices.get("Wout")
    if W is None:
        Wout = zeros
        bias = zeros
    else:
        Wout = W[:, 1:]
        bias = W[:, :1]
    readout = Ridge(
        output_dim=output_dim, ridge=ridge, Wout=Wout, bias=bias, input_bias=True
    )
    model = ESN_v3(reservoir=reservoir, readout=readout, feedback=feedback)
    return model
# Public API of the compat module.
__all__ = ["ESN", "ESNOnline", "load_compat", "regression_models", "load"]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.