repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
RJT1990/pyflux
pyflux/families/poisson.py
Poisson.approximating_model_reg
def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no):
    """
    Creates approximating Gaussian model for Poisson measurement density - dynamic regression model

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    T, Z, R, Q : np.array
        State space matrices used in KFS algorithm

    h_approx : float
        Initial guess for the measurement variance (currently unused in the body
        — NOTE(review): confirm whether it should seed H)

    data : np.array
        The univariate time series data

    X : np.array
        The regressors

    state_no : int
        Number of states

    Returns
    ----------
    H : np.array
        Approximating measurement variance matrix

    mu : np.array
        Approximating measurement constants
    """
    # Start from a unit-variance, zero-mean Gaussian approximation and a zero state path.
    H = np.ones(data.shape[0])
    mu = np.zeros(data.shape[0])
    alpha = np.zeros([state_no, data.shape[0]])
    tol = 100.0
    it = 0
    # Iteratively refine (H, mu) until the fitted signal X*alpha stabilizes,
    # capped at 5 iterations. This looks like a Durbin & Koopman-style
    # mode-matching approximation — TODO confirm against the reference.
    while tol > 10**-7 and it < 5:
        old_alpha = np.sum(X*alpha.T,axis=1)
        # Smooth the states under the current Gaussian approximation.
        alpha, V = nld_univariate_KFS(data,Z,H,T,Q,R,mu)
        # Update approximating variance and constant from the new signal.
        H = np.exp(-np.sum(X*alpha.T,axis=1))
        mu = data - np.sum(X*alpha.T,axis=1) - np.exp(-np.sum(X*alpha.T,axis=1))*(data - np.exp(np.sum(X*alpha.T,axis=1)))
        # Convergence measured as mean absolute change in the fitted signal.
        tol = np.mean(np.abs(np.sum(X*alpha.T,axis=1)-old_alpha))
        it += 1
    return H, mu
python
def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no): """ Creates approximating Gaussian model for Poisson measurement density - dynamic regression model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables T, Z, R, Q : np.array State space matrices used in KFS algorithm data: np.array The univariate time series data X: np.array The regressors state_no : int Number of states Returns ---------- H : np.array Approximating measurement variance matrix mu : np.array Approximating measurement constants """ H = np.ones(data.shape[0]) mu = np.zeros(data.shape[0]) alpha = np.zeros([state_no, data.shape[0]]) tol = 100.0 it = 0 while tol > 10**-7 and it < 5: old_alpha = np.sum(X*alpha.T,axis=1) alpha, V = nld_univariate_KFS(data,Z,H,T,Q,R,mu) H = np.exp(-np.sum(X*alpha.T,axis=1)) mu = data - np.sum(X*alpha.T,axis=1) - np.exp(-np.sum(X*alpha.T,axis=1))*(data - np.exp(np.sum(X*alpha.T,axis=1))) tol = np.mean(np.abs(np.sum(X*alpha.T,axis=1)-old_alpha)) it += 1 return H, mu
[ "def", "approximating_model_reg", "(", "self", ",", "beta", ",", "T", ",", "Z", ",", "R", ",", "Q", ",", "h_approx", ",", "data", ",", "X", ",", "state_no", ")", ":", "H", "=", "np", ".", "ones", "(", "data", ".", "shape", "[", "0", "]", ")", ...
Creates approximating Gaussian model for Poisson measurement density - dynamic regression model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables T, Z, R, Q : np.array State space matrices used in KFS algorithm data: np.array The univariate time series data X: np.array The regressors state_no : int Number of states Returns ---------- H : np.array Approximating measurement variance matrix mu : np.array Approximating measurement constants
[ "Creates", "approximating", "Gaussian", "model", "for", "Poisson", "measurement", "density", "-", "dynamic", "regression", "model" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/poisson.py#L86-L131
train
217,400
RJT1990/pyflux
pyflux/families/poisson.py
Poisson.logpdf
def logpdf(self, mu):
    """
    Log PDF for Poisson prior

    Parameters
    ----------
    mu : float
        Latent variable for which the prior is being formed over

    Returns
    ----------
    - log(p(mu))
    """
    point = mu if self.transform is None else self.transform(mu)
    return ss.poisson.logpmf(point, self.lmd0)
python
def logpdf(self, mu): """ Log PDF for Poisson prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) return ss.poisson.logpmf(mu, self.lmd0)
[ "def", "logpdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "return", "ss", ".", "poisson", ".", "logpmf", "(", "mu", ",", "self", ".", "lmd0", ...
Log PDF for Poisson prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu))
[ "Log", "PDF", "for", "Poisson", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/poisson.py#L198-L213
train
217,401
RJT1990/pyflux
pyflux/families/poisson.py
Poisson.setup
def setup():
    """
    Returns the attributes of this family

    Notes
    ----------
    - scale notes whether family has a variance parameter (sigma)
    - shape notes whether family has a tail thickness parameter (nu)
    - skewness notes whether family has a skewness parameter (gamma)
    - mean_transform is a function which transforms the location parameter
    - cythonized notes whether the family has cythonized routines

    Returns
    ----------
    - model name, link function, scale, shape, skewness, mean_transform, cythonized
    """
    return (
        "Poisson",  # name
        np.exp,     # link
        False,      # scale
        False,      # shape
        False,      # skewness
        np.log,     # mean_transform
        True,       # cythonized
    )
python
def setup(): """ Returns the attributes of this family Notes ---------- - scale notes whether family has a variance parameter (sigma) - shape notes whether family has a tail thickness parameter (nu) - skewness notes whether family has a skewness parameter (gamma) - mean_transform is a function which transforms the location parameter - cythonized notes whether the family has cythonized routines Returns ---------- - model name, link function, scale, shape, skewness, mean_transform, cythonized """ name = "Poisson" link = np.exp scale = False shape = False skewness = False mean_transform = np.log cythonized = True return name, link, scale, shape, skewness, mean_transform, cythonized
[ "def", "setup", "(", ")", ":", "name", "=", "\"Poisson\"", "link", "=", "np", ".", "exp", "scale", "=", "False", "shape", "=", "False", "skewness", "=", "False", "mean_transform", "=", "np", ".", "log", "cythonized", "=", "True", "return", "name", ",",...
Returns the attributes of this family Notes ---------- - scale notes whether family has a variance parameter (sigma) - shape notes whether family has a tail thickness parameter (nu) - skewness notes whether family has a skewness parameter (gamma) - mean_transform is a function which transforms the location parameter - cythonized notes whether the family has cythonized routines Returns ---------- - model name, link function, scale, shape, skewness, mean_transform, cythonized
[ "Returns", "the", "attributes", "of", "this", "family" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/poisson.py#L243-L265
train
217,402
RJT1990/pyflux
pyflux/families/poisson.py
Poisson.pdf
def pdf(self, mu):
    """
    PDF for Poisson prior

    Parameters
    ----------
    mu : float
        Latent variable for which the prior is being formed over

    Returns
    ----------
    - p(mu)
    """
    point = mu if self.transform is None else self.transform(mu)
    return ss.poisson.pmf(point, self.lmd0)
python
def pdf(self, mu): """ PDF for Poisson prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu) """ if self.transform is not None: mu = self.transform(mu) return ss.poisson.pmf(mu, self.lmd0)
[ "def", "pdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "return", "ss", ".", "poisson", ".", "pmf", "(", "mu", ",", "self", ".", "lmd0", ")" ...
PDF for Poisson prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu)
[ "PDF", "for", "Poisson", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/poisson.py#L294-L309
train
217,403
RJT1990/pyflux
pyflux/families/poisson.py
Poisson.reg_score_function
def reg_score_function(X, y, mean, scale, shape, skewness):
    """
    GAS Poisson Regression Update term using gradient only - native Python function

    Parameters
    ----------
    X : float
        datapoint for the right hand side variable

    y : float
        datapoint for the time series

    mean : float
        location parameter for the Poisson distribution

    scale : float
        scale parameter for the Poisson distribution (unused)

    shape : float
        tail thickness parameter for the Poisson distribution (unused)

    skewness : float
        skewness parameter for the Poisson distribution (unused)

    Returns
    ----------
    - Score of the Poisson family
    """
    residual = y - mean
    return X * residual
python
def reg_score_function(X, y, mean, scale, shape, skewness): """ GAS Poisson Regression Update term using gradient only - native Python function Parameters ---------- X : float datapoint for the right hand side variable y : float datapoint for the time series mean : float location parameter for the Poisson distribution scale : float scale parameter for the Poisson distribution shape : float tail thickness parameter for the Poisson distribution skewness : float skewness parameter for the Poisson distribution Returns ---------- - Score of the Poisson family """ return X*(y-mean)
[ "def", "reg_score_function", "(", "X", ",", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "X", "*", "(", "y", "-", "mean", ")" ]
GAS Poisson Regression Update term using gradient only - native Python function Parameters ---------- X : float datapoint for the right hand side variable y : float datapoint for the time series mean : float location parameter for the Poisson distribution scale : float scale parameter for the Poisson distribution shape : float tail thickness parameter for the Poisson distribution skewness : float skewness parameter for the Poisson distribution Returns ---------- - Score of the Poisson family
[ "GAS", "Poisson", "Regression", "Update", "term", "using", "gradient", "only", "-", "native", "Python", "function" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/poisson.py#L312-L339
train
217,404
RJT1990/pyflux
pyflux/families/poisson.py
Poisson.second_order_score
def second_order_score(y, mean, scale, shape, skewness):
    """
    GAS Poisson Update term potentially using second-order information - native Python function

    Parameters
    ----------
    y : float
        datapoint for the time series

    mean : float
        location parameter for the Poisson distribution

    scale : float
        scale parameter for the Poisson distribution (unused)

    shape : float
        tail thickness parameter for the Poisson distribution (unused)

    skewness : float
        skewness parameter for the Poisson distribution (unused)

    Returns
    ----------
    - Adjusted score of the Poisson family
    """
    # Gradient scaled by the (Poisson) variance, which equals the mean.
    residual = y - mean
    return residual / float(mean)
python
def second_order_score(y, mean, scale, shape, skewness): """ GAS Poisson Update term potentially using second-order information - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Poisson distribution scale : float scale parameter for the Poisson distribution shape : float tail thickness parameter for the Poisson distribution skewness : float skewness parameter for the Poisson distribution Returns ---------- - Adjusted score of the Poisson family """ return (y-mean)/float(mean)
[ "def", "second_order_score", "(", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "(", "y", "-", "mean", ")", "/", "float", "(", "mean", ")" ]
GAS Poisson Update term potentially using second-order information - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Poisson distribution scale : float scale parameter for the Poisson distribution shape : float tail thickness parameter for the Poisson distribution skewness : float skewness parameter for the Poisson distribution Returns ---------- - Adjusted score of the Poisson family
[ "GAS", "Poisson", "Update", "term", "potentially", "using", "second", "-", "order", "information", "-", "native", "Python", "function" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/poisson.py#L342-L366
train
217,405
RJT1990/pyflux
pyflux/families/inverse_gamma.py
InverseGamma.logpdf
def logpdf(self, x):
    """
    Log PDF for Inverse Gamma prior

    Note: returns the log of the unnormalized density kernel,
    which is sufficient for use as a prior.

    Parameters
    ----------
    x : float
        Latent variable for which the prior is being formed over

    Returns
    ----------
    - log(p(x))
    """
    point = x if self.transform is None else self.transform(x)
    return -(self.alpha + 1) * np.log(point) - self.beta / float(point)
python
def logpdf(self, x): """ Log PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - log(p(x)) """ if self.transform is not None: x = self.transform(x) return (-self.alpha-1)*np.log(x) - (self.beta/float(x))
[ "def", "logpdf", "(", "self", ",", "x", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "x", "=", "self", ".", "transform", "(", "x", ")", "return", "(", "-", "self", ".", "alpha", "-", "1", ")", "*", "np", ".", "log", "("...
Log PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - log(p(x))
[ "Log", "PDF", "for", "Inverse", "Gamma", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/inverse_gamma.py#L33-L48
train
217,406
RJT1990/pyflux
pyflux/families/inverse_gamma.py
InverseGamma.pdf
def pdf(self, x):
    """
    PDF for Inverse Gamma prior

    Note: returns the unnormalized density kernel, which is
    sufficient for use as a prior.

    Parameters
    ----------
    x : float
        Latent variable for which the prior is being formed over

    Returns
    ----------
    - p(x)
    """
    point = x if self.transform is None else self.transform(x)
    return point ** (-self.alpha - 1) * np.exp(-self.beta / float(point))
python
def pdf(self, x): """ PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - p(x) """ if self.transform is not None: x = self.transform(x) return (x**(-self.alpha-1))*np.exp(-(self.beta/float(x)))
[ "def", "pdf", "(", "self", ",", "x", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "x", "=", "self", ".", "transform", "(", "x", ")", "return", "(", "x", "**", "(", "-", "self", ".", "alpha", "-", "1", ")", ")", "*", "...
PDF for Inverse Gamma prior Parameters ---------- x : float Latent variable for which the prior is being formed over Returns ---------- - p(x)
[ "PDF", "for", "Inverse", "Gamma", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/inverse_gamma.py#L50-L65
train
217,407
RJT1990/pyflux
pyflux/families/cauchy.py
Cauchy.logpdf
def logpdf(self, mu):
    """
    Log PDF for Cauchy prior

    Parameters
    ----------
    mu : float
        Latent variable for which the prior is being formed over

    Returns
    ----------
    - log(p(mu))
    """
    point = mu if self.transform is None else self.transform(mu)
    return ss.cauchy.logpdf(point, self.loc0, self.scale0)
python
def logpdf(self, mu): """ Log PDF for Cauchy prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) return ss.cauchy.logpdf(mu, self.loc0, self.scale0)
[ "def", "logpdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "return", "ss", ".", "cauchy", ".", "logpdf", "(", "mu", ",", "self", ".", "loc0", ...
Log PDF for Cauchy prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu))
[ "Log", "PDF", "for", "Cauchy", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/cauchy.py#L155-L170
train
217,408
RJT1990/pyflux
pyflux/families/cauchy.py
Cauchy.pdf
def pdf(self, mu):
    """
    PDF for Cauchy prior

    Parameters
    ----------
    mu : float
        Latent variable for which the prior is being formed over

    Returns
    ----------
    - p(mu)
    """
    # Apply the latent-variable transform first. The original omitted this,
    # making pdf() inconsistent with logpdf() and with the pdf()/logpdf()
    # pairs of the other prior families (Poisson, InverseGamma).
    if self.transform is not None:
        mu = self.transform(mu)
    return ss.cauchy.pdf(mu, self.loc0, self.scale0)
python
def pdf(self, mu): """ PDF for Cauchy prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu) """ return ss.cauchy.pdf(mu, self.loc0, self.scale0)
[ "def", "pdf", "(", "self", ",", "mu", ")", ":", "return", "ss", ".", "cauchy", ".", "pdf", "(", "mu", ",", "self", ".", "loc0", ",", "self", ".", "scale0", ")" ]
PDF for Cauchy prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu)
[ "PDF", "for", "Cauchy", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/cauchy.py#L172-L185
train
217,409
RJT1990/pyflux
pyflux/families/cauchy.py
Cauchy.markov_blanket
def markov_blanket(y, mean, scale, shape, skewness):
    """
    Markov blanket for each likelihood term - used for state space models

    Parameters
    ----------
    y : np.ndarray
        univariate time series

    mean : np.ndarray
        array of location parameters for the Cauchy distribution

    scale : float
        scale parameter for the Cauchy distribution

    shape : float
        tail thickness parameter for the Cauchy distribution (unused)

    skewness : float
        skewness parameter for the Cauchy distribution (unused)

    Returns
    ----------
    - Markov blanket of the Cauchy family
    """
    # Pointwise log-density of each observation given its location.
    log_terms = ss.cauchy.logpdf(y, loc=mean, scale=scale)
    return log_terms
python
def markov_blanket(y, mean, scale, shape, skewness): """ Markov blanket for each likelihood term - used for state space models Parameters ---------- y : np.ndarray univariate time series mean : np.ndarray array of location parameters for the Cauchy distribution scale : float scale parameter for the Cauchy distribution shape : float tail thickness parameter for the Cauchy distribution skewness : float skewness parameter for the Cauchy distribution Returns ---------- - Markov blanket of the Cauchy family """ return ss.cauchy.logpdf(y, loc=mean, scale=scale)
[ "def", "markov_blanket", "(", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "ss", ".", "cauchy", ".", "logpdf", "(", "y", ",", "loc", "=", "mean", ",", "scale", "=", "scale", ")" ]
Markov blanket for each likelihood term - used for state space models Parameters ---------- y : np.ndarray univariate time series mean : np.ndarray array of location parameters for the Cauchy distribution scale : float scale parameter for the Cauchy distribution shape : float tail thickness parameter for the Cauchy distribution skewness : float skewness parameter for the Cauchy distribution Returns ---------- - Markov blanket of the Cauchy family
[ "Markov", "blanket", "for", "each", "likelihood", "term", "-", "used", "for", "state", "space", "models" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/cauchy.py#L188-L212
train
217,410
RJT1990/pyflux
pyflux/families/cauchy.py
Cauchy.setup
def setup():
    """
    Returns the attributes of this family if using in a probabilistic model

    Notes
    ----------
    - scale notes whether family has a variance parameter (sigma)
    - shape notes whether family has a tail thickness parameter (nu)
    - skewness notes whether family has a skewness parameter (gamma)
    - mean_transform is a function which transforms the location parameter
    - cythonized notes whether the family has cythonized routines

    Returns
    ----------
    - model name, link function, scale, shape, skewness, mean_transform, cythonized
    """
    return (
        "Cauchy",  # name
        np.array,  # link (identity-style)
        True,      # scale
        False,     # shape
        False,     # skewness
        np.array,  # mean_transform (identity-style)
        True,      # cythonized — used for GAS models
    )
python
def setup(): """ Returns the attributes of this family if using in a probabilistic model Notes ---------- - scale notes whether family has a variance parameter (sigma) - shape notes whether family has a tail thickness parameter (nu) - skewness notes whether family has a skewness parameter (gamma) - mean_transform is a function which transforms the location parameter - cythonized notes whether the family has cythonized routines Returns ---------- - model name, link function, scale, shape, skewness, mean_transform, cythonized """ name = "Cauchy" link = np.array scale = True shape = False skewness = False mean_transform = np.array cythonized = True # used for GAS models return name, link, scale, shape, skewness, mean_transform, cythonized
[ "def", "setup", "(", ")", ":", "name", "=", "\"Cauchy\"", "link", "=", "np", ".", "array", "scale", "=", "True", "shape", "=", "False", "skewness", "=", "False", "mean_transform", "=", "np", ".", "array", "cythonized", "=", "True", "# used for GAS models",...
Returns the attributes of this family if using in a probabilistic model Notes ---------- - scale notes whether family has a variance parameter (sigma) - shape notes whether family has a tail thickness parameter (nu) - skewness notes whether family has a skewness parameter (gamma) - mean_transform is a function which transforms the location parameter - cythonized notes whether the family has cythonized routines Returns ---------- - model name, link function, scale, shape, skewness, mean_transform, cythonized
[ "Returns", "the", "attributes", "of", "this", "family", "if", "using", "in", "a", "probabilistic", "model" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/cauchy.py#L215-L237
train
217,411
RJT1990/pyflux
pyflux/families/cauchy.py
Cauchy.neg_loglikelihood
def neg_loglikelihood(y, mean, scale, shape, skewness):
    """
    Negative loglikelihood function for this distribution

    Parameters
    ----------
    y : np.ndarray
        univariate time series

    mean : np.ndarray
        array of location parameters for the Cauchy distribution

    scale : float
        scale parameter for the Cauchy distribution

    shape : float
        tail thickness parameter for the Cauchy distribution (unused)

    skewness : float
        skewness parameter for the Cauchy distribution (unused)

    Returns
    ----------
    - Negative loglikelihood of the Cauchy family
    """
    log_terms = ss.cauchy.logpdf(y, loc=mean, scale=scale)
    return -np.sum(log_terms)
python
def neg_loglikelihood(y, mean, scale, shape, skewness): """ Negative loglikelihood function for this distribution Parameters ---------- y : np.ndarray univariate time series mean : np.ndarray array of location parameters for the Cauchy distribution scale : float scale parameter for the Cauchy distribution shape : float tail thickness parameter for the Cauchy distribution skewness : float skewness parameter for the Cauchy distribution Returns ---------- - Negative loglikelihood of the Cauchy family """ return -np.sum(ss.cauchy.logpdf(y, loc=mean, scale=scale))
[ "def", "neg_loglikelihood", "(", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "-", "np", ".", "sum", "(", "ss", ".", "cauchy", ".", "logpdf", "(", "y", ",", "loc", "=", "mean", ",", "scale", "=", "scale", ")"...
Negative loglikelihood function for this distribution Parameters ---------- y : np.ndarray univariate time series mean : np.ndarray array of location parameters for the Cauchy distribution scale : float scale parameter for the Cauchy distribution shape : float tail thickness parameter for the Cauchy distribution skewness : float skewness parameter for the Cauchy distribution Returns ---------- - Negative loglikelihood of the Cauchy family
[ "Negative", "loglikelihood", "function", "for", "this", "distribution" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/cauchy.py#L294-L318
train
217,412
RJT1990/pyflux
pyflux/families/cauchy.py
Cauchy.reg_score_function
def reg_score_function(X, y, mean, scale, shape, skewness):
    """
    GAS Cauchy Regression Update term using gradient only - native Python function

    Parameters
    ----------
    X : float
        datapoint for the right hand side variable

    y : float
        datapoint for the time series

    mean : float
        location parameter for the Cauchy distribution

    scale : float
        scale parameter for the Cauchy distribution

    shape : float
        tail thickness parameter for the Cauchy distribution (unused)

    skewness : float
        skewness parameter for the Cauchy distribution (unused)

    Returns
    ----------
    - Score of the Cauchy family
    """
    residual = y - mean
    return (2.0 * residual * X) / (scale ** 2 + residual ** 2)
python
def reg_score_function(X, y, mean, scale, shape, skewness): """ GAS Cauchy Regression Update term using gradient only - native Python function Parameters ---------- X : float datapoint for the right hand side variable y : float datapoint for the time series mean : float location parameter for the Cauchy distribution scale : float scale parameter for the Cauchy distribution shape : float tail thickness parameter for the Cauchy distribution skewness : float skewness parameter for the Cauchy distribution Returns ---------- - Score of the Cauchy family """ return 2.0*((y-mean)*X)/(np.power(scale,2)+np.power((y-mean),2))
[ "def", "reg_score_function", "(", "X", ",", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "2.0", "*", "(", "(", "y", "-", "mean", ")", "*", "X", ")", "/", "(", "np", ".", "power", "(", "scale", ",", "2", "...
GAS Cauchy Regression Update term using gradient only - native Python function Parameters ---------- X : float datapoint for the right hand side variable y : float datapoint for the time series mean : float location parameter for the Cauchy distribution scale : float scale parameter for the Cauchy distribution shape : float tail thickness parameter for the Cauchy distribution skewness : float skewness parameter for the Cauchy distribution Returns ---------- - Score of the Cauchy family
[ "GAS", "Cauchy", "Regression", "Update", "term", "using", "gradient", "only", "-", "native", "Python", "function" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/cauchy.py#L322-L349
train
217,413
RJT1990/pyflux
pyflux/arma/nnarx.py
NNARX.general_neg_loglik
def general_neg_loglik(self, beta):
    """
    Calculates the negative log-likelihood of the model

    Parameters
    ----------
    beta : np.ndarray
        Contains untransformed starting values for latent variables

    Returns
    ----------
    The negative loglikelihood of the model
    """
    # Mean signal and (possibly lagged) response from the model equation.
    mu, Y = self._model(beta)
    # Map every latent variable through its prior transform; only the
    # scale/shape/skewness entries are actually needed below.
    parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
    #TODO: Replace above with transformation that only acts on scale, shape, skewness in future (speed-up)
    model_scale, model_shape, model_skewness = self._get_scale_and_shape(parm)
    # Delegate to the family's own negative log-likelihood, applying the link
    # function to the model mean first.
    return self.family.neg_loglikelihood(Y, self.link(mu), model_scale, model_shape, model_skewness)
python
def general_neg_loglik(self, beta): """ Calculates the negative log-likelihood of the model Parameters ---------- beta : np.ndarray Contains untransformed starting values for latent variables Returns ---------- The negative logliklihood of the model """ mu, Y = self._model(beta) parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) #TODO: Replace above with transformation that only acts on scale, shape, skewness in future (speed-up) model_scale, model_shape, model_skewness = self._get_scale_and_shape(parm) return self.family.neg_loglikelihood(Y, self.link(mu), model_scale, model_shape, model_skewness)
[ "def", "general_neg_loglik", "(", "self", ",", "beta", ")", ":", "mu", ",", "Y", "=", "self", ".", "_model", "(", "beta", ")", "parm", "=", "np", ".", "array", "(", "[", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ...
Calculates the negative log-likelihood of the model Parameters ---------- beta : np.ndarray Contains untransformed starting values for latent variables Returns ---------- The negative logliklihood of the model
[ "Calculates", "the", "negative", "log", "-", "likelihood", "of", "the", "model" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/arma/nnarx.py#L430-L447
train
217,414
RJT1990/pyflux
pyflux/arma/nnarx.py
NNARX.plot_fit
def plot_fit(self, **kwargs):
    """
    Plots the fit of the model against the data

    Parameters
    ----------
    **kwargs
        figsize : tuple, optional
            Size of the figure; defaults to (10, 7).
    """
    # Imported lazily so the model can be used without a plotting backend.
    import matplotlib.pyplot as plt
    import seaborn as sns
    figsize = kwargs.get('figsize',(10,7))
    plt.figure(figsize=figsize)
    # Drop the first `ar` index entries: those observations are consumed as lags.
    date_index = self.index[self.ar:self.data.shape[0]]
    # Fitted signal and the aligned observed series at the estimated latents.
    mu, Y = self._model(self.latent_variables.get_z_values())
    plt.plot(date_index,Y,label='Data')
    plt.plot(date_index,mu,label='Filter',c='black')
    plt.title(self.data_name)
    plt.legend(loc=2)
    plt.show()
python
def plot_fit(self, **kwargs): """ Plots the fit of the model against the data """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) plt.figure(figsize=figsize) date_index = self.index[self.ar:self.data.shape[0]] mu, Y = self._model(self.latent_variables.get_z_values()) plt.plot(date_index,Y,label='Data') plt.plot(date_index,mu,label='Filter',c='black') plt.title(self.data_name) plt.legend(loc=2) plt.show()
[ "def", "plot_fit", "(", "self", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "seaborn", "as", "sns", "figsize", "=", "kwargs", ".", "get", "(", "'figsize'", ",", "(", "10", ",", "7", ")", ")", "pl...
Plots the fit of the model against the data
[ "Plots", "the", "fit", "of", "the", "model", "against", "the", "data" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/arma/nnarx.py#L487-L503
train
217,415
RJT1990/pyflux
pyflux/arma/nnarx.py
NNARX.predict_is
def predict_is(self, h=5, fit_once=True, fit_method='MLE', intervals=False, **kwargs):
    """
    Makes dynamic out-of-sample predictions with the estimated model on in-sample data

    Parameters
    ----------
    h : int (default : 5)
        How many steps would you like to forecast?

    fit_once : boolean (default: True)
        Fits only once before the in-sample prediction; if False, fits after every new datapoint

    fit_method : string
        Which method to fit the model with

    intervals : boolean
        Whether to return prediction intervals

    Returns
    ----------
    - pd.DataFrame with predicted values
    """
    predictions = []
    # Roll forward one step at a time: refit (or reuse latents) on the data
    # available up to each point, then one-step-ahead predict.
    for t in range(0,h):
        # Model trained only on data before the point being predicted.
        x = NNAR(ar=self.ar, units=self.units, layers=self.layers, data=self.data_original[:-h+t], family=self.family)
        if fit_once is False:
            # Re-estimate at every step.
            x.fit(method=fit_method, printer=False)
        if t == 0:
            if fit_once is True:
                # Fit once up front and cache the latent variables for reuse.
                x.fit(method=fit_method, printer=False)
                saved_lvs = x.latent_variables
            predictions = x.predict(1, intervals=intervals)
        else:
            if fit_once is True:
                # Reuse the latents estimated at t == 0.
                x.latent_variables = saved_lvs
            predictions = pd.concat([predictions,x.predict(1, intervals=intervals)])
    if intervals is True:
        predictions.rename(columns={0:self.data_name, 1: "1% Prediction Interval", 2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"}, inplace=True)
    else:
        predictions.rename(columns={0:self.data_name}, inplace=True)
    # Re-index the stacked one-step predictions onto the last h observations.
    predictions.index = self.index[-h:]
    return predictions
python
def predict_is(self, h=5, fit_once=True, fit_method='MLE', intervals=False, **kwargs): """ Makes dynamic out-of-sample predictions with the estimated model on in-sample data Parameters ---------- h : int (default : 5) How many steps would you like to forecast? fit_once : boolean (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint fit_method : string Which method to fit the model with intervals: boolean Whether to return prediction intervals Returns ---------- - pd.DataFrame with predicted values """ predictions = [] for t in range(0,h): x = NNAR(ar=self.ar, units=self.units, layers=self.layers, data=self.data_original[:-h+t], family=self.family) if fit_once is False: x.fit(method=fit_method, printer=False) if t == 0: if fit_once is True: x.fit(method=fit_method, printer=False) saved_lvs = x.latent_variables predictions = x.predict(1, intervals=intervals) else: if fit_once is True: x.latent_variables = saved_lvs predictions = pd.concat([predictions,x.predict(1, intervals=intervals)]) if intervals is True: predictions.rename(columns={0:self.data_name, 1: "1% Prediction Interval", 2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"}, inplace=True) else: predictions.rename(columns={0:self.data_name}, inplace=True) predictions.index = self.index[-h:] return predictions
[ "def", "predict_is", "(", "self", ",", "h", "=", "5", ",", "fit_once", "=", "True", ",", "fit_method", "=", "'MLE'", ",", "intervals", "=", "False", ",", "*", "*", "kwargs", ")", ":", "predictions", "=", "[", "]", "for", "t", "in", "range", "(", ...
Makes dynamic out-of-sample predictions with the estimated model on in-sample data Parameters ---------- h : int (default : 5) How many steps would you like to forecast? fit_once : boolean (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint fit_method : string Which method to fit the model with intervals: boolean Whether to return prediction intervals Returns ---------- - pd.DataFrame with predicted values
[ "Makes", "dynamic", "out", "-", "of", "-", "sample", "predictions", "with", "the", "estimated", "model", "on", "in", "-", "sample", "data" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/arma/nnarx.py#L557-L603
train
217,416
RJT1990/pyflux
pyflux/ssm/nllm.py
NLLEV.likelihood_markov_blanket
def likelihood_markov_blanket(self, beta): """ Creates likelihood markov blanket of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- - Negative loglikelihood """ states = beta[self.z_no:self.z_no+self.data_length] # the local level (untransformed) parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(self.z_no)]) # transformed distribution parameters scale, shape, skewness = self._get_scale_and_shape(parm) return self.family.markov_blanket(self.data, self.link(states), scale, shape, skewness)
python
def likelihood_markov_blanket(self, beta): """ Creates likelihood markov blanket of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- - Negative loglikelihood """ states = beta[self.z_no:self.z_no+self.data_length] # the local level (untransformed) parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(self.z_no)]) # transformed distribution parameters scale, shape, skewness = self._get_scale_and_shape(parm) return self.family.markov_blanket(self.data, self.link(states), scale, shape, skewness)
[ "def", "likelihood_markov_blanket", "(", "self", ",", "beta", ")", ":", "states", "=", "beta", "[", "self", ".", "z_no", ":", "self", ".", "z_no", "+", "self", ".", "data_length", "]", "# the local level (untransformed)", "parm", "=", "np", ".", "array", "...
Creates likelihood markov blanket of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- - Negative loglikelihood
[ "Creates", "likelihood", "markov", "blanket", "of", "the", "model" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/nllm.py#L135-L150
train
217,417
RJT1990/pyflux
pyflux/ssm/nllm.py
NLLEV._animate_bbvi
def _animate_bbvi(self,stored_latent_variables,stored_predictive_likelihood): """ Produces animated plot of BBVI optimization Returns ---------- None (changes model attributes) """ from matplotlib.animation import FuncAnimation, writers import matplotlib.pyplot as plt import seaborn as sns fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ud = BBVINLLMAnimate(ax,self.data,stored_latent_variables,self.index,self.z_no,self.link) anim = FuncAnimation(fig, ud, frames=np.arange(stored_latent_variables.shape[0]), init_func=ud.init, interval=10, blit=True) plt.plot(self.data) plt.xlabel("Time") plt.ylabel(self.data_name) plt.show()
python
def _animate_bbvi(self,stored_latent_variables,stored_predictive_likelihood): """ Produces animated plot of BBVI optimization Returns ---------- None (changes model attributes) """ from matplotlib.animation import FuncAnimation, writers import matplotlib.pyplot as plt import seaborn as sns fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ud = BBVINLLMAnimate(ax,self.data,stored_latent_variables,self.index,self.z_no,self.link) anim = FuncAnimation(fig, ud, frames=np.arange(stored_latent_variables.shape[0]), init_func=ud.init, interval=10, blit=True) plt.plot(self.data) plt.xlabel("Time") plt.ylabel(self.data_name) plt.show()
[ "def", "_animate_bbvi", "(", "self", ",", "stored_latent_variables", ",", "stored_predictive_likelihood", ")", ":", "from", "matplotlib", ".", "animation", "import", "FuncAnimation", ",", "writers", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "seab...
Produces animated plot of BBVI optimization Returns ---------- None (changes model attributes)
[ "Produces", "animated", "plot", "of", "BBVI", "optimization" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/nllm.py#L278-L298
train
217,418
RJT1990/pyflux
pyflux/ssm/nllm.py
NLLEV.initialize_approx_dist
def initialize_approx_dist(self, phi, start_diffuse, gaussian_latents): """ Initializes the appoximate distibution for the model Parameters ---------- phi : np.ndarray Latent variables start_diffuse: boolean Whether to start from diffuse values or not gaussian_latents: LatentVariables object Latent variables for the Gaussian approximation Returns ---------- BBVI fit object """ # Starting values for approximate distribution for i in range(len(self.latent_variables.z_list)): approx_dist = self.latent_variables.z_list[i].q if isinstance(approx_dist, fam.Normal): self.latent_variables.z_list[i].q.mu0 = phi[i] self.latent_variables.z_list[i].q.sigma0 = np.exp(-3.0) q_list = [k.q for k in self.latent_variables.z_list] # Get starting values for states T, Z, R, Q = self._ss_matrices(phi) H, mu = self.family.approximating_model(phi, T, Z, R, Q, gaussian_latents.get_z_values(transformed=True)[0], self.data) a, V = self.smoothed_state(self.data, phi, H, mu) V[0][0][0] = V[0][0][-1] for item in range(self.data_length): if start_diffuse is False: q_list.append(fam.Normal(a[0][item], np.sqrt(np.abs(V[0][0][item])))) else: q_list.append(fam.Normal(self.family.itransform(np.mean(self.data)), np.sqrt(np.abs(V[0][0][item])))) return q_list
python
def initialize_approx_dist(self, phi, start_diffuse, gaussian_latents): """ Initializes the appoximate distibution for the model Parameters ---------- phi : np.ndarray Latent variables start_diffuse: boolean Whether to start from diffuse values or not gaussian_latents: LatentVariables object Latent variables for the Gaussian approximation Returns ---------- BBVI fit object """ # Starting values for approximate distribution for i in range(len(self.latent_variables.z_list)): approx_dist = self.latent_variables.z_list[i].q if isinstance(approx_dist, fam.Normal): self.latent_variables.z_list[i].q.mu0 = phi[i] self.latent_variables.z_list[i].q.sigma0 = np.exp(-3.0) q_list = [k.q for k in self.latent_variables.z_list] # Get starting values for states T, Z, R, Q = self._ss_matrices(phi) H, mu = self.family.approximating_model(phi, T, Z, R, Q, gaussian_latents.get_z_values(transformed=True)[0], self.data) a, V = self.smoothed_state(self.data, phi, H, mu) V[0][0][0] = V[0][0][-1] for item in range(self.data_length): if start_diffuse is False: q_list.append(fam.Normal(a[0][item], np.sqrt(np.abs(V[0][0][item])))) else: q_list.append(fam.Normal(self.family.itransform(np.mean(self.data)), np.sqrt(np.abs(V[0][0][item])))) return q_list
[ "def", "initialize_approx_dist", "(", "self", ",", "phi", ",", "start_diffuse", ",", "gaussian_latents", ")", ":", "# Starting values for approximate distribution", "for", "i", "in", "range", "(", "len", "(", "self", ".", "latent_variables", ".", "z_list", ")", ")...
Initializes the appoximate distibution for the model Parameters ---------- phi : np.ndarray Latent variables start_diffuse: boolean Whether to start from diffuse values or not gaussian_latents: LatentVariables object Latent variables for the Gaussian approximation Returns ---------- BBVI fit object
[ "Initializes", "the", "appoximate", "distibution", "for", "the", "model" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/nllm.py#L442-L482
train
217,419
RJT1990/pyflux
pyflux/families/t.py
t.logpdf
def logpdf(self, mu): """ Log PDF for t prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) return ss.t.logpdf(mu, df=self.df0, loc=self.loc0, scale=self.scale0)
python
def logpdf(self, mu): """ Log PDF for t prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) return ss.t.logpdf(mu, df=self.df0, loc=self.loc0, scale=self.scale0)
[ "def", "logpdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "return", "ss", ".", "t", ".", "logpdf", "(", "mu", ",", "df", "=", "self", ".", ...
Log PDF for t prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu))
[ "Log", "PDF", "for", "t", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/t.py#L186-L201
train
217,420
RJT1990/pyflux
pyflux/families/t.py
t.second_order_score
def second_order_score(y, mean, scale, shape, skewness): """ GAS t Update term potentially using second-order information - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the t distribution scale : float scale parameter for the t distribution shape : float tail thickness parameter for the t distribution skewness : float skewness parameter for the t distribution Returns ---------- - Adjusted score of the t family """ return ((shape+1)/shape)*(y-mean)/(np.power(scale,2) + (np.power(y-mean,2)/shape))/((shape+1)*((np.power(scale,2)*shape) - np.power(y-mean,2))/np.power((np.power(scale,2)*shape) + np.power(y-mean,2),2))
python
def second_order_score(y, mean, scale, shape, skewness): """ GAS t Update term potentially using second-order information - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the t distribution scale : float scale parameter for the t distribution shape : float tail thickness parameter for the t distribution skewness : float skewness parameter for the t distribution Returns ---------- - Adjusted score of the t family """ return ((shape+1)/shape)*(y-mean)/(np.power(scale,2) + (np.power(y-mean,2)/shape))/((shape+1)*((np.power(scale,2)*shape) - np.power(y-mean,2))/np.power((np.power(scale,2)*shape) + np.power(y-mean,2),2))
[ "def", "second_order_score", "(", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "(", "(", "shape", "+", "1", ")", "/", "shape", ")", "*", "(", "y", "-", "mean", ")", "/", "(", "np", ".", "power", "(", "scale...
GAS t Update term potentially using second-order information - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the t distribution scale : float scale parameter for the t distribution shape : float tail thickness parameter for the t distribution skewness : float skewness parameter for the t distribution Returns ---------- - Adjusted score of the t family
[ "GAS", "t", "Update", "term", "potentially", "using", "second", "-", "order", "information", "-", "native", "Python", "function" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/t.py#L330-L354
train
217,421
RJT1990/pyflux
tools/cythonize.py
load_hashes
def load_hashes(filename): """Load the hashes dict from the hashfile""" # { filename : (sha1 of header if available or 'NA', # sha1 of input, # sha1 of output) } hashes = {} try: with open(filename, 'r') as cython_hash_file: for hash_record in cython_hash_file: (filename, header_hash, cython_hash, gen_file_hash) = hash_record.split() hashes[filename] = (header_hash, cython_hash, gen_file_hash) except (KeyError, ValueError, AttributeError, IOError): hashes = {} return hashes
python
def load_hashes(filename): """Load the hashes dict from the hashfile""" # { filename : (sha1 of header if available or 'NA', # sha1 of input, # sha1 of output) } hashes = {} try: with open(filename, 'r') as cython_hash_file: for hash_record in cython_hash_file: (filename, header_hash, cython_hash, gen_file_hash) = hash_record.split() hashes[filename] = (header_hash, cython_hash, gen_file_hash) except (KeyError, ValueError, AttributeError, IOError): hashes = {} return hashes
[ "def", "load_hashes", "(", "filename", ")", ":", "# { filename : (sha1 of header if available or 'NA',", "# sha1 of input,", "# sha1 of output) }", "hashes", "=", "{", "}", "try", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "...
Load the hashes dict from the hashfile
[ "Load", "the", "hashes", "dict", "from", "the", "hashfile" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/tools/cythonize.py#L81-L96
train
217,422
RJT1990/pyflux
tools/cythonize.py
save_hashes
def save_hashes(hashes, filename): """Save the hashes dict to the hashfile""" with open(filename, 'w') as cython_hash_file: for key, value in hashes.items(): cython_hash_file.write("%s %s %s %s\n" % (key, value[0], value[1], value[2]))
python
def save_hashes(hashes, filename): """Save the hashes dict to the hashfile""" with open(filename, 'w') as cython_hash_file: for key, value in hashes.items(): cython_hash_file.write("%s %s %s %s\n" % (key, value[0], value[1], value[2]))
[ "def", "save_hashes", "(", "hashes", ",", "filename", ")", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "cython_hash_file", ":", "for", "key", ",", "value", "in", "hashes", ".", "items", "(", ")", ":", "cython_hash_file", ".", "write", ...
Save the hashes dict to the hashfile
[ "Save", "the", "hashes", "dict", "to", "the", "hashfile" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/tools/cythonize.py#L99-L104
train
217,423
RJT1990/pyflux
tools/cythonize.py
clean_path
def clean_path(path): """Clean the path""" path = path.replace(os.sep, '/') if path.startswith('./'): path = path[2:] return path
python
def clean_path(path): """Clean the path""" path = path.replace(os.sep, '/') if path.startswith('./'): path = path[2:] return path
[ "def", "clean_path", "(", "path", ")", ":", "path", "=", "path", ".", "replace", "(", "os", ".", "sep", ",", "'/'", ")", "if", "path", ".", "startswith", "(", "'./'", ")", ":", "path", "=", "path", "[", "2", ":", "]", "return", "path" ]
Clean the path
[ "Clean", "the", "path" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/tools/cythonize.py#L114-L119
train
217,424
RJT1990/pyflux
tools/cythonize.py
get_hash_tuple
def get_hash_tuple(header_path, cython_path, gen_file_path): """Get the hashes from the given files""" header_hash = (sha1_of_file(header_path) if os.path.exists(header_path) else 'NA') from_hash = sha1_of_file(cython_path) to_hash = (sha1_of_file(gen_file_path) if os.path.exists(gen_file_path) else 'NA') return header_hash, from_hash, to_hash
python
def get_hash_tuple(header_path, cython_path, gen_file_path): """Get the hashes from the given files""" header_hash = (sha1_of_file(header_path) if os.path.exists(header_path) else 'NA') from_hash = sha1_of_file(cython_path) to_hash = (sha1_of_file(gen_file_path) if os.path.exists(gen_file_path) else 'NA') return header_hash, from_hash, to_hash
[ "def", "get_hash_tuple", "(", "header_path", ",", "cython_path", ",", "gen_file_path", ")", ":", "header_hash", "=", "(", "sha1_of_file", "(", "header_path", ")", "if", "os", ".", "path", ".", "exists", "(", "header_path", ")", "else", "'NA'", ")", "from_has...
Get the hashes from the given files
[ "Get", "the", "hashes", "from", "the", "given", "files" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/tools/cythonize.py#L122-L131
train
217,425
RJT1990/pyflux
pyflux/families/truncated_normal.py
TruncatedNormal.logpdf
def logpdf(self, mu): """ Log PDF for Truncated Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) if mu < self.lower and self.lower is not None: return -10.0**6 elif mu > self.upper and self.upper is not None: return -10.0**6 else: return -np.log(float(self.sigma0)) - (0.5*(mu-self.mu0)**2)/float(self.sigma0**2)
python
def logpdf(self, mu): """ Log PDF for Truncated Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) if mu < self.lower and self.lower is not None: return -10.0**6 elif mu > self.upper and self.upper is not None: return -10.0**6 else: return -np.log(float(self.sigma0)) - (0.5*(mu-self.mu0)**2)/float(self.sigma0**2)
[ "def", "logpdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "if", "mu", "<", "self", ".", "lower", "and", "self", ".", "lower", "is", "not", "...
Log PDF for Truncated Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu))
[ "Log", "PDF", "for", "Truncated", "Normal", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/truncated_normal.py#L42-L62
train
217,426
RJT1990/pyflux
pyflux/families/truncated_normal.py
TruncatedNormal.pdf
def pdf(self, mu): """ PDF for Truncated Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu) """ if self.transform is not None: mu = self.transform(mu) if mu < self.lower and self.lower is not None: return 0.0 elif mu > self.upper and self.upper is not None: return 0.0 else: return (1/float(self.sigma0))*np.exp(-(0.5*(mu-self.mu0)**2)/float(self.sigma0**2))
python
def pdf(self, mu): """ PDF for Truncated Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu) """ if self.transform is not None: mu = self.transform(mu) if mu < self.lower and self.lower is not None: return 0.0 elif mu > self.upper and self.upper is not None: return 0.0 else: return (1/float(self.sigma0))*np.exp(-(0.5*(mu-self.mu0)**2)/float(self.sigma0**2))
[ "def", "pdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "if", "mu", "<", "self", ".", "lower", "and", "self", ".", "lower", "is", "not", "Non...
PDF for Truncated Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu)
[ "PDF", "for", "Truncated", "Normal", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/truncated_normal.py#L64-L84
train
217,427
RJT1990/pyflux
pyflux/inference/metropolis_hastings.py
MetropolisHastings.tune_scale
def tune_scale(acceptance, scale): """ Tunes scale for M-H algorithm Parameters ---------- acceptance : float The most recent acceptance rate scale : float The current scale parameter Returns ---------- scale : float An adjusted scale parameter Notes ---------- Ross : Initially did this by trial and error, then refined by looking at other implementations, so some credit here to PyMC3 which became a guideline for this. """ if acceptance > 0.8: scale *= 2.0 elif acceptance <= 0.8 and acceptance > 0.4: scale *= 1.3 elif acceptance < 0.234 and acceptance > 0.1: scale *= (1/1.3) elif acceptance <= 0.1 and acceptance > 0.05: scale *= 0.4 elif acceptance <= 0.05 and acceptance > 0.01: scale *= 0.2 elif acceptance <= 0.01: scale *= 0.1 return scale
python
def tune_scale(acceptance, scale): """ Tunes scale for M-H algorithm Parameters ---------- acceptance : float The most recent acceptance rate scale : float The current scale parameter Returns ---------- scale : float An adjusted scale parameter Notes ---------- Ross : Initially did this by trial and error, then refined by looking at other implementations, so some credit here to PyMC3 which became a guideline for this. """ if acceptance > 0.8: scale *= 2.0 elif acceptance <= 0.8 and acceptance > 0.4: scale *= 1.3 elif acceptance < 0.234 and acceptance > 0.1: scale *= (1/1.3) elif acceptance <= 0.1 and acceptance > 0.05: scale *= 0.4 elif acceptance <= 0.05 and acceptance > 0.01: scale *= 0.2 elif acceptance <= 0.01: scale *= 0.1 return scale
[ "def", "tune_scale", "(", "acceptance", ",", "scale", ")", ":", "if", "acceptance", ">", "0.8", ":", "scale", "*=", "2.0", "elif", "acceptance", "<=", "0.8", "and", "acceptance", ">", "0.4", ":", "scale", "*=", "1.3", "elif", "acceptance", "<", "0.234", ...
Tunes scale for M-H algorithm Parameters ---------- acceptance : float The most recent acceptance rate scale : float The current scale parameter Returns ---------- scale : float An adjusted scale parameter Notes ---------- Ross : Initially did this by trial and error, then refined by looking at other implementations, so some credit here to PyMC3 which became a guideline for this.
[ "Tunes", "scale", "for", "M", "-", "H", "algorithm" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/inference/metropolis_hastings.py#L66-L100
train
217,428
RJT1990/pyflux
pyflux/families/normal.py
Normal.draw_variable_local
def draw_variable_local(self, size): """ Simulate from the Normal distribution using instance values Parameters ---------- size : int How many simulations to perform Returns ---------- np.ndarray of Normal random variable """ return ss.norm.rvs(loc=self.mu0, scale=self.sigma0, size=size)
python
def draw_variable_local(self, size): """ Simulate from the Normal distribution using instance values Parameters ---------- size : int How many simulations to perform Returns ---------- np.ndarray of Normal random variable """ return ss.norm.rvs(loc=self.mu0, scale=self.sigma0, size=size)
[ "def", "draw_variable_local", "(", "self", ",", "size", ")", ":", "return", "ss", ".", "norm", ".", "rvs", "(", "loc", "=", "self", ".", "mu0", ",", "scale", "=", "self", ".", "sigma0", ",", "size", "=", "size", ")" ]
Simulate from the Normal distribution using instance values Parameters ---------- size : int How many simulations to perform Returns ---------- np.ndarray of Normal random variable
[ "Simulate", "from", "the", "Normal", "distribution", "using", "instance", "values" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/normal.py#L154-L166
train
217,429
RJT1990/pyflux
pyflux/families/normal.py
Normal.first_order_score
def first_order_score(y, mean, scale, shape, skewness): """ GAS Normal Update term using gradient only - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Normal distribution scale : float scale parameter for the Normal distribution shape : float tail thickness parameter for the Normal distribution skewness : float skewness parameter for the Normal distribution Returns ---------- - Score of the Normal family """ return (y-mean)/np.power(scale,2)
python
def first_order_score(y, mean, scale, shape, skewness): """ GAS Normal Update term using gradient only - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Normal distribution scale : float scale parameter for the Normal distribution shape : float tail thickness parameter for the Normal distribution skewness : float skewness parameter for the Normal distribution Returns ---------- - Score of the Normal family """ return (y-mean)/np.power(scale,2)
[ "def", "first_order_score", "(", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "(", "y", "-", "mean", ")", "/", "np", ".", "power", "(", "scale", ",", "2", ")" ]
GAS Normal Update term using gradient only - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Normal distribution scale : float scale parameter for the Normal distribution shape : float tail thickness parameter for the Normal distribution skewness : float skewness parameter for the Normal distribution Returns ---------- - Score of the Normal family
[ "GAS", "Normal", "Update", "term", "using", "gradient", "only", "-", "native", "Python", "function" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/normal.py#L169-L193
train
217,430
RJT1990/pyflux
pyflux/families/normal.py
Normal.logpdf
def logpdf(self, mu): """ Log PDF for Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) return -np.log(float(self.sigma0)) - (0.5*(mu-self.mu0)**2)/float(self.sigma0**2)
python
def logpdf(self, mu): """ Log PDF for Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) return -np.log(float(self.sigma0)) - (0.5*(mu-self.mu0)**2)/float(self.sigma0**2)
[ "def", "logpdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "return", "-", "np", ".", "log", "(", "float", "(", "self", ".", "sigma0", ")", ")...
Log PDF for Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu))
[ "Log", "PDF", "for", "Normal", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/normal.py#L195-L210
train
217,431
RJT1990/pyflux
pyflux/families/normal.py
Normal.pdf
def pdf(self, mu): """ PDF for Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu) """ if self.transform is not None: mu = self.transform(mu) return (1.0/float(self.sigma0))*np.exp(-(0.5*(mu-self.mu0)**2)/float(self.sigma0**2))
python
def pdf(self, mu): """ PDF for Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu) """ if self.transform is not None: mu = self.transform(mu) return (1.0/float(self.sigma0))*np.exp(-(0.5*(mu-self.mu0)**2)/float(self.sigma0**2))
[ "def", "pdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "return", "(", "1.0", "/", "float", "(", "self", ".", "sigma0", ")", ")", "*", "np", ...
PDF for Normal prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu)
[ "PDF", "for", "Normal", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/normal.py#L291-L306
train
217,432
RJT1990/pyflux
pyflux/families/normal.py
Normal.vi_change_param
def vi_change_param(self, index, value): """ Wrapper function for changing latent variables - variational inference Parameters ---------- index : int 0 or 1 depending on which latent variable value : float What to change the latent variable to """ if index == 0: self.mu0 = value elif index == 1: self.sigma0 = np.exp(value)
python
def vi_change_param(self, index, value): """ Wrapper function for changing latent variables - variational inference Parameters ---------- index : int 0 or 1 depending on which latent variable value : float What to change the latent variable to """ if index == 0: self.mu0 = value elif index == 1: self.sigma0 = np.exp(value)
[ "def", "vi_change_param", "(", "self", ",", "index", ",", "value", ")", ":", "if", "index", "==", "0", ":", "self", ".", "mu0", "=", "value", "elif", "index", "==", "1", ":", "self", ".", "sigma0", "=", "np", ".", "exp", "(", "value", ")" ]
Wrapper function for changing latent variables - variational inference Parameters ---------- index : int 0 or 1 depending on which latent variable value : float What to change the latent variable to
[ "Wrapper", "function", "for", "changing", "latent", "variables", "-", "variational", "inference" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/normal.py#L365-L379
train
217,433
RJT1990/pyflux
pyflux/families/normal.py
Normal.vi_return_param
def vi_return_param(self, index): """ Wrapper function for selecting appropriate latent variable for variational inference Parameters ---------- index : int 0 or 1 depending on which latent variable Returns ---------- The appropriate indexed parameter """ if index == 0: return self.mu0 elif index == 1: return np.log(self.sigma0)
python
def vi_return_param(self, index): """ Wrapper function for selecting appropriate latent variable for variational inference Parameters ---------- index : int 0 or 1 depending on which latent variable Returns ---------- The appropriate indexed parameter """ if index == 0: return self.mu0 elif index == 1: return np.log(self.sigma0)
[ "def", "vi_return_param", "(", "self", ",", "index", ")", ":", "if", "index", "==", "0", ":", "return", "self", ".", "mu0", "elif", "index", "==", "1", ":", "return", "np", ".", "log", "(", "self", ".", "sigma0", ")" ]
Wrapper function for selecting appropriate latent variable for variational inference Parameters ---------- index : int 0 or 1 depending on which latent variable Returns ---------- The appropriate indexed parameter
[ "Wrapper", "function", "for", "selecting", "appropriate", "latent", "variable", "for", "variational", "inference" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/normal.py#L381-L396
train
217,434
RJT1990/pyflux
pyflux/families/normal.py
Normal.vi_score
def vi_score(self, x, index): """ Wrapper function for selecting appropriate score Parameters ---------- x : float A random variable index : int 0 or 1 depending on which latent variable Returns ---------- The gradient of the scale latent variable at x """ if index == 0: return self.vi_loc_score(x) elif index == 1: return self.vi_scale_score(x)
python
def vi_score(self, x, index): """ Wrapper function for selecting appropriate score Parameters ---------- x : float A random variable index : int 0 or 1 depending on which latent variable Returns ---------- The gradient of the scale latent variable at x """ if index == 0: return self.vi_loc_score(x) elif index == 1: return self.vi_scale_score(x)
[ "def", "vi_score", "(", "self", ",", "x", ",", "index", ")", ":", "if", "index", "==", "0", ":", "return", "self", ".", "vi_loc_score", "(", "x", ")", "elif", "index", "==", "1", ":", "return", "self", ".", "vi_scale_score", "(", "x", ")" ]
Wrapper function for selecting appropriate score Parameters ---------- x : float A random variable index : int 0 or 1 depending on which latent variable Returns ---------- The gradient of the scale latent variable at x
[ "Wrapper", "function", "for", "selecting", "appropriate", "score" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/normal.py#L426-L444
train
217,435
RJT1990/pyflux
pyflux/families/laplace.py
Laplace.first_order_score
def first_order_score(y, mean, scale, shape, skewness): """ GAS Laplace Update term using gradient only - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Laplace distribution scale : float scale parameter for the Laplace distribution shape : float tail thickness parameter for the Laplace distribution skewness : float skewness parameter for the Laplace distribution Returns ---------- - Score of the Laplace family """ return (y-mean)/float(scale*np.abs(y-mean))
python
def first_order_score(y, mean, scale, shape, skewness): """ GAS Laplace Update term using gradient only - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Laplace distribution scale : float scale parameter for the Laplace distribution shape : float tail thickness parameter for the Laplace distribution skewness : float skewness parameter for the Laplace distribution Returns ---------- - Score of the Laplace family """ return (y-mean)/float(scale*np.abs(y-mean))
[ "def", "first_order_score", "(", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "(", "y", "-", "mean", ")", "/", "float", "(", "scale", "*", "np", ".", "abs", "(", "y", "-", "mean", ")", ")" ]
GAS Laplace Update term using gradient only - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Laplace distribution scale : float scale parameter for the Laplace distribution shape : float tail thickness parameter for the Laplace distribution skewness : float skewness parameter for the Laplace distribution Returns ---------- - Score of the Laplace family
[ "GAS", "Laplace", "Update", "term", "using", "gradient", "only", "-", "native", "Python", "function" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/laplace.py#L156-L180
train
217,436
RJT1990/pyflux
pyflux/families/laplace.py
Laplace.logpdf
def logpdf(self, mu): """ Log PDF for Laplace prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) return ss.laplace.logpdf(mu, loc=self.loc0, scale=self.scale0)
python
def logpdf(self, mu): """ Log PDF for Laplace prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) return ss.laplace.logpdf(mu, loc=self.loc0, scale=self.scale0)
[ "def", "logpdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "return", "ss", ".", "laplace", ".", "logpdf", "(", "mu", ",", "loc", "=", "self", ...
Log PDF for Laplace prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu))
[ "Log", "PDF", "for", "Laplace", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/laplace.py#L182-L197
train
217,437
RJT1990/pyflux
pyflux/families/laplace.py
Laplace.pdf
def pdf(self, mu): """ PDF for Laplace prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu) """ if self.transform is not None: mu = self.transform(mu) return ss.laplace.pdf(mu, self.loc0, self.scale0)
python
def pdf(self, mu): """ PDF for Laplace prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu) """ if self.transform is not None: mu = self.transform(mu) return ss.laplace.pdf(mu, self.loc0, self.scale0)
[ "def", "pdf", "(", "self", ",", "mu", ")", ":", "if", "self", ".", "transform", "is", "not", "None", ":", "mu", "=", "self", ".", "transform", "(", "mu", ")", "return", "ss", ".", "laplace", ".", "pdf", "(", "mu", ",", "self", ".", "loc0", ",",...
PDF for Laplace prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu)
[ "PDF", "for", "Laplace", "prior" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/laplace.py#L278-L293
train
217,438
RJT1990/pyflux
pyflux/families/laplace.py
Laplace.second_order_score
def second_order_score(y, mean, scale, shape, skewness): """ GAS Laplace Update term potentially using second-order information - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Laplace distribution scale : float scale parameter for the Laplace distribution shape : float tail thickness parameter for the Laplace distribution skewness : float skewness parameter for the Laplace distribution Returns ---------- - Adjusted score of the Laplace family """ return ((y-mean)/float(scale*np.abs(y-mean))) / (-(np.power(y-mean,2) - np.power(np.abs(mean-y),2))/(scale*np.power(np.abs(mean-y),3)))
python
def second_order_score(y, mean, scale, shape, skewness): """ GAS Laplace Update term potentially using second-order information - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Laplace distribution scale : float scale parameter for the Laplace distribution shape : float tail thickness parameter for the Laplace distribution skewness : float skewness parameter for the Laplace distribution Returns ---------- - Adjusted score of the Laplace family """ return ((y-mean)/float(scale*np.abs(y-mean))) / (-(np.power(y-mean,2) - np.power(np.abs(mean-y),2))/(scale*np.power(np.abs(mean-y),3)))
[ "def", "second_order_score", "(", "y", ",", "mean", ",", "scale", ",", "shape", ",", "skewness", ")", ":", "return", "(", "(", "y", "-", "mean", ")", "/", "float", "(", "scale", "*", "np", ".", "abs", "(", "y", "-", "mean", ")", ")", ")", "/", ...
GAS Laplace Update term potentially using second-order information - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Laplace distribution scale : float scale parameter for the Laplace distribution shape : float tail thickness parameter for the Laplace distribution skewness : float skewness parameter for the Laplace distribution Returns ---------- - Adjusted score of the Laplace family
[ "GAS", "Laplace", "Update", "term", "potentially", "using", "second", "-", "order", "information", "-", "native", "Python", "function" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/laplace.py#L326-L350
train
217,439
RJT1990/pyflux
pyflux/data_check.py
data_check
def data_check(data,target): """ Checks data type Parameters ---------- data : pd.DataFrame or np.array Field to specify the time series data that will be used. target : int or str Target column Returns ---------- transformed_data : np.array Raw data array for use in the model data_name : str Name of the data is_pandas : Boolean True if pandas data, else numpy data_index : np.array The time indices for the data """ # Check pandas or numpy if isinstance(data, pd.DataFrame) or isinstance(data, pd.core.frame.DataFrame): data_index = data.index if target is None: transformed_data = data.ix[:,0].values data_name = str(data.columns.values[0]) else: transformed_data = data[target].values data_name = str(target) is_pandas = True elif isinstance(data, np.ndarray): data_name = "Series" is_pandas = False if any(isinstance(i, np.ndarray) for i in data): if target is None: transformed_data = data[0] data_index = list(range(len(data[0]))) else: transformed_data = data[target] data_index = list(range(len(data[target]))) else: transformed_data = data data_index = list(range(len(data))) else: raise Exception("The data input is not pandas or numpy compatible!") return transformed_data, data_name, is_pandas, data_index
python
def data_check(data,target): """ Checks data type Parameters ---------- data : pd.DataFrame or np.array Field to specify the time series data that will be used. target : int or str Target column Returns ---------- transformed_data : np.array Raw data array for use in the model data_name : str Name of the data is_pandas : Boolean True if pandas data, else numpy data_index : np.array The time indices for the data """ # Check pandas or numpy if isinstance(data, pd.DataFrame) or isinstance(data, pd.core.frame.DataFrame): data_index = data.index if target is None: transformed_data = data.ix[:,0].values data_name = str(data.columns.values[0]) else: transformed_data = data[target].values data_name = str(target) is_pandas = True elif isinstance(data, np.ndarray): data_name = "Series" is_pandas = False if any(isinstance(i, np.ndarray) for i in data): if target is None: transformed_data = data[0] data_index = list(range(len(data[0]))) else: transformed_data = data[target] data_index = list(range(len(data[target]))) else: transformed_data = data data_index = list(range(len(data))) else: raise Exception("The data input is not pandas or numpy compatible!") return transformed_data, data_name, is_pandas, data_index
[ "def", "data_check", "(", "data", ",", "target", ")", ":", "# Check pandas or numpy", "if", "isinstance", "(", "data", ",", "pd", ".", "DataFrame", ")", "or", "isinstance", "(", "data", ",", "pd", ".", "core", ".", "frame", ".", "DataFrame", ")", ":", ...
Checks data type Parameters ---------- data : pd.DataFrame or np.array Field to specify the time series data that will be used. target : int or str Target column Returns ---------- transformed_data : np.array Raw data array for use in the model data_name : str Name of the data is_pandas : Boolean True if pandas data, else numpy data_index : np.array The time indices for the data
[ "Checks", "data", "type" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/data_check.py#L4-L57
train
217,440
RJT1990/pyflux
pyflux/latent_variables.py
LatentVariables.add_z
def add_z(self, name, prior, q, index=True): """ Adds latent variable Parameters ---------- name : str Name of the latent variable prior : Prior object Which prior distribution? E.g. Normal(0,1) q : Distribution object Which distribution to use for variational approximation index : boolean Whether to index the variable in the z_indices dictionary Returns ---------- None (changes priors in LatentVariables object) """ self.z_list.append(LatentVariable(name,len(self.z_list),prior,q)) if index is True: self.z_indices[name] = {'start': len(self.z_list)-1, 'end': len(self.z_list)-1}
python
def add_z(self, name, prior, q, index=True): """ Adds latent variable Parameters ---------- name : str Name of the latent variable prior : Prior object Which prior distribution? E.g. Normal(0,1) q : Distribution object Which distribution to use for variational approximation index : boolean Whether to index the variable in the z_indices dictionary Returns ---------- None (changes priors in LatentVariables object) """ self.z_list.append(LatentVariable(name,len(self.z_list),prior,q)) if index is True: self.z_indices[name] = {'start': len(self.z_list)-1, 'end': len(self.z_list)-1}
[ "def", "add_z", "(", "self", ",", "name", ",", "prior", ",", "q", ",", "index", "=", "True", ")", ":", "self", ".", "z_list", ".", "append", "(", "LatentVariable", "(", "name", ",", "len", "(", "self", ".", "z_list", ")", ",", "prior", ",", "q", ...
Adds latent variable Parameters ---------- name : str Name of the latent variable prior : Prior object Which prior distribution? E.g. Normal(0,1) q : Distribution object Which distribution to use for variational approximation index : boolean Whether to index the variable in the z_indices dictionary Returns ---------- None (changes priors in LatentVariables object)
[ "Adds", "latent", "variable" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/latent_variables.py#L46-L70
train
217,441
RJT1990/pyflux
pyflux/latent_variables.py
LatentVariables.create
def create(self, name, dim, prior, q): """ Creates multiple latent variables Parameters ---------- name : str Name of the latent variable dim : list Dimension of the latent variable arrays prior : Prior object Which prior distribution? E.g. Normal(0,1) q : Distribution object Which distribution to use for variational approximation Returns ---------- None (changes priors in LatentVariables object) """ def rec(dim, prev=[]): if len(dim) > 0: return [rec(dim[1:], prev + [i]) for i in range(dim[0])] else: return "(" + ",".join([str(j) for j in prev]) + ")" indices = rec(dim) for f_dim in range(1, len(dim)): indices = sum(indices, []) if self.z_list is None: starting_index = 0 else: starting_index = len(self.z_list) self.z_indices[name] = {'start': starting_index, 'end': starting_index+len(indices)-1, 'dim': len(dim)} for index in indices: self.add_z(name + " " + index, prior, q, index=False)
python
def create(self, name, dim, prior, q): """ Creates multiple latent variables Parameters ---------- name : str Name of the latent variable dim : list Dimension of the latent variable arrays prior : Prior object Which prior distribution? E.g. Normal(0,1) q : Distribution object Which distribution to use for variational approximation Returns ---------- None (changes priors in LatentVariables object) """ def rec(dim, prev=[]): if len(dim) > 0: return [rec(dim[1:], prev + [i]) for i in range(dim[0])] else: return "(" + ",".join([str(j) for j in prev]) + ")" indices = rec(dim) for f_dim in range(1, len(dim)): indices = sum(indices, []) if self.z_list is None: starting_index = 0 else: starting_index = len(self.z_list) self.z_indices[name] = {'start': starting_index, 'end': starting_index+len(indices)-1, 'dim': len(dim)} for index in indices: self.add_z(name + " " + index, prior, q, index=False)
[ "def", "create", "(", "self", ",", "name", ",", "dim", ",", "prior", ",", "q", ")", ":", "def", "rec", "(", "dim", ",", "prev", "=", "[", "]", ")", ":", "if", "len", "(", "dim", ")", ">", "0", ":", "return", "[", "rec", "(", "dim", "[", "...
Creates multiple latent variables Parameters ---------- name : str Name of the latent variable dim : list Dimension of the latent variable arrays prior : Prior object Which prior distribution? E.g. Normal(0,1) q : Distribution object Which distribution to use for variational approximation Returns ---------- None (changes priors in LatentVariables object)
[ "Creates", "multiple", "latent", "variables" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/latent_variables.py#L72-L113
train
217,442
RJT1990/pyflux
pyflux/gpnarx/kernels.py
ARD.build_latent_variables
def build_latent_variables(self): """ Builds latent variables for this kernel Returns ---------- - A list of lists (each sub-list contains latent variable information) """ lvs_to_build = [] lvs_to_build.append(['Noise Sigma^2', fam.Flat(transform='exp'), fam.Normal(0,3), -1.0]) for lag in range(self.X.shape[1]): lvs_to_build.append(['l lag' + str(lag+1), fam.FLat(transform='exp'), fam.Normal(0,3), -1.0]) lvs_to_build.append(['tau', fam.Flat(transform='exp'), fam.Normal(0,3), -1.0]) return lvs_to_build
python
def build_latent_variables(self): """ Builds latent variables for this kernel Returns ---------- - A list of lists (each sub-list contains latent variable information) """ lvs_to_build = [] lvs_to_build.append(['Noise Sigma^2', fam.Flat(transform='exp'), fam.Normal(0,3), -1.0]) for lag in range(self.X.shape[1]): lvs_to_build.append(['l lag' + str(lag+1), fam.FLat(transform='exp'), fam.Normal(0,3), -1.0]) lvs_to_build.append(['tau', fam.Flat(transform='exp'), fam.Normal(0,3), -1.0]) return lvs_to_build
[ "def", "build_latent_variables", "(", "self", ")", ":", "lvs_to_build", "=", "[", "]", "lvs_to_build", ".", "append", "(", "[", "'Noise Sigma^2'", ",", "fam", ".", "Flat", "(", "transform", "=", "'exp'", ")", ",", "fam", ".", "Normal", "(", "0", ",", "...
Builds latent variables for this kernel Returns ---------- - A list of lists (each sub-list contains latent variable information)
[ "Builds", "latent", "variables", "for", "this", "kernel" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/gpnarx/kernels.py#L160-L172
train
217,443
RJT1990/pyflux
pyflux/arma/nnar.py
NNAR._ar_matrix
def _ar_matrix(self): """ Creates Autoregressive matrix Returns ---------- X : np.ndarray Autoregressive Matrix """ Y = np.array(self.data[self.max_lag:self.data.shape[0]]) X = self.data[(self.max_lag-1):-1] if self.ar != 0: for i in range(1, self.ar): X = np.vstack((X,self.data[(self.max_lag-i-1):-i-1])) return X
python
def _ar_matrix(self): """ Creates Autoregressive matrix Returns ---------- X : np.ndarray Autoregressive Matrix """ Y = np.array(self.data[self.max_lag:self.data.shape[0]]) X = self.data[(self.max_lag-1):-1] if self.ar != 0: for i in range(1, self.ar): X = np.vstack((X,self.data[(self.max_lag-i-1):-i-1])) return X
[ "def", "_ar_matrix", "(", "self", ")", ":", "Y", "=", "np", ".", "array", "(", "self", ".", "data", "[", "self", ".", "max_lag", ":", "self", ".", "data", ".", "shape", "[", "0", "]", "]", ")", "X", "=", "self", ".", "data", "[", "(", "self",...
Creates Autoregressive matrix Returns ---------- X : np.ndarray Autoregressive Matrix
[ "Creates", "Autoregressive", "matrix" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/arma/nnar.py#L118-L134
train
217,444
RJT1990/pyflux
pyflux/ensembles/mixture_of_experts.py
Aggregate.predict_is
def predict_is(self, h): """ Outputs predictions for the Aggregate algorithm on the in-sample data Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - pd.DataFrame of ensemble predictions """ result = pd.DataFrame([self.run(h=h)[2]]).T result.index = self.index[-h:] return result
python
def predict_is(self, h): """ Outputs predictions for the Aggregate algorithm on the in-sample data Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - pd.DataFrame of ensemble predictions """ result = pd.DataFrame([self.run(h=h)[2]]).T result.index = self.index[-h:] return result
[ "def", "predict_is", "(", "self", ",", "h", ")", ":", "result", "=", "pd", ".", "DataFrame", "(", "[", "self", ".", "run", "(", "h", "=", "h", ")", "[", "2", "]", "]", ")", ".", "T", "result", ".", "index", "=", "self", ".", "index", "[", "...
Outputs predictions for the Aggregate algorithm on the in-sample data Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - pd.DataFrame of ensemble predictions
[ "Outputs", "predictions", "for", "the", "Aggregate", "algorithm", "on", "the", "in", "-", "sample", "data" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ensembles/mixture_of_experts.py#L335-L349
train
217,445
RJT1990/pyflux
pyflux/ensembles/mixture_of_experts.py
Aggregate.summary
def summary(self, h): """ Summarize the results for each model for h steps of the algorithm Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - pd.DataFrame of losses for each model """ _, losses, _ = self.run(h=h) df = pd.DataFrame(losses) df.index = ['Ensemble'] + self.model_names df.columns = [self.loss_name] return df
python
def summary(self, h): """ Summarize the results for each model for h steps of the algorithm Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - pd.DataFrame of losses for each model """ _, losses, _ = self.run(h=h) df = pd.DataFrame(losses) df.index = ['Ensemble'] + self.model_names df.columns = [self.loss_name] return df
[ "def", "summary", "(", "self", ",", "h", ")", ":", "_", ",", "losses", ",", "_", "=", "self", ".", "run", "(", "h", "=", "h", ")", "df", "=", "pd", ".", "DataFrame", "(", "losses", ")", "df", ".", "index", "=", "[", "'Ensemble'", "]", "+", ...
Summarize the results for each model for h steps of the algorithm Parameters ---------- h : int How many steps to run the aggregating algorithm on Returns ---------- - pd.DataFrame of losses for each model
[ "Summarize", "the", "results", "for", "each", "model", "for", "h", "steps", "of", "the", "algorithm" ]
297f2afc2095acd97c12e827dd500e8ea5da0c0f
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ensembles/mixture_of_experts.py#L351-L368
train
217,446
vtkiorg/vtki
vtki/container.py
MultiBlock.extract_geometry
def extract_geometry(self): """Combines the geomertry of all blocks into a single ``PolyData`` object. Place this filter at the end of a pipeline before a polydata consumer such as a polydata mapper to extract geometry from all blocks and append them to one polydata object. """ gf = vtk.vtkCompositeDataGeometryFilter() gf.SetInputData(self) gf.Update() return wrap(gf.GetOutputDataObject(0))
python
def extract_geometry(self): """Combines the geomertry of all blocks into a single ``PolyData`` object. Place this filter at the end of a pipeline before a polydata consumer such as a polydata mapper to extract geometry from all blocks and append them to one polydata object. """ gf = vtk.vtkCompositeDataGeometryFilter() gf.SetInputData(self) gf.Update() return wrap(gf.GetOutputDataObject(0))
[ "def", "extract_geometry", "(", "self", ")", ":", "gf", "=", "vtk", ".", "vtkCompositeDataGeometryFilter", "(", ")", "gf", ".", "SetInputData", "(", "self", ")", "gf", ".", "Update", "(", ")", "return", "wrap", "(", "gf", ".", "GetOutputDataObject", "(", ...
Combines the geomertry of all blocks into a single ``PolyData`` object. Place this filter at the end of a pipeline before a polydata consumer such as a polydata mapper to extract geometry from all blocks and append them to one polydata object.
[ "Combines", "the", "geomertry", "of", "all", "blocks", "into", "a", "single", "PolyData", "object", ".", "Place", "this", "filter", "at", "the", "end", "of", "a", "pipeline", "before", "a", "polydata", "consumer", "such", "as", "a", "polydata", "mapper", "...
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L58-L67
train
217,447
vtkiorg/vtki
vtki/container.py
MultiBlock.combine
def combine(self, merge_points=False): """Appends all blocks into a single unstructured grid. Parameters ---------- merge_points : bool, optional Merge coincidental points. """ alg = vtk.vtkAppendFilter() for block in self: alg.AddInputData(block) alg.SetMergePoints(merge_points) alg.Update() return wrap(alg.GetOutputDataObject(0))
python
def combine(self, merge_points=False): """Appends all blocks into a single unstructured grid. Parameters ---------- merge_points : bool, optional Merge coincidental points. """ alg = vtk.vtkAppendFilter() for block in self: alg.AddInputData(block) alg.SetMergePoints(merge_points) alg.Update() return wrap(alg.GetOutputDataObject(0))
[ "def", "combine", "(", "self", ",", "merge_points", "=", "False", ")", ":", "alg", "=", "vtk", ".", "vtkAppendFilter", "(", ")", "for", "block", "in", "self", ":", "alg", ".", "AddInputData", "(", "block", ")", "alg", ".", "SetMergePoints", "(", "merge...
Appends all blocks into a single unstructured grid. Parameters ---------- merge_points : bool, optional Merge coincidental points.
[ "Appends", "all", "blocks", "into", "a", "single", "unstructured", "grid", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L69-L83
train
217,448
vtkiorg/vtki
vtki/container.py
MultiBlock.save
def save(self, filename, binary=True): """ Writes a ``MultiBlock`` dataset to disk. Written file may be an ASCII or binary vtm file. Parameters ---------- filename : str Filename of mesh to be written. File type is inferred from the extension of the filename unless overridden with ftype. Can be one of the following types (.vtm or .vtmb) binary : bool, optional Writes the file as binary when True and ASCII when False. Notes ----- Binary files write much faster than ASCII and have a smaller file size. """ filename = os.path.abspath(os.path.expanduser(filename)) ext = vtki.get_ext(filename) if ext in ['.vtm', '.vtmb']: writer = vtk.vtkXMLMultiBlockDataWriter() else: raise Exception('File extension must be either "vtm" or "vtmb"') writer.SetFileName(filename) writer.SetInputDataObject(self) if binary: writer.SetDataModeToBinary() else: writer.SetDataModeToAscii() writer.Write() return
python
def save(self, filename, binary=True): """ Writes a ``MultiBlock`` dataset to disk. Written file may be an ASCII or binary vtm file. Parameters ---------- filename : str Filename of mesh to be written. File type is inferred from the extension of the filename unless overridden with ftype. Can be one of the following types (.vtm or .vtmb) binary : bool, optional Writes the file as binary when True and ASCII when False. Notes ----- Binary files write much faster than ASCII and have a smaller file size. """ filename = os.path.abspath(os.path.expanduser(filename)) ext = vtki.get_ext(filename) if ext in ['.vtm', '.vtmb']: writer = vtk.vtkXMLMultiBlockDataWriter() else: raise Exception('File extension must be either "vtm" or "vtmb"') writer.SetFileName(filename) writer.SetInputDataObject(self) if binary: writer.SetDataModeToBinary() else: writer.SetDataModeToAscii() writer.Write() return
[ "def", "save", "(", "self", ",", "filename", ",", "binary", "=", "True", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "filename", ")", ")", "ext", "=", "vtki", ".", "get_ext", "(", ...
Writes a ``MultiBlock`` dataset to disk. Written file may be an ASCII or binary vtm file. Parameters ---------- filename : str Filename of mesh to be written. File type is inferred from the extension of the filename unless overridden with ftype. Can be one of the following types (.vtm or .vtmb) binary : bool, optional Writes the file as binary when True and ASCII when False. Notes ----- Binary files write much faster than ASCII and have a smaller file size.
[ "Writes", "a", "MultiBlock", "dataset", "to", "disk", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L111-L146
train
217,449
vtkiorg/vtki
vtki/container.py
MultiBlock.get_index_by_name
def get_index_by_name(self, name): """Find the index number by block name""" for i in range(self.n_blocks): if self.get_block_name(i) == name: return i raise KeyError('Block name ({}) not found'.format(name))
python
def get_index_by_name(self, name): """Find the index number by block name""" for i in range(self.n_blocks): if self.get_block_name(i) == name: return i raise KeyError('Block name ({}) not found'.format(name))
[ "def", "get_index_by_name", "(", "self", ",", "name", ")", ":", "for", "i", "in", "range", "(", "self", ".", "n_blocks", ")", ":", "if", "self", ".", "get_block_name", "(", "i", ")", "==", "name", ":", "return", "i", "raise", "KeyError", "(", "'Block...
Find the index number by block name
[ "Find", "the", "index", "number", "by", "block", "name" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L211-L216
train
217,450
vtkiorg/vtki
vtki/container.py
MultiBlock.append
def append(self, data): """Add a data set to the next block index""" index = self.n_blocks # note off by one so use as index self[index] = data self.refs.append(data)
python
def append(self, data): """Add a data set to the next block index""" index = self.n_blocks # note off by one so use as index self[index] = data self.refs.append(data)
[ "def", "append", "(", "self", ",", "data", ")", ":", "index", "=", "self", ".", "n_blocks", "# note off by one so use as index", "self", "[", "index", "]", "=", "data", "self", ".", "refs", ".", "append", "(", "data", ")" ]
Add a data set to the next block index
[ "Add", "a", "data", "set", "to", "the", "next", "block", "index" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L234-L238
train
217,451
vtkiorg/vtki
vtki/container.py
MultiBlock.set_block_name
def set_block_name(self, index, name): """Set a block's string name at the specified index""" if name is None: return self.GetMetaData(index).Set(vtk.vtkCompositeDataSet.NAME(), name) self.Modified()
python
def set_block_name(self, index, name): """Set a block's string name at the specified index""" if name is None: return self.GetMetaData(index).Set(vtk.vtkCompositeDataSet.NAME(), name) self.Modified()
[ "def", "set_block_name", "(", "self", ",", "index", ",", "name", ")", ":", "if", "name", "is", "None", ":", "return", "self", ".", "GetMetaData", "(", "index", ")", ".", "Set", "(", "vtk", ".", "vtkCompositeDataSet", ".", "NAME", "(", ")", ",", "name...
Set a block's string name at the specified index
[ "Set", "a", "block", "s", "string", "name", "at", "the", "specified", "index" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L247-L252
train
217,452
vtkiorg/vtki
vtki/container.py
MultiBlock.get_block_name
def get_block_name(self, index): """Returns the string name of the block at the given index""" meta = self.GetMetaData(index) if meta is not None: return meta.Get(vtk.vtkCompositeDataSet.NAME()) return None
python
def get_block_name(self, index): """Returns the string name of the block at the given index""" meta = self.GetMetaData(index) if meta is not None: return meta.Get(vtk.vtkCompositeDataSet.NAME()) return None
[ "def", "get_block_name", "(", "self", ",", "index", ")", ":", "meta", "=", "self", ".", "GetMetaData", "(", "index", ")", "if", "meta", "is", "not", "None", ":", "return", "meta", ".", "Get", "(", "vtk", ".", "vtkCompositeDataSet", ".", "NAME", "(", ...
Returns the string name of the block at the given index
[ "Returns", "the", "string", "name", "of", "the", "block", "at", "the", "given", "index" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L255-L260
train
217,453
vtkiorg/vtki
vtki/container.py
MultiBlock.keys
def keys(self): """Get all the block names in the dataset""" names = [] for i in range(self.n_blocks): names.append(self.get_block_name(i)) return names
python
def keys(self): """Get all the block names in the dataset""" names = [] for i in range(self.n_blocks): names.append(self.get_block_name(i)) return names
[ "def", "keys", "(", "self", ")", ":", "names", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "n_blocks", ")", ":", "names", ".", "append", "(", "self", ".", "get_block_name", "(", "i", ")", ")", "return", "names" ]
Get all the block names in the dataset
[ "Get", "all", "the", "block", "names", "in", "the", "dataset" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L263-L268
train
217,454
vtkiorg/vtki
vtki/container.py
MultiBlock.next
def next(self): """Get the next block from the iterator""" if self._iter_n < self.n_blocks: result = self[self._iter_n] self._iter_n += 1 return result else: raise StopIteration
python
def next(self): """Get the next block from the iterator""" if self._iter_n < self.n_blocks: result = self[self._iter_n] self._iter_n += 1 return result else: raise StopIteration
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_iter_n", "<", "self", ".", "n_blocks", ":", "result", "=", "self", "[", "self", ".", "_iter_n", "]", "self", ".", "_iter_n", "+=", "1", "return", "result", "else", ":", "raise", "StopIteratio...
Get the next block from the iterator
[ "Get", "the", "next", "block", "from", "the", "iterator" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L321-L328
train
217,455
vtkiorg/vtki
vtki/container.py
MultiBlock._repr_html_
def _repr_html_(self): """A pretty representation for Jupyter notebooks""" fmt = "" fmt += "<table>" fmt += "<tr><th>Information</th><th>Blocks</th></tr>" fmt += "<tr><td>" fmt += "\n" fmt += "<table>\n" fmt += "<tr><th>{}</th><th>Values</th></tr>\n".format(type(self).__name__) row = "<tr><td>{}</td><td>{}</td></tr>\n" # now make a call on the object to get its attributes as a list of len 2 tuples for attr in self._get_attrs(): try: fmt += row.format(attr[0], attr[2].format(*attr[1])) except: fmt += row.format(attr[0], attr[2].format(attr[1])) fmt += "</table>\n" fmt += "\n" fmt += "</td><td>" fmt += "\n" fmt += "<table>\n" row = "<tr><th>{}</th><th>{}</th><th>{}</th></tr>\n" fmt += row.format("Index", "Name", "Type") for i in range(self.n_blocks): data = self[i] fmt += row.format(i, self.get_block_name(i), type(data).__name__) fmt += "</table>\n" fmt += "\n" fmt += "</td></tr> </table>" return fmt
python
def _repr_html_(self): """A pretty representation for Jupyter notebooks""" fmt = "" fmt += "<table>" fmt += "<tr><th>Information</th><th>Blocks</th></tr>" fmt += "<tr><td>" fmt += "\n" fmt += "<table>\n" fmt += "<tr><th>{}</th><th>Values</th></tr>\n".format(type(self).__name__) row = "<tr><td>{}</td><td>{}</td></tr>\n" # now make a call on the object to get its attributes as a list of len 2 tuples for attr in self._get_attrs(): try: fmt += row.format(attr[0], attr[2].format(*attr[1])) except: fmt += row.format(attr[0], attr[2].format(attr[1])) fmt += "</table>\n" fmt += "\n" fmt += "</td><td>" fmt += "\n" fmt += "<table>\n" row = "<tr><th>{}</th><th>{}</th><th>{}</th></tr>\n" fmt += row.format("Index", "Name", "Type") for i in range(self.n_blocks): data = self[i] fmt += row.format(i, self.get_block_name(i), type(data).__name__) fmt += "</table>\n" fmt += "\n" fmt += "</td></tr> </table>" return fmt
[ "def", "_repr_html_", "(", "self", ")", ":", "fmt", "=", "\"\"", "fmt", "+=", "\"<table>\"", "fmt", "+=", "\"<tr><th>Information</th><th>Blocks</th></tr>\"", "fmt", "+=", "\"<tr><td>\"", "fmt", "+=", "\"\\n\"", "fmt", "+=", "\"<table>\\n\"", "fmt", "+=", "\"<tr><t...
A pretty representation for Jupyter notebooks
[ "A", "pretty", "representation", "for", "Jupyter", "notebooks" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/container.py#L367-L400
train
217,456
vtkiorg/vtki
vtki/geometric_objects.py
translate
def translate(surf, center, direction): """ Translates and orientates a mesh centered at the origin and facing in the x direction to a new center and direction """ normx = np.array(direction)/np.linalg.norm(direction) normz = np.cross(normx, [0, 1.0, 0.0000001]) normz /= np.linalg.norm(normz) normy = np.cross(normz, normx) trans = np.zeros((4, 4)) trans[:3, 0] = normx trans[:3, 1] = normy trans[:3, 2] = normz trans[3, 3] = 1 surf.transform(trans) surf.points += np.array(center)
python
def translate(surf, center, direction): """ Translates and orientates a mesh centered at the origin and facing in the x direction to a new center and direction """ normx = np.array(direction)/np.linalg.norm(direction) normz = np.cross(normx, [0, 1.0, 0.0000001]) normz /= np.linalg.norm(normz) normy = np.cross(normz, normx) trans = np.zeros((4, 4)) trans[:3, 0] = normx trans[:3, 1] = normy trans[:3, 2] = normz trans[3, 3] = 1 surf.transform(trans) surf.points += np.array(center)
[ "def", "translate", "(", "surf", ",", "center", ",", "direction", ")", ":", "normx", "=", "np", ".", "array", "(", "direction", ")", "/", "np", ".", "linalg", ".", "norm", "(", "direction", ")", "normz", "=", "np", ".", "cross", "(", "normx", ",", ...
Translates and orientates a mesh centered at the origin and facing in the x direction to a new center and direction
[ "Translates", "and", "orientates", "a", "mesh", "centered", "at", "the", "origin", "and", "facing", "in", "the", "x", "direction", "to", "a", "new", "center", "and", "direction" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/geometric_objects.py#L24-L41
train
217,457
vtkiorg/vtki
vtki/geometric_objects.py
Cylinder
def Cylinder(center=(0.,0.,0.), direction=(1.,0.,0.), radius=0.5, height=1.0, resolution=100, **kwargs): """ Create the surface of a cylinder. Parameters ---------- center : list or np.ndarray Location of the centroid in [x, y, z] direction : list or np.ndarray Direction cylinder points to in [x, y, z] radius : float Radius of the cylinder. height : float Height of the cylinder. resolution : int Number of points on the circular face of the cylinder. capping : bool, optional Cap cylinder ends with polygons. Default True Returns ------- cylinder : vtki.PolyData Cylinder surface. Examples -------- >>> import vtki >>> import numpy as np >>> cylinder = vtki.Cylinder(np.array([1, 2, 3]), np.array([1, 1, 1]), 1, 1) >>> cylinder.plot() # doctest:+SKIP """ capping = kwargs.get('capping', kwargs.get('cap_ends', True)) cylinderSource = vtk.vtkCylinderSource() cylinderSource.SetRadius(radius) cylinderSource.SetHeight(height) cylinderSource.SetCapping(capping) cylinderSource.SetResolution(resolution) cylinderSource.Update() surf = PolyData(cylinderSource.GetOutput()) surf.rotate_z(-90) translate(surf, center, direction) return surf
python
def Cylinder(center=(0.,0.,0.), direction=(1.,0.,0.), radius=0.5, height=1.0, resolution=100, **kwargs): """ Create the surface of a cylinder. Parameters ---------- center : list or np.ndarray Location of the centroid in [x, y, z] direction : list or np.ndarray Direction cylinder points to in [x, y, z] radius : float Radius of the cylinder. height : float Height of the cylinder. resolution : int Number of points on the circular face of the cylinder. capping : bool, optional Cap cylinder ends with polygons. Default True Returns ------- cylinder : vtki.PolyData Cylinder surface. Examples -------- >>> import vtki >>> import numpy as np >>> cylinder = vtki.Cylinder(np.array([1, 2, 3]), np.array([1, 1, 1]), 1, 1) >>> cylinder.plot() # doctest:+SKIP """ capping = kwargs.get('capping', kwargs.get('cap_ends', True)) cylinderSource = vtk.vtkCylinderSource() cylinderSource.SetRadius(radius) cylinderSource.SetHeight(height) cylinderSource.SetCapping(capping) cylinderSource.SetResolution(resolution) cylinderSource.Update() surf = PolyData(cylinderSource.GetOutput()) surf.rotate_z(-90) translate(surf, center, direction) return surf
[ "def", "Cylinder", "(", "center", "=", "(", "0.", ",", "0.", ",", "0.", ")", ",", "direction", "=", "(", "1.", ",", "0.", ",", "0.", ")", ",", "radius", "=", "0.5", ",", "height", "=", "1.0", ",", "resolution", "=", "100", ",", "*", "*", "kwa...
Create the surface of a cylinder. Parameters ---------- center : list or np.ndarray Location of the centroid in [x, y, z] direction : list or np.ndarray Direction cylinder points to in [x, y, z] radius : float Radius of the cylinder. height : float Height of the cylinder. resolution : int Number of points on the circular face of the cylinder. capping : bool, optional Cap cylinder ends with polygons. Default True Returns ------- cylinder : vtki.PolyData Cylinder surface. Examples -------- >>> import vtki >>> import numpy as np >>> cylinder = vtki.Cylinder(np.array([1, 2, 3]), np.array([1, 1, 1]), 1, 1) >>> cylinder.plot() # doctest:+SKIP
[ "Create", "the", "surface", "of", "a", "cylinder", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/geometric_objects.py#L44-L93
train
217,458
vtkiorg/vtki
vtki/geometric_objects.py
Arrow
def Arrow(start=(0.,0.,0.), direction=(1.,0.,0.), tip_length=0.25, tip_radius=0.1, shaft_radius=0.05, shaft_resolution=20): """ Create a vtk Arrow Parameters ---------- start : np.ndarray Start location in [x, y, z] direction : list or np.ndarray Direction the arrow points to in [x, y, z] tip_length : float, optional Length of the tip. tip_radius : float, optional Radius of the tip. shaft_radius : float, optional Radius of the shaft. shaft_resolution : int, optional Number of faces around the shaft Returns ------- arrow : vtki.PolyData Arrow surface. """ # Create arrow object arrow = vtk.vtkArrowSource() arrow.SetTipLength(tip_length) arrow.SetTipRadius(tip_radius) arrow.SetShaftRadius(shaft_radius) arrow.SetShaftResolution(shaft_resolution) arrow.Update() surf = PolyData(arrow.GetOutput()) translate(surf, start, direction) return surf
python
def Arrow(start=(0.,0.,0.), direction=(1.,0.,0.), tip_length=0.25, tip_radius=0.1, shaft_radius=0.05, shaft_resolution=20): """ Create a vtk Arrow Parameters ---------- start : np.ndarray Start location in [x, y, z] direction : list or np.ndarray Direction the arrow points to in [x, y, z] tip_length : float, optional Length of the tip. tip_radius : float, optional Radius of the tip. shaft_radius : float, optional Radius of the shaft. shaft_resolution : int, optional Number of faces around the shaft Returns ------- arrow : vtki.PolyData Arrow surface. """ # Create arrow object arrow = vtk.vtkArrowSource() arrow.SetTipLength(tip_length) arrow.SetTipRadius(tip_radius) arrow.SetShaftRadius(shaft_radius) arrow.SetShaftResolution(shaft_resolution) arrow.Update() surf = PolyData(arrow.GetOutput()) translate(surf, start, direction) return surf
[ "def", "Arrow", "(", "start", "=", "(", "0.", ",", "0.", ",", "0.", ")", ",", "direction", "=", "(", "1.", ",", "0.", ",", "0.", ")", ",", "tip_length", "=", "0.25", ",", "tip_radius", "=", "0.1", ",", "shaft_radius", "=", "0.05", ",", "shaft_res...
Create a vtk Arrow Parameters ---------- start : np.ndarray Start location in [x, y, z] direction : list or np.ndarray Direction the arrow points to in [x, y, z] tip_length : float, optional Length of the tip. tip_radius : float, optional Radius of the tip. shaft_radius : float, optional Radius of the shaft. shaft_resolution : int, optional Number of faces around the shaft Returns ------- arrow : vtki.PolyData Arrow surface.
[ "Create", "a", "vtk", "Arrow" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/geometric_objects.py#L96-L135
train
217,459
vtkiorg/vtki
vtki/geometric_objects.py
Sphere
def Sphere(radius=0.5, center=(0, 0, 0), direction=(0, 0, 1), theta_resolution=30, phi_resolution=30, start_theta=0, end_theta=360, start_phi=0, end_phi=180): """ Create a vtk Sphere Parameters ---------- radius : float, optional Sphere radius center : np.ndarray or list, optional Center in [x, y, z] direction : list or np.ndarray Direction the top of the sphere points to in [x, y, z] theta_resolution: int , optional Set the number of points in the longitude direction (ranging from start_theta to end theta). phi_resolution : int, optional Set the number of points in the latitude direction (ranging from start_phi to end_phi). start_theta : float, optional Starting longitude angle. end_theta : float, optional Ending longitude angle. start_phi : float, optional Starting latitude angle. end_phi : float, optional Ending latitude angle. Returns ------- sphere : vtki.PolyData Sphere mesh. """ sphere = vtk.vtkSphereSource() sphere.SetRadius(radius) sphere.SetThetaResolution(theta_resolution) sphere.SetPhiResolution(phi_resolution) sphere.SetStartTheta(start_theta) sphere.SetEndTheta(end_theta) sphere.SetStartPhi(start_phi) sphere.SetEndPhi(end_phi) sphere.Update() surf = PolyData(sphere.GetOutput()) surf.rotate_y(-90) translate(surf, center, direction) return surf
python
def Sphere(radius=0.5, center=(0, 0, 0), direction=(0, 0, 1), theta_resolution=30, phi_resolution=30, start_theta=0, end_theta=360, start_phi=0, end_phi=180): """ Create a vtk Sphere Parameters ---------- radius : float, optional Sphere radius center : np.ndarray or list, optional Center in [x, y, z] direction : list or np.ndarray Direction the top of the sphere points to in [x, y, z] theta_resolution: int , optional Set the number of points in the longitude direction (ranging from start_theta to end theta). phi_resolution : int, optional Set the number of points in the latitude direction (ranging from start_phi to end_phi). start_theta : float, optional Starting longitude angle. end_theta : float, optional Ending longitude angle. start_phi : float, optional Starting latitude angle. end_phi : float, optional Ending latitude angle. Returns ------- sphere : vtki.PolyData Sphere mesh. """ sphere = vtk.vtkSphereSource() sphere.SetRadius(radius) sphere.SetThetaResolution(theta_resolution) sphere.SetPhiResolution(phi_resolution) sphere.SetStartTheta(start_theta) sphere.SetEndTheta(end_theta) sphere.SetStartPhi(start_phi) sphere.SetEndPhi(end_phi) sphere.Update() surf = PolyData(sphere.GetOutput()) surf.rotate_y(-90) translate(surf, center, direction) return surf
[ "def", "Sphere", "(", "radius", "=", "0.5", ",", "center", "=", "(", "0", ",", "0", ",", "0", ")", ",", "direction", "=", "(", "0", ",", "0", ",", "1", ")", ",", "theta_resolution", "=", "30", ",", "phi_resolution", "=", "30", ",", "start_theta",...
Create a vtk Sphere Parameters ---------- radius : float, optional Sphere radius center : np.ndarray or list, optional Center in [x, y, z] direction : list or np.ndarray Direction the top of the sphere points to in [x, y, z] theta_resolution: int , optional Set the number of points in the longitude direction (ranging from start_theta to end theta). phi_resolution : int, optional Set the number of points in the latitude direction (ranging from start_phi to end_phi). start_theta : float, optional Starting longitude angle. end_theta : float, optional Ending longitude angle. start_phi : float, optional Starting latitude angle. end_phi : float, optional Ending latitude angle. Returns ------- sphere : vtki.PolyData Sphere mesh.
[ "Create", "a", "vtk", "Sphere" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/geometric_objects.py#L138-L191
train
217,460
vtkiorg/vtki
vtki/geometric_objects.py
Plane
def Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1, i_resolution=10, j_resolution=10): """ Create a plane Parameters ---------- center : list or np.ndarray Location of the centroid in [x, y, z] direction : list or np.ndarray Direction cylinder points to in [x, y, z] i_size : float Size of the plane in the i direction. j_size : float Size of the plane in the i direction. i_resolution : int Number of points on the plane in the i direction. j_resolution : int Number of points on the plane in the j direction. Returns ------- plane : vtki.PolyData Plane mesh """ planeSource = vtk.vtkPlaneSource() planeSource.SetXResolution(i_resolution) planeSource.SetYResolution(j_resolution) planeSource.Update() surf = PolyData(planeSource.GetOutput()) surf.points[:, 0] *= i_size surf.points[:, 1] *= j_size surf.rotate_y(-90) translate(surf, center, direction) return surf
python
def Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1, i_resolution=10, j_resolution=10): """ Create a plane Parameters ---------- center : list or np.ndarray Location of the centroid in [x, y, z] direction : list or np.ndarray Direction cylinder points to in [x, y, z] i_size : float Size of the plane in the i direction. j_size : float Size of the plane in the i direction. i_resolution : int Number of points on the plane in the i direction. j_resolution : int Number of points on the plane in the j direction. Returns ------- plane : vtki.PolyData Plane mesh """ planeSource = vtk.vtkPlaneSource() planeSource.SetXResolution(i_resolution) planeSource.SetYResolution(j_resolution) planeSource.Update() surf = PolyData(planeSource.GetOutput()) surf.points[:, 0] *= i_size surf.points[:, 1] *= j_size surf.rotate_y(-90) translate(surf, center, direction) return surf
[ "def", "Plane", "(", "center", "=", "(", "0", ",", "0", ",", "0", ")", ",", "direction", "=", "(", "0", ",", "0", ",", "1", ")", ",", "i_size", "=", "1", ",", "j_size", "=", "1", ",", "i_resolution", "=", "10", ",", "j_resolution", "=", "10",...
Create a plane Parameters ---------- center : list or np.ndarray Location of the centroid in [x, y, z] direction : list or np.ndarray Direction cylinder points to in [x, y, z] i_size : float Size of the plane in the i direction. j_size : float Size of the plane in the i direction. i_resolution : int Number of points on the plane in the i direction. j_resolution : int Number of points on the plane in the j direction. Returns ------- plane : vtki.PolyData Plane mesh
[ "Create", "a", "plane" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/geometric_objects.py#L194-L236
train
217,461
vtkiorg/vtki
vtki/geometric_objects.py
Line
def Line(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1): """Create a line Parameters ---------- pointa : np.ndarray or list Location in [x, y, z]. pointb : np.ndarray or list Location in [x, y, z]. resolution : int number of pieces to divide line into """ if np.array(pointa).size != 3: raise TypeError('Point A must be a length three tuple of floats.') if np.array(pointb).size != 3: raise TypeError('Point B must be a length three tuple of floats.') src = vtk.vtkLineSource() src.SetPoint1(*pointa) src.SetPoint2(*pointb) src.SetResolution(resolution) src.Update() return vtki.wrap(src.GetOutput())
python
def Line(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1): """Create a line Parameters ---------- pointa : np.ndarray or list Location in [x, y, z]. pointb : np.ndarray or list Location in [x, y, z]. resolution : int number of pieces to divide line into """ if np.array(pointa).size != 3: raise TypeError('Point A must be a length three tuple of floats.') if np.array(pointb).size != 3: raise TypeError('Point B must be a length three tuple of floats.') src = vtk.vtkLineSource() src.SetPoint1(*pointa) src.SetPoint2(*pointb) src.SetResolution(resolution) src.Update() return vtki.wrap(src.GetOutput())
[ "def", "Line", "(", "pointa", "=", "(", "-", "0.5", ",", "0.", ",", "0.", ")", ",", "pointb", "=", "(", "0.5", ",", "0.", ",", "0.", ")", ",", "resolution", "=", "1", ")", ":", "if", "np", ".", "array", "(", "pointa", ")", ".", "size", "!="...
Create a line Parameters ---------- pointa : np.ndarray or list Location in [x, y, z]. pointb : np.ndarray or list Location in [x, y, z]. resolution : int number of pieces to divide line into
[ "Create", "a", "line" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/geometric_objects.py#L239-L262
train
217,462
vtkiorg/vtki
vtki/geometric_objects.py
Cube
def Cube(center=(0., 0., 0.), x_length=1.0, y_length=1.0, z_length=1.0, bounds=None): """Create a cube by either specifying the center and side lengths or just the bounds of the cube. If ``bounds`` are given, all other arguments are ignored. Parameters ---------- center : np.ndarray or list Center in [x, y, z]. x_length : float length of the cube in the x-direction. y_length : float length of the cube in the y-direction. z_length : float length of the cube in the z-direction. bounds : np.ndarray or list Specify the bounding box of the cube. If given, all other arguments are ignored. ``(xMin,xMax, yMin,yMax, zMin,zMax)`` """ src = vtk.vtkCubeSource() if bounds is not None: if np.array(bounds).size != 6: raise TypeError('Bounds must be given as length 6 tuple: (xMin,xMax, yMin,yMax, zMin,zMax)') src.SetBounds(bounds) else: src.SetCenter(center) src.SetXLength(x_length) src.SetYLength(y_length) src.SetZLength(z_length) src.Update() return vtki.wrap(src.GetOutput())
python
def Cube(center=(0., 0., 0.), x_length=1.0, y_length=1.0, z_length=1.0, bounds=None): """Create a cube by either specifying the center and side lengths or just the bounds of the cube. If ``bounds`` are given, all other arguments are ignored. Parameters ---------- center : np.ndarray or list Center in [x, y, z]. x_length : float length of the cube in the x-direction. y_length : float length of the cube in the y-direction. z_length : float length of the cube in the z-direction. bounds : np.ndarray or list Specify the bounding box of the cube. If given, all other arguments are ignored. ``(xMin,xMax, yMin,yMax, zMin,zMax)`` """ src = vtk.vtkCubeSource() if bounds is not None: if np.array(bounds).size != 6: raise TypeError('Bounds must be given as length 6 tuple: (xMin,xMax, yMin,yMax, zMin,zMax)') src.SetBounds(bounds) else: src.SetCenter(center) src.SetXLength(x_length) src.SetYLength(y_length) src.SetZLength(z_length) src.Update() return vtki.wrap(src.GetOutput())
[ "def", "Cube", "(", "center", "=", "(", "0.", ",", "0.", ",", "0.", ")", ",", "x_length", "=", "1.0", ",", "y_length", "=", "1.0", ",", "z_length", "=", "1.0", ",", "bounds", "=", "None", ")", ":", "src", "=", "vtk", ".", "vtkCubeSource", "(", ...
Create a cube by either specifying the center and side lengths or just the bounds of the cube. If ``bounds`` are given, all other arguments are ignored. Parameters ---------- center : np.ndarray or list Center in [x, y, z]. x_length : float length of the cube in the x-direction. y_length : float length of the cube in the y-direction. z_length : float length of the cube in the z-direction. bounds : np.ndarray or list Specify the bounding box of the cube. If given, all other arguments are ignored. ``(xMin,xMax, yMin,yMax, zMin,zMax)``
[ "Create", "a", "cube", "by", "either", "specifying", "the", "center", "and", "side", "lengths", "or", "just", "the", "bounds", "of", "the", "cube", ".", "If", "bounds", "are", "given", "all", "other", "arguments", "are", "ignored", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/geometric_objects.py#L265-L299
train
217,463
vtkiorg/vtki
vtki/readers.py
standard_reader_routine
def standard_reader_routine(reader, filename, attrs=None): """Use a given reader from the ``READERS`` mapping in the common VTK reading pipeline routine. Parameters ---------- reader : vtkReader Any instantiated VTK reader class filename : str The string filename to the data file to read. attrs : dict, optional A dictionary of attributes to call on the reader. Keys of dictionary are the attribute/method names and values are the arguments passed to those calls. If you do not have any attributes to call, pass ``None`` as the value. """ if attrs is None: attrs = {} if not isinstance(attrs, dict): raise TypeError('Attributes must be a dictionary of name and arguments.') reader.SetFileName(filename) # Apply any attributes listed for name, args in attrs.items(): attr = getattr(reader, name) if args is not None: if not isinstance(args, (list, tuple)): args = [args] attr(*args) else: attr() # Perform the read reader.Update() return vtki.wrap(reader.GetOutputDataObject(0))
python
def standard_reader_routine(reader, filename, attrs=None): """Use a given reader from the ``READERS`` mapping in the common VTK reading pipeline routine. Parameters ---------- reader : vtkReader Any instantiated VTK reader class filename : str The string filename to the data file to read. attrs : dict, optional A dictionary of attributes to call on the reader. Keys of dictionary are the attribute/method names and values are the arguments passed to those calls. If you do not have any attributes to call, pass ``None`` as the value. """ if attrs is None: attrs = {} if not isinstance(attrs, dict): raise TypeError('Attributes must be a dictionary of name and arguments.') reader.SetFileName(filename) # Apply any attributes listed for name, args in attrs.items(): attr = getattr(reader, name) if args is not None: if not isinstance(args, (list, tuple)): args = [args] attr(*args) else: attr() # Perform the read reader.Update() return vtki.wrap(reader.GetOutputDataObject(0))
[ "def", "standard_reader_routine", "(", "reader", ",", "filename", ",", "attrs", "=", "None", ")", ":", "if", "attrs", "is", "None", ":", "attrs", "=", "{", "}", "if", "not", "isinstance", "(", "attrs", ",", "dict", ")", ":", "raise", "TypeError", "(", ...
Use a given reader from the ``READERS`` mapping in the common VTK reading pipeline routine. Parameters ---------- reader : vtkReader Any instantiated VTK reader class filename : str The string filename to the data file to read. attrs : dict, optional A dictionary of attributes to call on the reader. Keys of dictionary are the attribute/method names and values are the arguments passed to those calls. If you do not have any attributes to call, pass ``None`` as the value.
[ "Use", "a", "given", "reader", "from", "the", "READERS", "mapping", "in", "the", "common", "VTK", "reading", "pipeline", "routine", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/readers.py#L85-L119
train
217,464
vtkiorg/vtki
vtki/readers.py
read_legacy
def read_legacy(filename): """Use VTK's legacy reader to read a file""" reader = vtk.vtkDataSetReader() reader.SetFileName(filename) # Ensure all data is fetched with poorly formated legacy files reader.ReadAllScalarsOn() reader.ReadAllColorScalarsOn() reader.ReadAllNormalsOn() reader.ReadAllTCoordsOn() reader.ReadAllVectorsOn() # Perform the read reader.Update() output = reader.GetOutputDataObject(0) if output is None: raise AssertionError('No output when using VTKs legacy reader') return vtki.wrap(output)
python
def read_legacy(filename): """Use VTK's legacy reader to read a file""" reader = vtk.vtkDataSetReader() reader.SetFileName(filename) # Ensure all data is fetched with poorly formated legacy files reader.ReadAllScalarsOn() reader.ReadAllColorScalarsOn() reader.ReadAllNormalsOn() reader.ReadAllTCoordsOn() reader.ReadAllVectorsOn() # Perform the read reader.Update() output = reader.GetOutputDataObject(0) if output is None: raise AssertionError('No output when using VTKs legacy reader') return vtki.wrap(output)
[ "def", "read_legacy", "(", "filename", ")", ":", "reader", "=", "vtk", ".", "vtkDataSetReader", "(", ")", "reader", ".", "SetFileName", "(", "filename", ")", "# Ensure all data is fetched with poorly formated legacy files", "reader", ".", "ReadAllScalarsOn", "(", ")",...
Use VTK's legacy reader to read a file
[ "Use", "VTK", "s", "legacy", "reader", "to", "read", "a", "file" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/readers.py#L122-L137
train
217,465
vtkiorg/vtki
vtki/readers.py
read
def read(filename, attrs=None): """This will read any VTK file! It will figure out what reader to use then wrap the VTK object for use in ``vtki``. Parameters ---------- attrs : dict, optional A dictionary of attributes to call on the reader. Keys of dictionary are the attribute/method names and values are the arguments passed to those calls. If you do not have any attributes to call, pass ``None`` as the value. """ filename = os.path.abspath(os.path.expanduser(filename)) ext = get_ext(filename) # From the extension, decide which reader to use if attrs is not None: reader = get_reader(filename) return standard_reader_routine(reader, filename, attrs=attrs) elif ext in '.vti': # ImageData return vtki.UniformGrid(filename) elif ext in '.vtr': # RectilinearGrid return vtki.RectilinearGrid(filename) elif ext in '.vtu': # UnstructuredGrid return vtki.UnstructuredGrid(filename) elif ext in ['.ply', '.obj', '.stl']: # PolyData return vtki.PolyData(filename) elif ext in '.vts': # StructuredGrid return vtki.StructuredGrid(filename) elif ext in ['.vtm', '.vtmb']: return vtki.MultiBlock(filename) elif ext in ['.e', '.exo']: return read_exodus(filename) elif ext in ['.vtk']: # Attempt to use the legacy reader... return read_legacy(filename) else: # Attempt find a reader in the readers mapping try: reader = get_reader(filename) return standard_reader_routine(reader, filename) except KeyError: pass raise IOError("This file was not able to be automatically read by vtki.")
python
def read(filename, attrs=None): """This will read any VTK file! It will figure out what reader to use then wrap the VTK object for use in ``vtki``. Parameters ---------- attrs : dict, optional A dictionary of attributes to call on the reader. Keys of dictionary are the attribute/method names and values are the arguments passed to those calls. If you do not have any attributes to call, pass ``None`` as the value. """ filename = os.path.abspath(os.path.expanduser(filename)) ext = get_ext(filename) # From the extension, decide which reader to use if attrs is not None: reader = get_reader(filename) return standard_reader_routine(reader, filename, attrs=attrs) elif ext in '.vti': # ImageData return vtki.UniformGrid(filename) elif ext in '.vtr': # RectilinearGrid return vtki.RectilinearGrid(filename) elif ext in '.vtu': # UnstructuredGrid return vtki.UnstructuredGrid(filename) elif ext in ['.ply', '.obj', '.stl']: # PolyData return vtki.PolyData(filename) elif ext in '.vts': # StructuredGrid return vtki.StructuredGrid(filename) elif ext in ['.vtm', '.vtmb']: return vtki.MultiBlock(filename) elif ext in ['.e', '.exo']: return read_exodus(filename) elif ext in ['.vtk']: # Attempt to use the legacy reader... return read_legacy(filename) else: # Attempt find a reader in the readers mapping try: reader = get_reader(filename) return standard_reader_routine(reader, filename) except KeyError: pass raise IOError("This file was not able to be automatically read by vtki.")
[ "def", "read", "(", "filename", ",", "attrs", "=", "None", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "filename", ")", ")", "ext", "=", "get_ext", "(", "filename", ")", "# From the ex...
This will read any VTK file! It will figure out what reader to use then wrap the VTK object for use in ``vtki``. Parameters ---------- attrs : dict, optional A dictionary of attributes to call on the reader. Keys of dictionary are the attribute/method names and values are the arguments passed to those calls. If you do not have any attributes to call, pass ``None`` as the value.
[ "This", "will", "read", "any", "VTK", "file!", "It", "will", "figure", "out", "what", "reader", "to", "use", "then", "wrap", "the", "VTK", "object", "for", "use", "in", "vtki", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/readers.py#L140-L183
train
217,466
vtkiorg/vtki
vtki/readers.py
read_texture
def read_texture(filename, attrs=None): """Loads a ``vtkTexture`` from an image file.""" filename = os.path.abspath(os.path.expanduser(filename)) try: # intitialize the reader using the extnesion to find it reader = get_reader(filename) image = standard_reader_routine(reader, filename, attrs=attrs) return vtki.image_to_texture(image) except KeyError: # Otherwise, use the imageio reader pass return vtki.numpy_to_texture(imageio.imread(filename))
python
def read_texture(filename, attrs=None): """Loads a ``vtkTexture`` from an image file.""" filename = os.path.abspath(os.path.expanduser(filename)) try: # intitialize the reader using the extnesion to find it reader = get_reader(filename) image = standard_reader_routine(reader, filename, attrs=attrs) return vtki.image_to_texture(image) except KeyError: # Otherwise, use the imageio reader pass return vtki.numpy_to_texture(imageio.imread(filename))
[ "def", "read_texture", "(", "filename", ",", "attrs", "=", "None", ")", ":", "filename", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "filename", ")", ")", "try", ":", "# intitialize the reader using the extnesion t...
Loads a ``vtkTexture`` from an image file.
[ "Loads", "a", "vtkTexture", "from", "an", "image", "file", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/readers.py#L186-L197
train
217,467
vtkiorg/vtki
vtki/examples/downloads.py
delete_downloads
def delete_downloads(): """Delete all downloaded examples to free space or update the files""" shutil.rmtree(vtki.EXAMPLES_PATH) os.makedirs(vtki.EXAMPLES_PATH) return True
python
def delete_downloads(): """Delete all downloaded examples to free space or update the files""" shutil.rmtree(vtki.EXAMPLES_PATH) os.makedirs(vtki.EXAMPLES_PATH) return True
[ "def", "delete_downloads", "(", ")", ":", "shutil", ".", "rmtree", "(", "vtki", ".", "EXAMPLES_PATH", ")", "os", ".", "makedirs", "(", "vtki", ".", "EXAMPLES_PATH", ")", "return", "True" ]
Delete all downloaded examples to free space or update the files
[ "Delete", "all", "downloaded", "examples", "to", "free", "space", "or", "update", "the", "files" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/examples/downloads.py#L13-L17
train
217,468
vtkiorg/vtki
vtki/examples/downloads.py
download_blood_vessels
def download_blood_vessels(): """data representing the bifurcation of blood vessels.""" local_path, _ = _download_file('pvtu_blood_vessels/blood_vessels.zip') filename = os.path.join(local_path, 'T0000000500.pvtu') mesh = vtki.read(filename) mesh.set_active_vectors('velocity') return mesh
python
def download_blood_vessels(): """data representing the bifurcation of blood vessels.""" local_path, _ = _download_file('pvtu_blood_vessels/blood_vessels.zip') filename = os.path.join(local_path, 'T0000000500.pvtu') mesh = vtki.read(filename) mesh.set_active_vectors('velocity') return mesh
[ "def", "download_blood_vessels", "(", ")", ":", "local_path", ",", "_", "=", "_download_file", "(", "'pvtu_blood_vessels/blood_vessels.zip'", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "local_path", ",", "'T0000000500.pvtu'", ")", "mesh", "=", "v...
data representing the bifurcation of blood vessels.
[ "data", "representing", "the", "bifurcation", "of", "blood", "vessels", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/examples/downloads.py#L137-L143
train
217,469
vtkiorg/vtki
vtki/examples/downloads.py
download_sparse_points
def download_sparse_points(): """Used with ``download_saddle_surface``""" saved_file, _ = _download_file('sparsePoints.txt') points_reader = vtk.vtkDelimitedTextReader() points_reader.SetFileName(saved_file) points_reader.DetectNumericColumnsOn() points_reader.SetFieldDelimiterCharacters('\t') points_reader.SetHaveHeaders(True) table_points = vtk.vtkTableToPolyData() table_points.SetInputConnection(points_reader.GetOutputPort()) table_points.SetXColumn('x') table_points.SetYColumn('y') table_points.SetZColumn('z') table_points.Update() return vtki.wrap(table_points.GetOutput())
python
def download_sparse_points(): """Used with ``download_saddle_surface``""" saved_file, _ = _download_file('sparsePoints.txt') points_reader = vtk.vtkDelimitedTextReader() points_reader.SetFileName(saved_file) points_reader.DetectNumericColumnsOn() points_reader.SetFieldDelimiterCharacters('\t') points_reader.SetHaveHeaders(True) table_points = vtk.vtkTableToPolyData() table_points.SetInputConnection(points_reader.GetOutputPort()) table_points.SetXColumn('x') table_points.SetYColumn('y') table_points.SetZColumn('z') table_points.Update() return vtki.wrap(table_points.GetOutput())
[ "def", "download_sparse_points", "(", ")", ":", "saved_file", ",", "_", "=", "_download_file", "(", "'sparsePoints.txt'", ")", "points_reader", "=", "vtk", ".", "vtkDelimitedTextReader", "(", ")", "points_reader", ".", "SetFileName", "(", "saved_file", ")", "point...
Used with ``download_saddle_surface``
[ "Used", "with", "download_saddle_surface" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/examples/downloads.py#L154-L168
train
217,470
vtkiorg/vtki
vtki/examples/downloads.py
download_kitchen
def download_kitchen(split=False): """Download structured grid of kitchen with velocity field. Use the ``split`` argument to extract all of the furniture in the kitchen. """ mesh = _download_and_read('kitchen.vtk') if not split: return mesh extents = { 'door' : (27, 27, 14, 18, 0, 11), 'window1' : (0, 0, 9, 18, 6, 12), 'window2' : (5, 12, 23, 23, 6, 12), 'klower1' : (17, 17, 0, 11, 0, 6), 'klower2' : (19, 19, 0, 11, 0, 6), 'klower3' : (17, 19, 0, 0, 0, 6), 'klower4' : (17, 19, 11, 11, 0, 6), 'klower5' : (17, 19, 0, 11, 0, 0), 'klower6' : (17, 19, 0, 7, 6, 6), 'klower7' : (17, 19, 9, 11, 6, 6), 'hood1' : (17, 17, 0, 11, 11, 16), 'hood2' : (19, 19, 0, 11, 11, 16), 'hood3' : (17, 19, 0, 0, 11, 16), 'hood4' : (17, 19, 11, 11, 11, 16), 'hood5' : (17, 19, 0, 11, 16, 16), 'cookingPlate' : (17, 19, 7, 9, 6, 6), 'furniture' : (17, 19, 7, 9, 11, 11), } kitchen = vtki.MultiBlock() for key, extent in extents.items(): alg = vtk.vtkStructuredGridGeometryFilter() alg.SetInputDataObject(mesh) alg.SetExtent(extent) alg.Update() result = vtki.filters._get_output(alg) kitchen[key] = result return kitchen
python
def download_kitchen(split=False): """Download structured grid of kitchen with velocity field. Use the ``split`` argument to extract all of the furniture in the kitchen. """ mesh = _download_and_read('kitchen.vtk') if not split: return mesh extents = { 'door' : (27, 27, 14, 18, 0, 11), 'window1' : (0, 0, 9, 18, 6, 12), 'window2' : (5, 12, 23, 23, 6, 12), 'klower1' : (17, 17, 0, 11, 0, 6), 'klower2' : (19, 19, 0, 11, 0, 6), 'klower3' : (17, 19, 0, 0, 0, 6), 'klower4' : (17, 19, 11, 11, 0, 6), 'klower5' : (17, 19, 0, 11, 0, 0), 'klower6' : (17, 19, 0, 7, 6, 6), 'klower7' : (17, 19, 9, 11, 6, 6), 'hood1' : (17, 17, 0, 11, 11, 16), 'hood2' : (19, 19, 0, 11, 11, 16), 'hood3' : (17, 19, 0, 0, 11, 16), 'hood4' : (17, 19, 11, 11, 11, 16), 'hood5' : (17, 19, 0, 11, 16, 16), 'cookingPlate' : (17, 19, 7, 9, 6, 6), 'furniture' : (17, 19, 7, 9, 11, 11), } kitchen = vtki.MultiBlock() for key, extent in extents.items(): alg = vtk.vtkStructuredGridGeometryFilter() alg.SetInputDataObject(mesh) alg.SetExtent(extent) alg.Update() result = vtki.filters._get_output(alg) kitchen[key] = result return kitchen
[ "def", "download_kitchen", "(", "split", "=", "False", ")", ":", "mesh", "=", "_download_and_read", "(", "'kitchen.vtk'", ")", "if", "not", "split", ":", "return", "mesh", "extents", "=", "{", "'door'", ":", "(", "27", ",", "27", ",", "14", ",", "18", ...
Download structured grid of kitchen with velocity field. Use the ``split`` argument to extract all of the furniture in the kitchen.
[ "Download", "structured", "grid", "of", "kitchen", "with", "velocity", "field", ".", "Use", "the", "split", "argument", "to", "extract", "all", "of", "the", "furniture", "in", "the", "kitchen", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/examples/downloads.py#L324-L358
train
217,471
vtkiorg/vtki
vtki/ipy_tools.py
InteractiveTool._get_scalar_names
def _get_scalar_names(self, limit=None): """Only give scalar options that have a varying range""" names = [] if limit == 'point': inpnames = list(self.input_dataset.point_arrays.keys()) elif limit == 'cell': inpnames = list(self.input_dataset.cell_arrays.keys()) else: inpnames = self.input_dataset.scalar_names for name in inpnames: arr = self.input_dataset.get_scalar(name) rng = self.input_dataset.get_data_range(name) if arr is not None and arr.size > 0 and (rng[1]-rng[0] > 0.0): names.append(name) try: self._last_scalars = names[0] except IndexError: pass return names
python
def _get_scalar_names(self, limit=None): """Only give scalar options that have a varying range""" names = [] if limit == 'point': inpnames = list(self.input_dataset.point_arrays.keys()) elif limit == 'cell': inpnames = list(self.input_dataset.cell_arrays.keys()) else: inpnames = self.input_dataset.scalar_names for name in inpnames: arr = self.input_dataset.get_scalar(name) rng = self.input_dataset.get_data_range(name) if arr is not None and arr.size > 0 and (rng[1]-rng[0] > 0.0): names.append(name) try: self._last_scalars = names[0] except IndexError: pass return names
[ "def", "_get_scalar_names", "(", "self", ",", "limit", "=", "None", ")", ":", "names", "=", "[", "]", "if", "limit", "==", "'point'", ":", "inpnames", "=", "list", "(", "self", ".", "input_dataset", ".", "point_arrays", ".", "keys", "(", ")", ")", "e...
Only give scalar options that have a varying range
[ "Only", "give", "scalar", "options", "that", "have", "a", "varying", "range" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/ipy_tools.py#L133-L151
train
217,472
vtkiorg/vtki
vtki/ipy_tools.py
InteractiveTool._initialize
def _initialize(self, show_bounds, reset_camera, outline): """Outlines the input dataset and sets up the scene""" self.plotter.subplot(*self.loc) if outline is None: self.plotter.add_mesh(self.input_dataset.outline_corners(), reset_camera=False, color=vtki.rcParams['outline_color'], loc=self.loc) elif outline: self.plotter.add_mesh(self.input_dataset.outline(), reset_camera=False, color=vtki.rcParams['outline_color'], loc=self.loc) # add the axis labels if show_bounds: self.plotter.show_bounds(reset_camera=False, loc=loc) if reset_camera: cpos = self.plotter.get_default_cam_pos() self.plotter.camera_position = cpos self.plotter.reset_camera() self.plotter.camera_set = False
python
def _initialize(self, show_bounds, reset_camera, outline): """Outlines the input dataset and sets up the scene""" self.plotter.subplot(*self.loc) if outline is None: self.plotter.add_mesh(self.input_dataset.outline_corners(), reset_camera=False, color=vtki.rcParams['outline_color'], loc=self.loc) elif outline: self.plotter.add_mesh(self.input_dataset.outline(), reset_camera=False, color=vtki.rcParams['outline_color'], loc=self.loc) # add the axis labels if show_bounds: self.plotter.show_bounds(reset_camera=False, loc=loc) if reset_camera: cpos = self.plotter.get_default_cam_pos() self.plotter.camera_position = cpos self.plotter.reset_camera() self.plotter.camera_set = False
[ "def", "_initialize", "(", "self", ",", "show_bounds", ",", "reset_camera", ",", "outline", ")", ":", "self", ".", "plotter", ".", "subplot", "(", "*", "self", ".", "loc", ")", "if", "outline", "is", "None", ":", "self", ".", "plotter", ".", "add_mesh"...
Outlines the input dataset and sets up the scene
[ "Outlines", "the", "input", "dataset", "and", "sets", "up", "the", "scene" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/ipy_tools.py#L160-L178
train
217,473
vtkiorg/vtki
vtki/ipy_tools.py
InteractiveTool._update_plotting_params
def _update_plotting_params(self, **kwargs): """Some plotting parameters can be changed through the tool; this updataes those plotting parameters. """ scalars = kwargs.get('scalars', None) if scalars is not None: old = self.display_params['scalars'] self.display_params['scalars'] = scalars if old != scalars: self.plotter.subplot(*self.loc) self.plotter.remove_actor(self._data_to_update, reset_camera=False) self._need_to_update = True self.valid_range = self.input_dataset.get_data_range(scalars) # self.display_params['rng'] = self.valid_range cmap = kwargs.get('cmap', None) if cmap is not None: self.display_params['cmap'] = cmap
python
def _update_plotting_params(self, **kwargs): """Some plotting parameters can be changed through the tool; this updataes those plotting parameters. """ scalars = kwargs.get('scalars', None) if scalars is not None: old = self.display_params['scalars'] self.display_params['scalars'] = scalars if old != scalars: self.plotter.subplot(*self.loc) self.plotter.remove_actor(self._data_to_update, reset_camera=False) self._need_to_update = True self.valid_range = self.input_dataset.get_data_range(scalars) # self.display_params['rng'] = self.valid_range cmap = kwargs.get('cmap', None) if cmap is not None: self.display_params['cmap'] = cmap
[ "def", "_update_plotting_params", "(", "self", ",", "*", "*", "kwargs", ")", ":", "scalars", "=", "kwargs", ".", "get", "(", "'scalars'", ",", "None", ")", "if", "scalars", "is", "not", "None", ":", "old", "=", "self", ".", "display_params", "[", "'sca...
Some plotting parameters can be changed through the tool; this updataes those plotting parameters.
[ "Some", "plotting", "parameters", "can", "be", "changed", "through", "the", "tool", ";", "this", "updataes", "those", "plotting", "parameters", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/ipy_tools.py#L181-L197
train
217,474
vtkiorg/vtki
vtki/renderer.py
_remove_mapper_from_plotter
def _remove_mapper_from_plotter(plotter, actor, reset_camera): """removes this actor's mapper from the given plotter's _scalar_bar_mappers""" try: mapper = actor.GetMapper() except AttributeError: return for name in list(plotter._scalar_bar_mappers.keys()): try: plotter._scalar_bar_mappers[name].remove(mapper) except ValueError: pass if len(plotter._scalar_bar_mappers[name]) < 1: slot = plotter._scalar_bar_slot_lookup.pop(name) plotter._scalar_bar_mappers.pop(name) plotter._scalar_bar_ranges.pop(name) plotter.remove_actor(plotter._scalar_bar_actors.pop(name), reset_camera=reset_camera) plotter._scalar_bar_slots.add(slot) return
python
def _remove_mapper_from_plotter(plotter, actor, reset_camera): """removes this actor's mapper from the given plotter's _scalar_bar_mappers""" try: mapper = actor.GetMapper() except AttributeError: return for name in list(plotter._scalar_bar_mappers.keys()): try: plotter._scalar_bar_mappers[name].remove(mapper) except ValueError: pass if len(plotter._scalar_bar_mappers[name]) < 1: slot = plotter._scalar_bar_slot_lookup.pop(name) plotter._scalar_bar_mappers.pop(name) plotter._scalar_bar_ranges.pop(name) plotter.remove_actor(plotter._scalar_bar_actors.pop(name), reset_camera=reset_camera) plotter._scalar_bar_slots.add(slot) return
[ "def", "_remove_mapper_from_plotter", "(", "plotter", ",", "actor", ",", "reset_camera", ")", ":", "try", ":", "mapper", "=", "actor", ".", "GetMapper", "(", ")", "except", "AttributeError", ":", "return", "for", "name", "in", "list", "(", "plotter", ".", ...
removes this actor's mapper from the given plotter's _scalar_bar_mappers
[ "removes", "this", "actor", "s", "mapper", "from", "the", "given", "plotter", "s", "_scalar_bar_mappers" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L774-L791
train
217,475
vtkiorg/vtki
vtki/renderer.py
Renderer.add_axes_at_origin
def add_axes_at_origin(self): """ Add axes actor at origin Returns -------- marker_actor : vtk.vtkAxesActor vtkAxesActor actor """ self.marker_actor = vtk.vtkAxesActor() # renderer = self.renderers[self.loc_to_index(loc)] self.AddActor(self.marker_actor) self.parent._actors[str(hex(id(self.marker_actor)))] = self.marker_actor return self.marker_actor
python
def add_axes_at_origin(self): """ Add axes actor at origin Returns -------- marker_actor : vtk.vtkAxesActor vtkAxesActor actor """ self.marker_actor = vtk.vtkAxesActor() # renderer = self.renderers[self.loc_to_index(loc)] self.AddActor(self.marker_actor) self.parent._actors[str(hex(id(self.marker_actor)))] = self.marker_actor return self.marker_actor
[ "def", "add_axes_at_origin", "(", "self", ")", ":", "self", ".", "marker_actor", "=", "vtk", ".", "vtkAxesActor", "(", ")", "# renderer = self.renderers[self.loc_to_index(loc)]", "self", ".", "AddActor", "(", "self", ".", "marker_actor", ")", "self", ".", "parent"...
Add axes actor at origin Returns -------- marker_actor : vtk.vtkAxesActor vtkAxesActor actor
[ "Add", "axes", "actor", "at", "origin" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L128-L141
train
217,476
vtkiorg/vtki
vtki/renderer.py
Renderer.remove_bounding_box
def remove_bounding_box(self): """ Removes bounding box """ if hasattr(self, '_box_object'): actor = self.bounding_box_actor self.bounding_box_actor = None del self._box_object self.remove_actor(actor, reset_camera=False)
python
def remove_bounding_box(self): """ Removes bounding box """ if hasattr(self, '_box_object'): actor = self.bounding_box_actor self.bounding_box_actor = None del self._box_object self.remove_actor(actor, reset_camera=False)
[ "def", "remove_bounding_box", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'_box_object'", ")", ":", "actor", "=", "self", ".", "bounding_box_actor", "self", ".", "bounding_box_actor", "=", "None", "del", "self", ".", "_box_object", "self", "."...
Removes bounding box
[ "Removes", "bounding", "box" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L433-L439
train
217,477
vtkiorg/vtki
vtki/renderer.py
Renderer.camera_position
def camera_position(self): """ Returns camera position of active render window """ return [self.camera.GetPosition(), self.camera.GetFocalPoint(), self.camera.GetViewUp()]
python
def camera_position(self): """ Returns camera position of active render window """ return [self.camera.GetPosition(), self.camera.GetFocalPoint(), self.camera.GetViewUp()]
[ "def", "camera_position", "(", "self", ")", ":", "return", "[", "self", ".", "camera", ".", "GetPosition", "(", ")", ",", "self", ".", "camera", ".", "GetFocalPoint", "(", ")", ",", "self", ".", "camera", ".", "GetViewUp", "(", ")", "]" ]
Returns camera position of active render window
[ "Returns", "camera", "position", "of", "active", "render", "window" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L499-L503
train
217,478
vtkiorg/vtki
vtki/renderer.py
Renderer.camera_position
def camera_position(self, camera_location): """ Set camera position of all active render windows """ if camera_location is None: return if isinstance(camera_location, str): camera_location = camera_location.lower() if camera_location == 'xy': self.view_xy() elif camera_location == 'xz': self.view_xz() elif camera_location == 'yz': self.view_yz() elif camera_location == 'yx': self.view_xy(True) elif camera_location == 'zx': self.view_xz(True) elif camera_location == 'zy': self.view_yz(True) return if isinstance(camera_location[0], (int, float)): return self.view_vector(camera_location) # everything is set explicitly self.camera.SetPosition(camera_location[0]) self.camera.SetFocalPoint(camera_location[1]) self.camera.SetViewUp(camera_location[2]) # reset clipping range self.ResetCameraClippingRange() self.camera_set = True
python
def camera_position(self, camera_location): """ Set camera position of all active render windows """ if camera_location is None: return if isinstance(camera_location, str): camera_location = camera_location.lower() if camera_location == 'xy': self.view_xy() elif camera_location == 'xz': self.view_xz() elif camera_location == 'yz': self.view_yz() elif camera_location == 'yx': self.view_xy(True) elif camera_location == 'zx': self.view_xz(True) elif camera_location == 'zy': self.view_yz(True) return if isinstance(camera_location[0], (int, float)): return self.view_vector(camera_location) # everything is set explicitly self.camera.SetPosition(camera_location[0]) self.camera.SetFocalPoint(camera_location[1]) self.camera.SetViewUp(camera_location[2]) # reset clipping range self.ResetCameraClippingRange() self.camera_set = True
[ "def", "camera_position", "(", "self", ",", "camera_location", ")", ":", "if", "camera_location", "is", "None", ":", "return", "if", "isinstance", "(", "camera_location", ",", "str", ")", ":", "camera_location", "=", "camera_location", ".", "lower", "(", ")", ...
Set camera position of all active render windows
[ "Set", "camera", "position", "of", "all", "active", "render", "windows" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L506-L537
train
217,479
vtkiorg/vtki
vtki/renderer.py
Renderer.remove_actor
def remove_actor(self, actor, reset_camera=False): """ Removes an actor from the Renderer. Parameters ---------- actor : vtk.vtkActor Actor that has previously added to the Renderer. reset_camera : bool, optional Resets camera so all actors can be seen. Returns ------- success : bool True when actor removed. False when actor has not been removed. """ name = None if isinstance(actor, str): name = actor keys = list(self._actors.keys()) names = [] for k in keys: if k.startswith('{}-'.format(name)): names.append(k) if len(names) > 0: self.remove_actor(names, reset_camera=reset_camera) try: actor = self._actors[name] except KeyError: # If actor of that name is not present then return success return False if isinstance(actor, collections.Iterable): success = False for a in actor: rv = self.remove_actor(a, reset_camera=reset_camera) if rv or success: success = True return success if actor is None: return False # First remove this actor's mapper from _scalar_bar_mappers _remove_mapper_from_plotter(self.parent, actor, False) self.RemoveActor(actor) if name is None: for k, v in self._actors.items(): if v == actor: name = k self._actors.pop(name, None) self.update_bounds_axes() if reset_camera: self.reset_camera() elif not self.camera_set and reset_camera is None: self.reset_camera() else: self.parent._render() return True
python
def remove_actor(self, actor, reset_camera=False): """ Removes an actor from the Renderer. Parameters ---------- actor : vtk.vtkActor Actor that has previously added to the Renderer. reset_camera : bool, optional Resets camera so all actors can be seen. Returns ------- success : bool True when actor removed. False when actor has not been removed. """ name = None if isinstance(actor, str): name = actor keys = list(self._actors.keys()) names = [] for k in keys: if k.startswith('{}-'.format(name)): names.append(k) if len(names) > 0: self.remove_actor(names, reset_camera=reset_camera) try: actor = self._actors[name] except KeyError: # If actor of that name is not present then return success return False if isinstance(actor, collections.Iterable): success = False for a in actor: rv = self.remove_actor(a, reset_camera=reset_camera) if rv or success: success = True return success if actor is None: return False # First remove this actor's mapper from _scalar_bar_mappers _remove_mapper_from_plotter(self.parent, actor, False) self.RemoveActor(actor) if name is None: for k, v in self._actors.items(): if v == actor: name = k self._actors.pop(name, None) self.update_bounds_axes() if reset_camera: self.reset_camera() elif not self.camera_set and reset_camera is None: self.reset_camera() else: self.parent._render() return True
[ "def", "remove_actor", "(", "self", ",", "actor", ",", "reset_camera", "=", "False", ")", ":", "name", "=", "None", "if", "isinstance", "(", "actor", ",", "str", ")", ":", "name", "=", "actor", "keys", "=", "list", "(", "self", ".", "_actors", ".", ...
Removes an actor from the Renderer. Parameters ---------- actor : vtk.vtkActor Actor that has previously added to the Renderer. reset_camera : bool, optional Resets camera so all actors can be seen. Returns ------- success : bool True when actor removed. False when actor has not been removed.
[ "Removes", "an", "actor", "from", "the", "Renderer", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L544-L603
train
217,480
vtkiorg/vtki
vtki/renderer.py
Renderer.set_scale
def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True): """ Scale all the datasets in the scene. Scaling in performed independently on the X, Y and Z axis. A scale of zero is illegal and will be replaced with one. """ if xscale is None: xscale = self.scale[0] if yscale is None: yscale = self.scale[1] if zscale is None: zscale = self.scale[2] self.scale = [xscale, yscale, zscale] # Update the camera's coordinate system transform = vtk.vtkTransform() transform.Scale(xscale, yscale, zscale) self.camera.SetModelTransformMatrix(transform.GetMatrix()) self.parent._render() if reset_camera: self.update_bounds_axes() self.reset_camera()
python
def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True): """ Scale all the datasets in the scene. Scaling in performed independently on the X, Y and Z axis. A scale of zero is illegal and will be replaced with one. """ if xscale is None: xscale = self.scale[0] if yscale is None: yscale = self.scale[1] if zscale is None: zscale = self.scale[2] self.scale = [xscale, yscale, zscale] # Update the camera's coordinate system transform = vtk.vtkTransform() transform.Scale(xscale, yscale, zscale) self.camera.SetModelTransformMatrix(transform.GetMatrix()) self.parent._render() if reset_camera: self.update_bounds_axes() self.reset_camera()
[ "def", "set_scale", "(", "self", ",", "xscale", "=", "None", ",", "yscale", "=", "None", ",", "zscale", "=", "None", ",", "reset_camera", "=", "True", ")", ":", "if", "xscale", "is", "None", ":", "xscale", "=", "self", ".", "scale", "[", "0", "]", ...
Scale all the datasets in the scene. Scaling in performed independently on the X, Y and Z axis. A scale of zero is illegal and will be replaced with one.
[ "Scale", "all", "the", "datasets", "in", "the", "scene", ".", "Scaling", "in", "performed", "independently", "on", "the", "X", "Y", "and", "Z", "axis", ".", "A", "scale", "of", "zero", "is", "illegal", "and", "will", "be", "replaced", "with", "one", "....
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L605-L626
train
217,481
vtkiorg/vtki
vtki/renderer.py
Renderer.bounds
def bounds(self): """ Bounds of all actors present in the rendering window """ the_bounds = [np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf] def _update_bounds(bounds): def update_axis(ax): if bounds[ax*2] < the_bounds[ax*2]: the_bounds[ax*2] = bounds[ax*2] if bounds[ax*2+1] > the_bounds[ax*2+1]: the_bounds[ax*2+1] = bounds[ax*2+1] for ax in range(3): update_axis(ax) return for actor in self._actors.values(): if isinstance(actor, vtk.vtkCubeAxesActor): continue if ( hasattr(actor, 'GetBounds') and actor.GetBounds() is not None and id(actor) != id(self.bounding_box_actor)): _update_bounds(actor.GetBounds()) return the_bounds
python
def bounds(self): """ Bounds of all actors present in the rendering window """ the_bounds = [np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf] def _update_bounds(bounds): def update_axis(ax): if bounds[ax*2] < the_bounds[ax*2]: the_bounds[ax*2] = bounds[ax*2] if bounds[ax*2+1] > the_bounds[ax*2+1]: the_bounds[ax*2+1] = bounds[ax*2+1] for ax in range(3): update_axis(ax) return for actor in self._actors.values(): if isinstance(actor, vtk.vtkCubeAxesActor): continue if ( hasattr(actor, 'GetBounds') and actor.GetBounds() is not None and id(actor) != id(self.bounding_box_actor)): _update_bounds(actor.GetBounds()) return the_bounds
[ "def", "bounds", "(", "self", ")", ":", "the_bounds", "=", "[", "np", ".", "inf", ",", "-", "np", ".", "inf", ",", "np", ".", "inf", ",", "-", "np", ".", "inf", ",", "np", ".", "inf", ",", "-", "np", ".", "inf", "]", "def", "_update_bounds", ...
Bounds of all actors present in the rendering window
[ "Bounds", "of", "all", "actors", "present", "in", "the", "rendering", "window" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L629-L650
train
217,482
vtkiorg/vtki
vtki/renderer.py
Renderer.center
def center(self): """Center of the bounding box around all data present in the scene""" bounds = self.bounds x = (bounds[1] + bounds[0])/2 y = (bounds[3] + bounds[2])/2 z = (bounds[5] + bounds[4])/2 return [x, y, z]
python
def center(self): """Center of the bounding box around all data present in the scene""" bounds = self.bounds x = (bounds[1] + bounds[0])/2 y = (bounds[3] + bounds[2])/2 z = (bounds[5] + bounds[4])/2 return [x, y, z]
[ "def", "center", "(", "self", ")", ":", "bounds", "=", "self", ".", "bounds", "x", "=", "(", "bounds", "[", "1", "]", "+", "bounds", "[", "0", "]", ")", "/", "2", "y", "=", "(", "bounds", "[", "3", "]", "+", "bounds", "[", "2", "]", ")", ...
Center of the bounding box around all data present in the scene
[ "Center", "of", "the", "bounding", "box", "around", "all", "data", "present", "in", "the", "scene" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L653-L659
train
217,483
vtkiorg/vtki
vtki/renderer.py
Renderer.get_default_cam_pos
def get_default_cam_pos(self): """ Returns the default focal points and viewup. Uses ResetCamera to make a useful view. """ focal_pt = self.center return [np.array(rcParams['camera']['position']) + np.array(focal_pt), focal_pt, rcParams['camera']['viewup']]
python
def get_default_cam_pos(self): """ Returns the default focal points and viewup. Uses ResetCamera to make a useful view. """ focal_pt = self.center return [np.array(rcParams['camera']['position']) + np.array(focal_pt), focal_pt, rcParams['camera']['viewup']]
[ "def", "get_default_cam_pos", "(", "self", ")", ":", "focal_pt", "=", "self", ".", "center", "return", "[", "np", ".", "array", "(", "rcParams", "[", "'camera'", "]", "[", "'position'", "]", ")", "+", "np", ".", "array", "(", "focal_pt", ")", ",", "f...
Returns the default focal points and viewup. Uses ResetCamera to make a useful view.
[ "Returns", "the", "default", "focal", "points", "and", "viewup", ".", "Uses", "ResetCamera", "to", "make", "a", "useful", "view", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L661-L668
train
217,484
vtkiorg/vtki
vtki/renderer.py
Renderer.update_bounds_axes
def update_bounds_axes(self): """Update the bounds axes of the render window """ if (hasattr(self, '_box_object') and self._box_object is not None and self.bounding_box_actor is not None): if not np.allclose(self._box_object.bounds, self.bounds): color = self.bounding_box_actor.GetProperty().GetColor() self.remove_bounding_box() self.add_bounding_box(color=color) if hasattr(self, 'cube_axes_actor'): self.cube_axes_actor.SetBounds(self.bounds) if not np.allclose(self.scale, [1.0, 1.0, 1.0]): self.cube_axes_actor.SetUse2DMode(True) else: self.cube_axes_actor.SetUse2DMode(False)
python
def update_bounds_axes(self): """Update the bounds axes of the render window """ if (hasattr(self, '_box_object') and self._box_object is not None and self.bounding_box_actor is not None): if not np.allclose(self._box_object.bounds, self.bounds): color = self.bounding_box_actor.GetProperty().GetColor() self.remove_bounding_box() self.add_bounding_box(color=color) if hasattr(self, 'cube_axes_actor'): self.cube_axes_actor.SetBounds(self.bounds) if not np.allclose(self.scale, [1.0, 1.0, 1.0]): self.cube_axes_actor.SetUse2DMode(True) else: self.cube_axes_actor.SetUse2DMode(False)
[ "def", "update_bounds_axes", "(", "self", ")", ":", "if", "(", "hasattr", "(", "self", ",", "'_box_object'", ")", "and", "self", ".", "_box_object", "is", "not", "None", "and", "self", ".", "bounding_box_actor", "is", "not", "None", ")", ":", "if", "not"...
Update the bounds axes of the render window
[ "Update", "the", "bounds", "axes", "of", "the", "render", "window" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L670-L683
train
217,485
vtkiorg/vtki
vtki/renderer.py
Renderer.view_isometric
def view_isometric(self): """ Resets the camera to a default isometric view showing all the actors in the scene. """ self.camera_position = self.get_default_cam_pos() self.camera_set = False return self.reset_camera()
python
def view_isometric(self): """ Resets the camera to a default isometric view showing all the actors in the scene. """ self.camera_position = self.get_default_cam_pos() self.camera_set = False return self.reset_camera()
[ "def", "view_isometric", "(", "self", ")", ":", "self", ".", "camera_position", "=", "self", ".", "get_default_cam_pos", "(", ")", "self", ".", "camera_set", "=", "False", "return", "self", ".", "reset_camera", "(", ")" ]
Resets the camera to a default isometric view showing all the actors in the scene.
[ "Resets", "the", "camera", "to", "a", "default", "isometric", "view", "showing", "all", "the", "actors", "in", "the", "scene", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L697-L704
train
217,486
vtkiorg/vtki
vtki/renderer.py
Renderer.view_vector
def view_vector(self, vector, viewup=None): """Point the camera in the direction of the given vector""" focal_pt = self.center if viewup is None: viewup = rcParams['camera']['viewup'] cpos = [vector + np.array(focal_pt), focal_pt, viewup] self.camera_position = cpos return self.reset_camera()
python
def view_vector(self, vector, viewup=None): """Point the camera in the direction of the given vector""" focal_pt = self.center if viewup is None: viewup = rcParams['camera']['viewup'] cpos = [vector + np.array(focal_pt), focal_pt, viewup] self.camera_position = cpos return self.reset_camera()
[ "def", "view_vector", "(", "self", ",", "vector", ",", "viewup", "=", "None", ")", ":", "focal_pt", "=", "self", ".", "center", "if", "viewup", "is", "None", ":", "viewup", "=", "rcParams", "[", "'camera'", "]", "[", "'viewup'", "]", "cpos", "=", "["...
Point the camera in the direction of the given vector
[ "Point", "the", "camera", "in", "the", "direction", "of", "the", "given", "vector" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L706-L714
train
217,487
vtkiorg/vtki
vtki/renderer.py
Renderer.view_xy
def view_xy(self, negative=False): """View the XY plane""" vec = np.array([0,0,1]) viewup = np.array([0,1,0]) if negative: vec = np.array([0,0,-1]) return self.view_vector(vec, viewup)
python
def view_xy(self, negative=False): """View the XY plane""" vec = np.array([0,0,1]) viewup = np.array([0,1,0]) if negative: vec = np.array([0,0,-1]) return self.view_vector(vec, viewup)
[ "def", "view_xy", "(", "self", ",", "negative", "=", "False", ")", ":", "vec", "=", "np", ".", "array", "(", "[", "0", ",", "0", ",", "1", "]", ")", "viewup", "=", "np", ".", "array", "(", "[", "0", ",", "1", ",", "0", "]", ")", "if", "ne...
View the XY plane
[ "View", "the", "XY", "plane" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L716-L722
train
217,488
vtkiorg/vtki
vtki/utilities.py
cell_scalar
def cell_scalar(mesh, name): """ Returns cell scalars of a vtk object """ vtkarr = mesh.GetCellData().GetArray(name) if vtkarr: if isinstance(vtkarr, vtk.vtkBitArray): vtkarr = vtk_bit_array_to_char(vtkarr) return vtk_to_numpy(vtkarr)
python
def cell_scalar(mesh, name): """ Returns cell scalars of a vtk object """ vtkarr = mesh.GetCellData().GetArray(name) if vtkarr: if isinstance(vtkarr, vtk.vtkBitArray): vtkarr = vtk_bit_array_to_char(vtkarr) return vtk_to_numpy(vtkarr)
[ "def", "cell_scalar", "(", "mesh", ",", "name", ")", ":", "vtkarr", "=", "mesh", ".", "GetCellData", "(", ")", ".", "GetArray", "(", "name", ")", "if", "vtkarr", ":", "if", "isinstance", "(", "vtkarr", ",", "vtk", ".", "vtkBitArray", ")", ":", "vtkar...
Returns cell scalars of a vtk object
[ "Returns", "cell", "scalars", "of", "a", "vtk", "object" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L44-L50
train
217,489
vtkiorg/vtki
vtki/utilities.py
vtk_points
def vtk_points(points, deep=True): """ Convert numpy points to a vtkPoints object """ if not points.flags['C_CONTIGUOUS']: points = np.ascontiguousarray(points) vtkpts = vtk.vtkPoints() vtkpts.SetData(numpy_to_vtk(points, deep=deep)) return vtkpts
python
def vtk_points(points, deep=True): """ Convert numpy points to a vtkPoints object """ if not points.flags['C_CONTIGUOUS']: points = np.ascontiguousarray(points) vtkpts = vtk.vtkPoints() vtkpts.SetData(numpy_to_vtk(points, deep=deep)) return vtkpts
[ "def", "vtk_points", "(", "points", ",", "deep", "=", "True", ")", ":", "if", "not", "points", ".", "flags", "[", "'C_CONTIGUOUS'", "]", ":", "points", "=", "np", ".", "ascontiguousarray", "(", "points", ")", "vtkpts", "=", "vtk", ".", "vtkPoints", "("...
Convert numpy points to a vtkPoints object
[ "Convert", "numpy", "points", "to", "a", "vtkPoints", "object" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L109-L115
train
217,490
vtkiorg/vtki
vtki/utilities.py
lines_from_points
def lines_from_points(points): """ Generates line from points. Assumes points are ordered as line segments. Parameters ---------- points : np.ndarray Points representing line segments. For example, two line segments would be represented as: np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]]) Returns ------- lines : vtki.PolyData PolyData with lines and cells. Examples -------- This example plots two line segments at right angles to each other line. >>> import vtki >>> import numpy as np >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]]) >>> lines = vtki.lines_from_points(points) >>> lines.plot() # doctest:+SKIP """ # Assuming ordered points, create array defining line order npoints = points.shape[0] - 1 lines = np.vstack((2 * np.ones(npoints, np.int), np.arange(npoints), np.arange(1, npoints + 1))).T.ravel() return vtki.PolyData(points, lines)
python
def lines_from_points(points): """ Generates line from points. Assumes points are ordered as line segments. Parameters ---------- points : np.ndarray Points representing line segments. For example, two line segments would be represented as: np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]]) Returns ------- lines : vtki.PolyData PolyData with lines and cells. Examples -------- This example plots two line segments at right angles to each other line. >>> import vtki >>> import numpy as np >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]]) >>> lines = vtki.lines_from_points(points) >>> lines.plot() # doctest:+SKIP """ # Assuming ordered points, create array defining line order npoints = points.shape[0] - 1 lines = np.vstack((2 * np.ones(npoints, np.int), np.arange(npoints), np.arange(1, npoints + 1))).T.ravel() return vtki.PolyData(points, lines)
[ "def", "lines_from_points", "(", "points", ")", ":", "# Assuming ordered points, create array defining line order", "npoints", "=", "points", ".", "shape", "[", "0", "]", "-", "1", "lines", "=", "np", ".", "vstack", "(", "(", "2", "*", "np", ".", "ones", "("...
Generates line from points. Assumes points are ordered as line segments. Parameters ---------- points : np.ndarray Points representing line segments. For example, two line segments would be represented as: np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]]) Returns ------- lines : vtki.PolyData PolyData with lines and cells. Examples -------- This example plots two line segments at right angles to each other line. >>> import vtki >>> import numpy as np >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]]) >>> lines = vtki.lines_from_points(points) >>> lines.plot() # doctest:+SKIP
[ "Generates", "line", "from", "points", ".", "Assumes", "points", "are", "ordered", "as", "line", "segments", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L118-L152
train
217,491
vtkiorg/vtki
vtki/utilities.py
vector_poly_data
def vector_poly_data(orig, vec): """ Creates a vtkPolyData object composed of vectors """ # shape, dimention checking if not isinstance(orig, np.ndarray): orig = np.asarray(orig) if not isinstance(vec, np.ndarray): vec = np.asarray(vec) if orig.ndim != 2: orig = orig.reshape((-1, 3)) elif orig.shape[1] != 3: raise Exception('orig array must be 3D') if vec.ndim != 2: vec = vec.reshape((-1, 3)) elif vec.shape[1] != 3: raise Exception('vec array must be 3D') # Create vtk points and cells objects vpts = vtk.vtkPoints() vpts.SetData(numpy_to_vtk(np.ascontiguousarray(orig), deep=True)) npts = orig.shape[0] cells = np.hstack((np.ones((npts, 1), 'int'), np.arange(npts).reshape((-1, 1)))) if cells.dtype != ctypes.c_int64 or cells.flags.c_contiguous: cells = np.ascontiguousarray(cells, ctypes.c_int64) cells = np.reshape(cells, (2*npts)) vcells = vtk.vtkCellArray() vcells.SetCells(npts, numpy_to_vtkIdTypeArray(cells, deep=True)) # Create vtkPolyData object pdata = vtk.vtkPolyData() pdata.SetPoints(vpts) pdata.SetVerts(vcells) # Add vectors to polydata name = 'vectors' vtkfloat = numpy_to_vtk(np.ascontiguousarray(vec), deep=True) vtkfloat.SetName(name) pdata.GetPointData().AddArray(vtkfloat) pdata.GetPointData().SetActiveVectors(name) # Add magnitude of vectors to polydata name = 'mag' scalars = (vec * vec).sum(1)**0.5 vtkfloat = numpy_to_vtk(np.ascontiguousarray(scalars), deep=True) vtkfloat.SetName(name) pdata.GetPointData().AddArray(vtkfloat) pdata.GetPointData().SetActiveScalars(name) return vtki.PolyData(pdata)
python
def vector_poly_data(orig, vec): """ Creates a vtkPolyData object composed of vectors """ # shape, dimention checking if not isinstance(orig, np.ndarray): orig = np.asarray(orig) if not isinstance(vec, np.ndarray): vec = np.asarray(vec) if orig.ndim != 2: orig = orig.reshape((-1, 3)) elif orig.shape[1] != 3: raise Exception('orig array must be 3D') if vec.ndim != 2: vec = vec.reshape((-1, 3)) elif vec.shape[1] != 3: raise Exception('vec array must be 3D') # Create vtk points and cells objects vpts = vtk.vtkPoints() vpts.SetData(numpy_to_vtk(np.ascontiguousarray(orig), deep=True)) npts = orig.shape[0] cells = np.hstack((np.ones((npts, 1), 'int'), np.arange(npts).reshape((-1, 1)))) if cells.dtype != ctypes.c_int64 or cells.flags.c_contiguous: cells = np.ascontiguousarray(cells, ctypes.c_int64) cells = np.reshape(cells, (2*npts)) vcells = vtk.vtkCellArray() vcells.SetCells(npts, numpy_to_vtkIdTypeArray(cells, deep=True)) # Create vtkPolyData object pdata = vtk.vtkPolyData() pdata.SetPoints(vpts) pdata.SetVerts(vcells) # Add vectors to polydata name = 'vectors' vtkfloat = numpy_to_vtk(np.ascontiguousarray(vec), deep=True) vtkfloat.SetName(name) pdata.GetPointData().AddArray(vtkfloat) pdata.GetPointData().SetActiveVectors(name) # Add magnitude of vectors to polydata name = 'mag' scalars = (vec * vec).sum(1)**0.5 vtkfloat = numpy_to_vtk(np.ascontiguousarray(scalars), deep=True) vtkfloat.SetName(name) pdata.GetPointData().AddArray(vtkfloat) pdata.GetPointData().SetActiveScalars(name) return vtki.PolyData(pdata)
[ "def", "vector_poly_data", "(", "orig", ",", "vec", ")", ":", "# shape, dimention checking", "if", "not", "isinstance", "(", "orig", ",", "np", ".", "ndarray", ")", ":", "orig", "=", "np", ".", "asarray", "(", "orig", ")", "if", "not", "isinstance", "(",...
Creates a vtkPolyData object composed of vectors
[ "Creates", "a", "vtkPolyData", "object", "composed", "of", "vectors" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L155-L209
train
217,492
vtkiorg/vtki
vtki/utilities.py
trans_from_matrix
def trans_from_matrix(matrix): """ Convert a vtk matrix to a numpy.ndarray """ t = np.zeros((4, 4)) for i in range(4): for j in range(4): t[i, j] = matrix.GetElement(i, j) return t
python
def trans_from_matrix(matrix): """ Convert a vtk matrix to a numpy.ndarray """ t = np.zeros((4, 4)) for i in range(4): for j in range(4): t[i, j] = matrix.GetElement(i, j) return t
[ "def", "trans_from_matrix", "(", "matrix", ")", ":", "t", "=", "np", ".", "zeros", "(", "(", "4", ",", "4", ")", ")", "for", "i", "in", "range", "(", "4", ")", ":", "for", "j", "in", "range", "(", "4", ")", ":", "t", "[", "i", ",", "j", "...
Convert a vtk matrix to a numpy.ndarray
[ "Convert", "a", "vtk", "matrix", "to", "a", "numpy", ".", "ndarray" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L212-L218
train
217,493
vtkiorg/vtki
vtki/utilities.py
wrap
def wrap(vtkdataset): """This is a convenience method to safely wrap any given VTK data object to its appropriate ``vtki`` data object. """ wrappers = { 'vtkUnstructuredGrid' : vtki.UnstructuredGrid, 'vtkRectilinearGrid' : vtki.RectilinearGrid, 'vtkStructuredGrid' : vtki.StructuredGrid, 'vtkPolyData' : vtki.PolyData, 'vtkImageData' : vtki.UniformGrid, 'vtkStructuredPoints' : vtki.UniformGrid, 'vtkMultiBlockDataSet' : vtki.MultiBlock, } key = vtkdataset.GetClassName() try: wrapped = wrappers[key](vtkdataset) except: logging.warning('VTK data type ({}) is not currently supported by vtki.'.format(key)) return vtkdataset # if not supported just passes the VTK data object return wrapped
python
def wrap(vtkdataset): """This is a convenience method to safely wrap any given VTK data object to its appropriate ``vtki`` data object. """ wrappers = { 'vtkUnstructuredGrid' : vtki.UnstructuredGrid, 'vtkRectilinearGrid' : vtki.RectilinearGrid, 'vtkStructuredGrid' : vtki.StructuredGrid, 'vtkPolyData' : vtki.PolyData, 'vtkImageData' : vtki.UniformGrid, 'vtkStructuredPoints' : vtki.UniformGrid, 'vtkMultiBlockDataSet' : vtki.MultiBlock, } key = vtkdataset.GetClassName() try: wrapped = wrappers[key](vtkdataset) except: logging.warning('VTK data type ({}) is not currently supported by vtki.'.format(key)) return vtkdataset # if not supported just passes the VTK data object return wrapped
[ "def", "wrap", "(", "vtkdataset", ")", ":", "wrappers", "=", "{", "'vtkUnstructuredGrid'", ":", "vtki", ".", "UnstructuredGrid", ",", "'vtkRectilinearGrid'", ":", "vtki", ".", "RectilinearGrid", ",", "'vtkStructuredGrid'", ":", "vtki", ".", "StructuredGrid", ",", ...
This is a convenience method to safely wrap any given VTK data object to its appropriate ``vtki`` data object.
[ "This", "is", "a", "convenience", "method", "to", "safely", "wrap", "any", "given", "VTK", "data", "object", "to", "its", "appropriate", "vtki", "data", "object", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L221-L240
train
217,494
vtkiorg/vtki
vtki/utilities.py
image_to_texture
def image_to_texture(image): """Converts ``vtkImageData`` to a ``vtkTexture``""" vtex = vtk.vtkTexture() vtex.SetInputDataObject(image) vtex.Update() return vtex
python
def image_to_texture(image): """Converts ``vtkImageData`` to a ``vtkTexture``""" vtex = vtk.vtkTexture() vtex.SetInputDataObject(image) vtex.Update() return vtex
[ "def", "image_to_texture", "(", "image", ")", ":", "vtex", "=", "vtk", ".", "vtkTexture", "(", ")", "vtex", ".", "SetInputDataObject", "(", "image", ")", "vtex", ".", "Update", "(", ")", "return", "vtex" ]
Converts ``vtkImageData`` to a ``vtkTexture``
[ "Converts", "vtkImageData", "to", "a", "vtkTexture" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L243-L248
train
217,495
vtkiorg/vtki
vtki/utilities.py
numpy_to_texture
def numpy_to_texture(image): """Convert a NumPy image array to a vtk.vtkTexture""" if not isinstance(image, np.ndarray): raise TypeError('Unknown input type ({})'.format(type(image))) if image.ndim != 3 or image.shape[2] != 3: raise AssertionError('Input image must be nn by nm by RGB') grid = vtki.UniformGrid((image.shape[1], image.shape[0], 1)) grid.point_arrays['Image'] = np.flip(image.swapaxes(0,1), axis=1).reshape((-1, 3), order='F') grid.set_active_scalar('Image') return image_to_texture(grid)
python
def numpy_to_texture(image): """Convert a NumPy image array to a vtk.vtkTexture""" if not isinstance(image, np.ndarray): raise TypeError('Unknown input type ({})'.format(type(image))) if image.ndim != 3 or image.shape[2] != 3: raise AssertionError('Input image must be nn by nm by RGB') grid = vtki.UniformGrid((image.shape[1], image.shape[0], 1)) grid.point_arrays['Image'] = np.flip(image.swapaxes(0,1), axis=1).reshape((-1, 3), order='F') grid.set_active_scalar('Image') return image_to_texture(grid)
[ "def", "numpy_to_texture", "(", "image", ")", ":", "if", "not", "isinstance", "(", "image", ",", "np", ".", "ndarray", ")", ":", "raise", "TypeError", "(", "'Unknown input type ({})'", ".", "format", "(", "type", "(", "image", ")", ")", ")", "if", "image...
Convert a NumPy image array to a vtk.vtkTexture
[ "Convert", "a", "NumPy", "image", "array", "to", "a", "vtk", ".", "vtkTexture" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L251-L260
train
217,496
vtkiorg/vtki
vtki/utilities.py
is_inside_bounds
def is_inside_bounds(point, bounds): """ Checks if a point is inside a set of bounds. This is implemented through recursion so that this is N-dimensional. """ if isinstance(point, (int, float)): point = [point] if isinstance(point, collections.Iterable) and not isinstance(point, collections.deque): if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0: raise AssertionError('Bounds mismatch point dimensionality') point = collections.deque(point) bounds = collections.deque(bounds) return is_inside_bounds(point, bounds) if not isinstance(point, collections.deque): raise TypeError('Unknown input data type ({}).'.format(type(point))) if len(point) < 1: return True p = point.popleft() lower, upper = bounds.popleft(), bounds.popleft() if lower <= p <= upper: return is_inside_bounds(point, bounds) return False
python
def is_inside_bounds(point, bounds): """ Checks if a point is inside a set of bounds. This is implemented through recursion so that this is N-dimensional. """ if isinstance(point, (int, float)): point = [point] if isinstance(point, collections.Iterable) and not isinstance(point, collections.deque): if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0: raise AssertionError('Bounds mismatch point dimensionality') point = collections.deque(point) bounds = collections.deque(bounds) return is_inside_bounds(point, bounds) if not isinstance(point, collections.deque): raise TypeError('Unknown input data type ({}).'.format(type(point))) if len(point) < 1: return True p = point.popleft() lower, upper = bounds.popleft(), bounds.popleft() if lower <= p <= upper: return is_inside_bounds(point, bounds) return False
[ "def", "is_inside_bounds", "(", "point", ",", "bounds", ")", ":", "if", "isinstance", "(", "point", ",", "(", "int", ",", "float", ")", ")", ":", "point", "=", "[", "point", "]", "if", "isinstance", "(", "point", ",", "collections", ".", "Iterable", ...
Checks if a point is inside a set of bounds. This is implemented through recursion so that this is N-dimensional.
[ "Checks", "if", "a", "point", "is", "inside", "a", "set", "of", "bounds", ".", "This", "is", "implemented", "through", "recursion", "so", "that", "this", "is", "N", "-", "dimensional", "." ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L263-L283
train
217,497
vtkiorg/vtki
vtki/utilities.py
fit_plane_to_points
def fit_plane_to_points(points, return_meta=False): """ Fits a plane to a set of points Parameters ---------- points : np.ndarray Size n by 3 array of points to fit a plane through return_meta : bool If true, also returns the center and normal used to generate the plane """ data = np.array(points) center = data.mean(axis=0) result = np.linalg.svd(data - center) normal = np.cross(result[2][0], result[2][1]) plane = vtki.Plane(center=center, direction=normal) if return_meta: return plane, center, normal return plane
python
def fit_plane_to_points(points, return_meta=False): """ Fits a plane to a set of points Parameters ---------- points : np.ndarray Size n by 3 array of points to fit a plane through return_meta : bool If true, also returns the center and normal used to generate the plane """ data = np.array(points) center = data.mean(axis=0) result = np.linalg.svd(data - center) normal = np.cross(result[2][0], result[2][1]) plane = vtki.Plane(center=center, direction=normal) if return_meta: return plane, center, normal return plane
[ "def", "fit_plane_to_points", "(", "points", ",", "return_meta", "=", "False", ")", ":", "data", "=", "np", ".", "array", "(", "points", ")", "center", "=", "data", ".", "mean", "(", "axis", "=", "0", ")", "result", "=", "np", ".", "linalg", ".", "...
Fits a plane to a set of points Parameters ---------- points : np.ndarray Size n by 3 array of points to fit a plane through return_meta : bool If true, also returns the center and normal used to generate the plane
[ "Fits", "a", "plane", "to", "a", "set", "of", "points" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/utilities.py#L286-L305
train
217,498
vtkiorg/vtki
vtki/plotting.py
set_plot_theme
def set_plot_theme(theme): """Set the plotting parameters to a predefined theme""" if theme.lower() in ['paraview', 'pv']: rcParams['background'] = PV_BACKGROUND rcParams['cmap'] = 'coolwarm' rcParams['font']['family'] = 'arial' rcParams['font']['label_size'] = 16 rcParams['show_edges'] = False elif theme.lower() in ['document', 'doc', 'paper', 'report']: rcParams['background'] = 'white' rcParams['cmap'] = 'viridis' rcParams['font']['size'] = 18 rcParams['font']['title_size'] = 18 rcParams['font']['label_size'] = 18 rcParams['font']['color'] = 'black' rcParams['show_edges'] = False rcParams['color'] = 'tan' rcParams['outline_color'] = 'black' elif theme.lower() in ['night', 'dark']: rcParams['background'] = 'black' rcParams['cmap'] = 'viridis' rcParams['font']['color'] = 'white' rcParams['show_edges'] = False rcParams['color'] = 'tan' rcParams['outline_color'] = 'white' elif theme.lower() in ['default']: for k,v in DEFAULT_THEME.items(): rcParams[k] = v
python
def set_plot_theme(theme): """Set the plotting parameters to a predefined theme""" if theme.lower() in ['paraview', 'pv']: rcParams['background'] = PV_BACKGROUND rcParams['cmap'] = 'coolwarm' rcParams['font']['family'] = 'arial' rcParams['font']['label_size'] = 16 rcParams['show_edges'] = False elif theme.lower() in ['document', 'doc', 'paper', 'report']: rcParams['background'] = 'white' rcParams['cmap'] = 'viridis' rcParams['font']['size'] = 18 rcParams['font']['title_size'] = 18 rcParams['font']['label_size'] = 18 rcParams['font']['color'] = 'black' rcParams['show_edges'] = False rcParams['color'] = 'tan' rcParams['outline_color'] = 'black' elif theme.lower() in ['night', 'dark']: rcParams['background'] = 'black' rcParams['cmap'] = 'viridis' rcParams['font']['color'] = 'white' rcParams['show_edges'] = False rcParams['color'] = 'tan' rcParams['outline_color'] = 'white' elif theme.lower() in ['default']: for k,v in DEFAULT_THEME.items(): rcParams[k] = v
[ "def", "set_plot_theme", "(", "theme", ")", ":", "if", "theme", ".", "lower", "(", ")", "in", "[", "'paraview'", ",", "'pv'", "]", ":", "rcParams", "[", "'background'", "]", "=", "PV_BACKGROUND", "rcParams", "[", "'cmap'", "]", "=", "'coolwarm'", "rcPara...
Set the plotting parameters to a predefined theme
[ "Set", "the", "plotting", "parameters", "to", "a", "predefined", "theme" ]
5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L85-L112
train
217,499