text stringlengths 81 112k |
|---|
Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
def rr_history(self, query, query_type="A"):
    '''Get the RR (Resource Record) History of the given domain or IP.
    The default query type is for 'A' records, but the following query types
    are supported:
    A, NS, MX, TXT, CNAME
    For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
    '''
    if query_type not in Investigate.SUPPORTED_DNS_TYPES:
        raise Investigate.UNSUPPORTED_DNS_QUERY
    # IPs and domains are served by different endpoints; pick the handler.
    handler = (self._ip_rr_history
               if Investigate.IP_PATTERN.match(query)
               else self._domain_rr_history)
    return handler(query, query_type)
Gets whois information for a domain
def domain_whois(self, domain):
    '''Gets whois information for a domain'''
    return self.get_parse(self._uris["whois_domain"].format(domain))
Gets whois history for a domain
def domain_whois_history(self, domain, limit=None):
    '''Gets whois history for a domain'''
    # Only send 'limit' when the caller supplied one.
    params = {} if limit is None else {'limit': limit}
    uri = self._uris["whois_domain_history"].format(domain)
    return self.get_parse(uri, params)
Gets the domains that have been registered with a nameserver or
nameservers
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
    '''Gets the domains that have been registered with a nameserver or
    nameservers

    `nameservers` may be a single nameserver (string, placed in the URL
    path) or a list of nameservers (sent as a comma-joined query param).
    '''
    params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
    if isinstance(nameservers, list):
        uri = self._uris["whois_ns"].format('')
        # BUG FIX: this previously sent 'emailList' (copy-pasted from the
        # email-whois endpoint); the nameserver endpoint expects
        # 'nameServerList' per the Umbrella Investigate API.
        params['nameServerList'] = ','.join(nameservers)
    else:
        uri = self._uris["whois_ns"].format(nameservers)
    resp_json = self.get_parse(uri, params=params)
    return resp_json
Searches for domains that match a given pattern
def search(self, pattern, start=None, limit=None, include_category=None):
    '''Searches for domains that match a given pattern.

    pattern: search expression; URL-quoted before insertion into the path.
    start: lookback window — either a datetime.timedelta (relative to
        now, defaults to 30 days) or an absolute datetime.datetime.
        Any other type raises Investigate.SEARCH_ERR.
    limit: maximum number of results (sent only when an int).
    include_category: sent as 'true'/'false' only when a bool.
    '''
    params = dict()
    if start is None:
        start = datetime.timedelta(days=30)
    if isinstance(start, datetime.timedelta):
        # Epoch timestamp in milliseconds, as the API expects.
        # NOTE(review): time.mktime interprets its tuple as *local* time,
        # but utcnow() produces a UTC tuple — on non-UTC hosts this skews
        # the timestamp by the UTC offset. Confirm whether
        # calendar.timegm was intended.
        params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
    elif isinstance(start, datetime.datetime):
        # NOTE(review): same local-vs-UTC mktime concern as above.
        params['start'] = int(time.mktime(start.timetuple()) * 1000)
    else:
        # Unsupported type for `start`.
        raise Investigate.SEARCH_ERR
    if limit is not None and isinstance(limit, int):
        params['limit'] = limit
    if include_category is not None and isinstance(include_category, bool):
        params['includeCategory'] = str(include_category).lower()
    uri = self._uris['search'].format(quote_plus(pattern))
    return self.get_parse(uri, params)
Return an object representing the samples identified by the input domain, IP, or URL
def samples(self, anystring, limit=None, offset=None, sortby=None):
    '''Return an object representing the samples identified by the input domain, IP, or URL'''
    query = {'limit': limit, 'offset': offset, 'sortby': sortby}
    return self.get_parse(self._uris['samples'].format(anystring), query)
Return an object representing the sample identified by the input hash, or an empty object if that sample is not found
def sample(self, hash, limit=None, offset=None):
    '''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
    # NOTE: parameter name `hash` shadows the builtin but is part of the
    # public interface, so it is kept.
    query = {'limit': limit, 'offset': offset}
    return self.get_parse(self._uris['sample'].format(hash), query)
Gets the AS information for a given IP address.
def as_for_ip(self, ip):
    '''Gets the AS information for a given IP address.'''
    # Reject anything that does not look like an IP up front.
    if not Investigate.IP_PATTERN.match(ip):
        raise Investigate.IP_ERR
    return self.get_parse(self._uris["as_for_ip"].format(ip))
Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.
def prefixes_for_asn(self, asn):
    '''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
    return self.get_parse(self._uris["prefixes_for_asn"].format(asn))
Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
def timeline(self, uri):
    '''Get the domain tagging timeline for a given uri.
    Could be a domain, ip, or url.
    For details, see https://docs.umbrella.com/investigate-api/docs/timeline
    '''
    # Rebind `uri` from the query term to the full endpoint URL.
    endpoint = self._uris["timeline"].format(uri)
    return self.get_parse(endpoint)
Absolute value
def abs(x):
    """
    Absolute value.

    Applies np.abs to the Monte Carlo points of an UncertainFunction, or
    directly to any other numeric input. (Intentionally shadows the
    builtin, mirroring the math-module-style API.)
    """
    if not isinstance(x, UncertainFunction):
        return np.abs(x)
    return UncertainFunction(np.abs(x._mcpts))
Inverse cosine
def acos(x):
    """
    Inverse cosine.

    Applies np.arccos to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.arccos(x)
    return UncertainFunction(np.arccos(x._mcpts))
Inverse hyperbolic cosine
def acosh(x):
    """
    Inverse hyperbolic cosine.

    Applies np.arccosh to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.arccosh(x)
    return UncertainFunction(np.arccosh(x._mcpts))
Inverse sine
def asin(x):
    """
    Inverse sine.

    Applies np.arcsin to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.arcsin(x)
    return UncertainFunction(np.arcsin(x._mcpts))
Inverse hyperbolic sine
def asinh(x):
    """
    Inverse hyperbolic sine.

    Applies np.arcsinh to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.arcsinh(x)
    return UncertainFunction(np.arcsinh(x._mcpts))
Inverse tangent
def atan(x):
    """
    Inverse tangent.

    Applies np.arctan to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.arctan(x)
    return UncertainFunction(np.arctan(x._mcpts))
Inverse hyperbolic tangent
def atanh(x):
    """
    Inverse hyperbolic tangent.

    Applies np.arctanh to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.arctanh(x)
    return UncertainFunction(np.arctanh(x._mcpts))
Ceiling function (round towards positive infinity)
def ceil(x):
    """
    Ceiling function (round towards positive infinity).

    Applies np.ceil to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.ceil(x)
    return UncertainFunction(np.ceil(x._mcpts))
Cosine
def cos(x):
    """
    Cosine.

    Applies np.cos to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.cos(x)
    return UncertainFunction(np.cos(x._mcpts))
Hyperbolic cosine
def cosh(x):
    """
    Hyperbolic cosine.

    Applies np.cosh to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.cosh(x)
    return UncertainFunction(np.cosh(x._mcpts))
Convert radians to degrees
def degrees(x):
    """
    Convert radians to degrees.

    Applies np.degrees to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.degrees(x)
    return UncertainFunction(np.degrees(x._mcpts))
Exponential function
def exp(x):
    """
    Exponential function.

    Applies np.exp to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.exp(x)
    return UncertainFunction(np.exp(x._mcpts))
Calculate exp(x) - 1
def expm1(x):
    """
    Calculate exp(x) - 1.

    Applies np.expm1 to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.expm1(x)
    return UncertainFunction(np.expm1(x._mcpts))
Absolute value function
def fabs(x):
    """
    Absolute value function.

    Applies np.fabs to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.fabs(x)
    return UncertainFunction(np.fabs(x._mcpts))
Floor function (round towards negative infinity)
def floor(x):
    """
    Floor function (round towards negative infinity).

    Applies np.floor to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.floor(x)
    return UncertainFunction(np.floor(x._mcpts))
Calculate the hypotenuse given two "legs" of a right triangle
def hypot(x, y):
    """
    Calculate the hypotenuse given two "legs" of a right triangle.

    If either argument is an UncertainFunction, both are promoted via
    to_uncertain_func and np.hypot is applied to their Monte Carlo points;
    otherwise np.hypot is applied directly.
    """
    # BUG FIX: the second operand of `or` previously tested `x` again
    # instead of `y`, so an uncertain `y` with a plain `x` took the
    # scalar path and crashed (np.hypot cannot consume the object).
    if isinstance(x, UncertainFunction) or isinstance(y, UncertainFunction):
        ufx = to_uncertain_func(x)
        ufy = to_uncertain_func(y)
        mcpts = np.hypot(ufx._mcpts, ufy._mcpts)
        return UncertainFunction(mcpts)
    else:
        return np.hypot(x, y)
Natural logarithm
def log(x):
    """
    Natural logarithm.

    Applies np.log to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.log(x)
    return UncertainFunction(np.log(x._mcpts))
Base-10 logarithm
def log10(x):
    """
    Base-10 logarithm.

    Applies np.log10 to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.log10(x)
    return UncertainFunction(np.log10(x._mcpts))
Natural logarithm of (1 + x)
def log1p(x):
    """
    Natural logarithm of (1 + x).

    Applies np.log1p to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.log1p(x)
    return UncertainFunction(np.log1p(x._mcpts))
Convert degrees to radians
def radians(x):
    """
    Convert degrees to radians.

    Applies np.radians to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.radians(x)
    return UncertainFunction(np.radians(x._mcpts))
Sine
def sin(x):
    """
    Sine.

    Applies np.sin to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.sin(x)
    return UncertainFunction(np.sin(x._mcpts))
Hyperbolic sine
def sinh(x):
    """
    Hyperbolic sine.

    Applies np.sinh to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.sinh(x)
    return UncertainFunction(np.sinh(x._mcpts))
Square-root function
def sqrt(x):
    """
    Square-root function.

    Applies np.sqrt to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.sqrt(x)
    return UncertainFunction(np.sqrt(x._mcpts))
Tangent
def tan(x):
    """
    Tangent.

    Applies np.tan to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.tan(x)
    return UncertainFunction(np.tan(x._mcpts))
Hyperbolic tangent
def tanh(x):
    """
    Hyperbolic tangent.

    Applies np.tanh to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.tanh(x)
    return UncertainFunction(np.tanh(x._mcpts))
Truncate the values to the integer value without rounding
def trunc(x):
    """
    Truncate the values to the integer value without rounding.

    Applies np.trunc to the Monte Carlo points of an UncertainFunction,
    or directly to any other numeric input.
    """
    if not isinstance(x, UncertainFunction):
        return np.trunc(x)
    return UncertainFunction(np.trunc(x._mcpts))
Create a Latin-Hypercube sample design based on distributions defined in the
`scipy.stats` module
Parameters
----------
dist: array_like
frozen scipy.stats.rv_continuous or rv_discrete distribution objects
that are defined previous to calling LHD
size: int
integer value for the number of samples to generate for each
distribution object
dims: int, optional
if dist is a single distribution object, and dims > 1, the one
distribution will be used to generate a size-by-dims sampled design
form: str, optional (non-functional at the moment)
determines how the sampling is to occur, with the following optional
values:
- 'randomized' - completely randomized sampling
- 'spacefilling' - space-filling sampling (generally gives a more
accurate sampling of the design when the number of sample points
is small)
- 'orthogonal' - balanced space-filling sampling (experimental)
The 'spacefilling' and 'orthogonal' forms require some iterations to
determine the optimal sampling pattern.
iterations: int, optional (non-functional at the moment)
used to control the number of allowable search iterations for generating
'spacefilling' and 'orthogonal' designs
Returns
-------
out: 2d-array,
A 2d-array where each column corresponds to each input distribution and
each row is a sample in the design
Examples
--------
Single distribution:
- uniform distribution, low = -1, width = 2
>>> import scipy.stats as ss
>>> d0 = ss.uniform(loc=-1,scale=2)
>>> print lhd(dist=d0,size=5)
[[ 0.51031081]
[-0.28961427]
[-0.68342107]
[ 0.69784371]
[ 0.12248842]]
Single distribution for multiple variables:
- normal distribution, mean = 0, stdev = 1
>>> d1 = ss.norm(loc=0,scale=1)
>>> print lhd(dist=d1,size=7,dims=5)
[[-0.8612785 0.23034412 0.21808001]
[ 0.0455778 0.07001606 0.31586419]
[-0.978553 0.30394663 0.78483995]
[-0.26415983 0.15235896 0.51462024]
[ 0.80805686 0.38891031 0.02076505]
[ 1.63028931 0.52104917 1.48016008]]
Multiple distributions:
- beta distribution, alpha = 2, beta = 5
- exponential distribution, lambda = 1.5
>>> d2 = ss.beta(2,5)
>>> d3 = ss.expon(scale=1/1.5)
>>> print lhd(dist=(d1,d2,d3),size=6)
[[-0.8612785 0.23034412 0.21808001]
[ 0.0455778 0.07001606 0.31586419]
[-0.978553 0.30394663 0.78483995]
[-0.26415983 0.15235896 0.51462024]
[ 0.80805686 0.38891031 0.02076505]
[ 1.63028931 0.52104917 1.48016008]]
def lhd(
    dist=None,
    size=None,
    dims=1,
    form="randomized",
    iterations=100,
    showcorrelations=False,
):
    """
    Create a Latin-Hypercube sample design based on distributions defined in the
    `scipy.stats` module

    Parameters
    ----------
    dist: array_like
        frozen scipy.stats.rv_continuous or rv_discrete distribution objects
        (or a single such object) that are defined previous to calling LHD
    size: int
        integer value for the number of samples to generate for each
        distribution object
    dims: int, optional
        if dist is a single distribution object, and dims > 1, the one
        distribution will be used to generate a size-by-dims sampled design
    form: str, optional
        determines how the sampling is to occur:
        - 'randomized' - completely randomized sampling (default)
        - 'spacefilling' - space-filling sampling (generally gives a more
          accurate sampling of the design when the number of sample points
          is small); iteratively searches for the best-spread design
        - 'orthogonal' - balanced space-filling sampling (not implemented)
    iterations: int, optional
        number of search iterations used when generating 'spacefilling'
        designs
    showcorrelations: bool, optional
        if True, print the design's correlation matrix, its pseudo-inverse,
        and the variance inflation factor (VIF)

    Returns
    -------
    out: 2d-array
        A 2d-array where each column corresponds to each input distribution
        and each row is a sample in the design. Returns None when `dist` or
        `size` is missing/falsy.

    Examples
    --------
    >>> import scipy.stats as ss
    >>> d0 = ss.uniform(loc=-1, scale=2)
    >>> lhd(dist=d0, size=5)            # doctest: +SKIP
    >>> d1 = ss.norm(loc=0, scale=1)
    >>> lhd(dist=d1, size=7, dims=5)    # doctest: +SKIP
    >>> d2 = ss.beta(2, 5)
    >>> d3 = ss.expon(scale=1/1.5)
    >>> lhd(dist=(d1, d2, d3), size=6)  # doctest: +SKIP
    """
    assert dims > 0, 'kwarg "dims" must be at least 1'
    if not size or not dist:
        return None

    def _lhs(x, samples=20):
        """
        Return a latin-hypercube matrix (each row a sample) with `samples`
        rows. `x` is a 2xN matrix of per-column lower (row 0) and upper
        (row 1) bounds.
        """
        segment_size = 1.0 / samples
        num_vars = x.shape[1]
        out = np.zeros((samples, num_vars))
        for n in range(num_vars):
            # one random point inside each of the `samples` equal segments
            seg_lo = np.arange(samples) * segment_size
            pts = seg_lo + np.random.random(samples) * segment_size
            out[:, n] = pts * (x[1, n] - x[0, n]) + x[0, n]
        # randomly rearrange the segments within each column
        return _mix(out)

    def _mix(data, dim="cols"):
        """
        Shuffle the values of each column (dim='cols') or each row
        (dim='rows') of `data` independently, in place.
        """
        data = np.atleast_2d(data)
        if dim == "rows":
            data = data.T
        for i in range(data.shape[1]):
            data[:, i] = np.random.permutation(data[:, i])
        if dim == "rows":
            data = data.T
        return data

    def _apply_ppf(unif_data):
        """Map uniform LH samples through the distributions' inverse CDFs."""
        dist_data = np.empty_like(unif_data)
        if hasattr(dist, "__getitem__"):  # multiple distributions
            for i, d in enumerate(dist):
                dist_data[:, i] = d.ppf(unif_data[:, i])
        else:  # a single distribution, reused for every column
            for i in range(unif_data.shape[1]):
                dist_data[:, i] = dist.ppf(unif_data[:, i])
        return dist_data

    nvars = len(dist) if hasattr(dist, "__getitem__") else dims
    # uniform design on [0, 1] per column, then transformed by each ppf
    bounds = np.vstack((np.zeros(nvars), np.ones(nvars)))

    # BUG FIX: the `form` dispatch previously used `is` for string
    # comparison (identity, not equality) — unreliable and a SyntaxWarning
    # on modern CPython.
    if form == "randomized":
        dist_data = _apply_ppf(_lhs(bounds, samples=size))
    elif form == "spacefilling":

        def euclid_distance(arr):
            # Sum of 1/d^2 over all point pairs: smaller = better spread.
            n = arr.shape[0]
            total = 0.0
            for i in range(n - 1):
                for j in range(i + 1, n):
                    total += 1.0 / np.sum((arr[i] - arr[j]) ** 2)
            return total

        def fill_space(data):
            # BUG FIX: the original never updated its `best` threshold, so
            # it kept the *last* design under 1e8 rather than the best one
            # (and could leave d_opt/data_opt unbound).
            d_opt = np.inf
            data_opt = data.copy()
            for _ in range(iterations):
                d = euclid_distance(data)
                if d < d_opt:
                    d_opt = d
                    data_opt = data.copy()
                data = _mix(data)
            print("Optimized Distance:", d_opt)
            return data_opt

        dist_data = _apply_ppf(fill_space(_lhs(bounds, samples=size)))
    elif form == "orthogonal":
        raise NotImplementedError(
            "Sorry. The orthogonal space-filling algorithm hasn't been implemented yet."
        )
    else:
        raise ValueError('Invalid "form" value: %s' % (form))

    if dist_data.shape[1] > 1:
        # Pearson correlation of the design columns; VIF diagnoses
        # accidental correlation between supposedly independent inputs.
        cor_matrix = np.corrcoef(dist_data, rowvar=False)
        inv_cor_matrix = np.linalg.pinv(cor_matrix)
        VIF = np.max(np.diag(inv_cor_matrix))
        if showcorrelations:
            print("Correlation Matrix:\n", cor_matrix)
            print("Inverted Correlation Matrix:\n", inv_cor_matrix)
            print("Variance Inflation Factor (VIF):", VIF)
    return dist_data
Transforms x into an UncertainFunction-compatible object,
unless it is already an UncertainFunction (in which case x is returned
unchanged).
Raises an exception unless 'x' belongs to some specific classes of
objects that are known not to depend on UncertainFunction objects
(which then cannot be considered as constants).
def to_uncertain_func(x):
    """
    Transforms x into an UncertainFunction-compatible object,
    unless it is already an UncertainFunction (in which case x is returned
    unchanged).

    Raises an exception unless 'x' belongs to some specific classes of
    objects that are known not to depend on UncertainFunction objects
    (which then cannot be considered as constants).
    """
    if isinstance(x, UncertainFunction):
        return x
    # Plain constants become a degenerate sample: npts identical points.
    # ! In Python 2.6+, numbers.Number could be used instead, here:
    if isinstance(x, CONSTANT_TYPES):
        return UncertainFunction([x] * npts)
    raise NotUpcast("%s cannot be converted to a number with" " uncertainty" % type(x))
A Beta random variate
Parameters
----------
alpha : scalar
The first shape parameter
beta : scalar
The second shape parameter
Optional
--------
low : scalar
Lower bound of the distribution support (default=0)
high : scalar
Upper bound of the distribution support (default=1)
def Beta(alpha, beta, low=0, high=1, tag=None):
    """
    A Beta random variate

    Parameters
    ----------
    alpha : scalar
        The first shape parameter
    beta : scalar
        The second shape parameter

    Optional
    --------
    low : scalar
        Lower bound of the distribution support (default=0)
    high : scalar
        Upper bound of the distribution support (default=1)
    """
    assert alpha > 0 and beta > 0, (
        'Beta "alpha" and "beta" parameters must be greater than zero'
    )
    assert low < high, 'Beta "low" must be less than "high"'
    # scipy parameterizes the support as [loc, loc + scale]
    rv = ss.beta(alpha, beta, loc=low, scale=high - low)
    return uv(rv, tag=tag)
A BetaPrime random variate
Parameters
----------
alpha : scalar
The first shape parameter
beta : scalar
The second shape parameter
def BetaPrime(alpha, beta, tag=None):
    """
    A BetaPrime random variate

    Parameters
    ----------
    alpha : scalar
        The first shape parameter
    beta : scalar
        The second shape parameter
    """
    assert (
        alpha > 0 and beta > 0
    ), 'BetaPrime "alpha" and "beta" parameters must be greater than zero'
    # BUG FIX: `tag` was previously passed positionally into Beta's third
    # parameter `low` (signature Beta(alpha, beta, low=0, high=1, tag=None)),
    # silently shifting the support instead of tagging the variate.
    x = Beta(alpha, beta, tag=tag)
    # If X ~ Beta(a, b), then X / (1 - X) ~ BetaPrime(a, b).
    return x / (1 - x)
A Bradford random variate
Parameters
----------
q : scalar
The shape parameter
low : scalar
The lower bound of the distribution (default=0)
high : scalar
The upper bound of the distribution (default=1)
def Bradford(q, low=0, high=1, tag=None):
    """
    A Bradford random variate

    Parameters
    ----------
    q : scalar
        The shape parameter
    low : scalar
        The lower bound of the distribution (default=0)
    high : scalar
        The upper bound of the distribution (default=1)
    """
    assert q > 0, 'Bradford "q" parameter must be greater than zero'
    assert low < high, 'Bradford "low" parameter must be less than "high"'
    # scipy parameterizes the support as [loc, loc + scale]
    rv = ss.bradford(q, loc=low, scale=high - low)
    return uv(rv, tag=tag)
A Burr random variate
Parameters
----------
c : scalar
The first shape parameter
k : scalar
The second shape parameter
def Burr(c, k, tag=None):
    """
    A Burr random variate

    Parameters
    ----------
    c : scalar
        The first shape parameter
    k : scalar
        The second shape parameter
    """
    assert c > 0 and k > 0, 'Burr "c" and "k" parameters must be greater than zero'
    rv = ss.burr(c, k)
    return uv(rv, tag=tag)
A Chi-Squared random variate
Parameters
----------
k : int
The degrees of freedom of the distribution (must be greater than one)
def ChiSquared(k, tag=None):
    """
    A Chi-Squared random variate

    Parameters
    ----------
    k : int
        The degrees of freedom of the distribution (must be greater than one)
    """
    # k must be a whole, positive number of degrees of freedom
    assert int(k) == k and k >= 1, 'Chi-Squared "k" must be an integer greater than 0'
    rv = ss.chi2(k)
    return uv(rv, tag=tag)
An Erlang random variate.
This distribution is the same as a Gamma(k, theta) distribution, but
with the restriction that k must be a positive integer. This
is provided for greater compatibility with other simulation tools, but
provides no advantage over the Gamma distribution in its applications.
Parameters
----------
k : int
The shape parameter (must be a positive integer)
lamda : scalar
The scale parameter (must be greater than zero)
def Erlang(k, lamda, tag=None):
    """
    An Erlang random variate.

    This distribution is the same as a Gamma(k, theta) distribution, but
    with the restriction that k must be a positive integer. This
    is provided for greater compatibility with other simulation tools, but
    provides no advantage over the Gamma distribution in its applications.

    Parameters
    ----------
    k : int
        The shape parameter (must be a positive integer)
    lamda : scalar
        The scale parameter (must be greater than zero)
    """
    assert int(k) == k and k > 0, 'Erlang "k" must be a positive integer'
    assert lamda > 0, 'Erlang "lamda" must be greater than zero'
    # Delegate to Gamma; its third positional parameter is `tag`.
    return Gamma(k, lamda, tag)
An Exponential random variate
Parameters
----------
lamda : scalar
The inverse scale (as shown on Wikipedia). (FYI: mu = 1/lamda.)
def Exponential(lamda, tag=None):
    """
    An Exponential random variate

    Parameters
    ----------
    lamda : scalar
        The inverse scale (as shown on Wikipedia). (FYI: mu = 1/lamda.)
    """
    assert lamda > 0, 'Exponential "lamda" must be greater than zero'
    # scipy's expon takes the scale (mean), i.e. the reciprocal of lamda.
    rv = ss.expon(scale=1.0 / lamda)
    return uv(rv, tag=tag)
An Extreme Value Maximum random variate.
Parameters
----------
mu : scalar
The location parameter
sigma : scalar
The scale parameter (must be greater than zero)
def ExtValueMax(mu, sigma, tag=None):
    """
    An Extreme Value Maximum random variate.

    Parameters
    ----------
    mu : scalar
        The location parameter
    sigma : scalar
        The scale parameter (must be greater than zero)
    """
    assert sigma > 0, 'ExtremeValueMax "sigma" must be greater than zero'
    # Inverse-transform sampling from uniform draws: Gumbel (max) quantile
    # function is mu - sigma * ln(-ln(p)).
    u = U(0, 1)._mcpts[:]
    samples = mu - sigma * np.log(-np.log(u))
    return UncertainFunction(samples, tag=tag)
An F (fisher) random variate
Parameters
----------
d1 : int
Numerator degrees of freedom
d2 : int
Denominator degrees of freedom
def Fisher(d1, d2, tag=None):
    """
    An F (fisher) random variate

    Parameters
    ----------
    d1 : int
        Numerator degrees of freedom
    d2 : int
        Denominator degrees of freedom
    """
    assert int(d1) == d1 and d1 >= 1, (
        'Fisher (F) "d1" must be an integer greater than 0'
    )
    assert int(d2) == d2 and d2 >= 1, (
        'Fisher (F) "d2" must be an integer greater than 0'
    )
    rv = ss.f(d1, d2)
    return uv(rv, tag=tag)
A Gamma random variate
Parameters
----------
k : scalar
The shape parameter (must be positive and non-zero)
theta : scalar
The scale parameter (must be positive and non-zero)
def Gamma(k, theta, tag=None):
    """
    A Gamma random variate

    Parameters
    ----------
    k : scalar
        The shape parameter (must be positive and non-zero)
    theta : scalar
        The scale parameter (must be positive and non-zero)
    """
    assert k > 0 and theta > 0, (
        'Gamma "k" and "theta" parameters must be greater than zero'
    )
    rv = ss.gamma(k, scale=theta)
    return uv(rv, tag=tag)
A Log-Normal random variate
Parameters
----------
mu : scalar
The location parameter
sigma : scalar
The scale parameter (must be positive and non-zero)
def LogNormal(mu, sigma, tag=None):
    """
    A Log-Normal random variate

    Parameters
    ----------
    mu : scalar
        The location parameter
    sigma : scalar
        The scale parameter (must be positive and non-zero)
    """
    assert sigma > 0, 'Log-Normal "sigma" must be positive'
    # NOTE(review): scipy's lognorm treats `loc` as an additive shift of
    # the support, not the log-space mean; confirm this matches the
    # intended parameterization.
    rv = ss.lognorm(sigma, loc=mu)
    return uv(rv, tag=tag)
A Normal (or Gaussian) random variate
Parameters
----------
mu : scalar
The mean value of the distribution
sigma : scalar
The standard deviation (must be positive and non-zero)
def Normal(mu, sigma, tag=None):
    """
    A Normal (or Gaussian) random variate

    Parameters
    ----------
    mu : scalar
        The mean value of the distribution
    sigma : scalar
        The standard deviation (must be positive and non-zero)
    """
    assert sigma > 0, 'Normal "sigma" must be greater than zero'
    rv = ss.norm(loc=mu, scale=sigma)
    return uv(rv, tag=tag)
A Pareto random variate (first kind)
Parameters
----------
q : scalar
The scale parameter
a : scalar
The shape parameter (the minimum possible value)
def Pareto(q, a, tag=None):
    """
    A Pareto random variate (first kind)

    Parameters
    ----------
    q : scalar
        The shape parameter (tail exponent)
    a : scalar
        The scale parameter (the minimum possible value)

    Notes
    -----
    DOC FIX: the previous docstring swapped the labels — the sampling
    formula below is the Pareto-I inverse CDF a*(1 - p)**(-1/q), so `a`
    is the multiplicative minimum (scale) and `q` the tail exponent
    (shape).
    """
    assert q > 0 and a > 0, 'Pareto "q" and "a" must be positive scalars'
    p = Uniform(0, 1, tag)
    # Inverse-transform sampling: F^-1(p) = a * (1 - p)^(-1/q)
    return a * (1 - p) ** (-1.0 / q)
A Pareto random variate (second kind). This form always starts at the
origin.
Parameters
----------
q : scalar
The scale parameter
b : scalar
The shape parameter
def Pareto2(q, b, tag=None):
    """
    A Pareto random variate (second kind). This form always starts at the
    origin.

    Parameters
    ----------
    q : scalar
        The scale parameter
    b : scalar
        The shape parameter
    """
    assert q > 0 and b > 0, 'Pareto2 "q" and "b" must be positive scalars'
    # A Pareto-I variate shifted down by b so the support starts at 0.
    return Pareto(q, b, tag) - b
A PERT random variate
Parameters
----------
low : scalar
Lower bound of the distribution support
peak : scalar
The location of the distribution's peak (low <= peak <= high)
high : scalar
Upper bound of the distribution support
Optional
--------
g : scalar
Controls the uncertainty of the distribution around the peak. Smaller
values make the distribution flatter and more uncertain around the
peak while larger values make it focused and less uncertain around
the peak. (Default: 4)
def PERT(low, peak, high, g=4.0, tag=None):
    """
    A PERT random variate

    Parameters
    ----------
    low : scalar
        Lower bound of the distribution support
    peak : scalar
        The location of the distribution's peak (low <= peak <= high)
    high : scalar
        Upper bound of the distribution support

    Optional
    --------
    g : scalar
        Controls the uncertainty of the distribution around the peak. Smaller
        values make the distribution flatter and more uncertain around the
        peak while larger values make it focused and less uncertain around
        the peak. (Default: 4)
    """
    a, b, c = (float(v) for v in (low, peak, high))
    assert a <= b <= c, 'PERT "peak" must be greater than "low" and less than "high"'
    assert g >= 0, 'PERT "g" must be non-negative'
    # Weighted mean of the underlying Beta distribution.
    mu = (a + g * b + c) / (g + 2)
    if mu == b:
        # Symmetric case: both Beta shape parameters collapse to 3.
        a1 = a2 = 3.0
    else:
        a1 = ((mu - a) * (2 * b - a - c)) / ((b - mu) * (c - a))
        a2 = a1 * (c - mu) / (mu - a)
    # Beta(alpha, beta, low, high, tag) — the PERT is a rescaled Beta.
    return Beta(a1, a2, a, c, tag)
A Student-T random variate
Parameters
----------
v : int
The degrees of freedom of the distribution (must be greater than one)
def StudentT(v, tag=None):
    """
    A Student-T random variate

    Parameters
    ----------
    v : int
        The degrees of freedom of the distribution (must be greater than one)
    """
    assert int(v) == v and v >= 1, 'Student-T "v" must be an integer greater than 0'
    rv = ss.t(v)
    return uv(rv, tag=tag)
A triangular random variate
Parameters
----------
low : scalar
Lower bound of the distribution support
peak : scalar
The location of the triangle's peak (low <= peak <= high)
high : scalar
Upper bound of the distribution support
def Triangular(low, peak, high, tag=None):
    """
    A triangular random variate

    Parameters
    ----------
    low : scalar
        Lower bound of the distribution support
    peak : scalar
        The location of the triangle's peak (low <= peak <= high)
    high : scalar
        Upper bound of the distribution support
    """
    assert low <= peak <= high, 'Triangular "peak" must lie between "low" and "high"'
    low, peak, high = [float(x) for x in [low, peak, high]]
    # scipy's triang takes the peak as a fraction of the support width.
    width = high - low
    c = (1.0 * peak - low) / width
    rv = ss.triang(c, loc=low, scale=width)
    return uv(rv, tag=tag)
A Uniform random variate
Parameters
----------
low : scalar
Lower bound of the distribution support.
high : scalar
Upper bound of the distribution support.
def Uniform(low, high, tag=None):
    """
    A Uniform random variate

    Parameters
    ----------
    low : scalar
        Lower bound of the distribution support.
    high : scalar
        Upper bound of the distribution support.
    """
    assert low < high, 'Uniform "low" must be less than "high"'
    # scipy parameterizes the support as [loc, loc + scale]
    rv = ss.uniform(loc=low, scale=high - low)
    return uv(rv, tag=tag)
A Weibull random variate
Parameters
----------
lamda : scalar
The scale parameter
k : scalar
The shape parameter
def Weibull(lamda, k, tag=None):
    """
    A Weibull random variate

    Parameters
    ----------
    lamda : scalar
        The scale parameter
    k : scalar
        The shape parameter
    """
    assert lamda > 0 and k > 0, (
        'Weibull "lamda" and "k" parameters must be greater than zero'
    )
    # NOTE(review): ss.exponweib(lamda, k) treats both arguments as shape
    # parameters of the *exponentiated* Weibull — it does not use lamda as
    # a scale. Confirm against a plain Weibull (ss.weibull_min(k,
    # scale=lamda)) whether this is the intended parameterization.
    rv = ss.exponweib(lamda, k)
    return uv(rv, tag=tag)
A Bernoulli random variate
Parameters
----------
p : scalar
The probability of success
def Bernoulli(p, tag=None):
    """A Bernoulli random variate.

    Parameters
    ----------
    p : scalar
        The probability of success, strictly between 0 and 1.
    """
    in_open_interval = 0 < p < 1
    assert in_open_interval, 'Bernoulli probability "p" must be between zero and one, non-inclusive'
    return uv(ss.bernoulli(p), tag=tag)
A Binomial random variate
Parameters
----------
n : int
The number of trials
p : scalar
The probability of success
def Binomial(n, p, tag=None):
    """A Binomial random variate.

    Parameters
    ----------
    n : int
        The number of trials (a positive integer).
    p : scalar
        The probability of success, strictly between 0 and 1.
    """
    n_is_count = int(n) == n and n > 0
    assert n_is_count, 'Binomial number of trials "n" must be an integer greater than zero'
    assert 0 < p < 1, 'Binomial probability "p" must be between zero and one, non-inclusive'
    return uv(ss.binom(n, p), tag=tag)
A Geometric random variate
Parameters
----------
p : scalar
The probability of success
def Geometric(p, tag=None):
    """A Geometric random variate.

    Parameters
    ----------
    p : scalar
        The probability of success, strictly between 0 and 1.
    """
    assert 0 < p < 1, 'Geometric probability "p" must be between zero and one, non-inclusive'
    return uv(ss.geom(p), tag=tag)
A Hypergeometric random variate
Parameters
----------
N : int
The total population size
n : int
The number of individuals of interest in the population
K : int
The number of individuals that will be chosen from the population
Example
-------
(Taken from the wikipedia page) Assume we have an urn with two types of
marbles, 45 black ones and 5 white ones. Standing next to the urn, you
close your eyes and draw 10 marbles without replacement. What is the
probability that exactly 4 of the 10 are white?
::
>>> black = 45
>>> white = 5
>>> draw = 10
# Now we create the distribution
>>> h = H(black + white, white, draw)
# To check the probability, in this case, we can use the underlying
# scipy.stats object
>>> h.rv.pmf(4) # What is the probability that white count = 4?
0.0039645830580151975
def Hypergeometric(N, n, K, tag=None):
    """A Hypergeometric random variate.

    Parameters
    ----------
    N : int
        The total population size
    n : int
        The number of individuals of interest in the population
    K : int
        The number of individuals that will be chosen from the population

    Example
    -------
    (Taken from the wikipedia page) Assume we have an urn with two types of
    marbles, 45 black ones and 5 white ones. Standing next to the urn, you
    close your eyes and draw 10 marbles without replacement. What is the
    probability that exactly 4 of the 10 are white?
    ::

        >>> black = 45
        >>> white = 5
        >>> draw = 10

        # Now we create the distribution
        >>> h = H(black + white, white, draw)

        # To check the probability, in this case, we can use the underlying
        # scipy.stats object
        >>> h.rv.pmf(4)  # What is the probability that white count = 4?
        0.0039645830580151975
    """
    def _is_count(x):
        # integral and strictly positive
        return int(x) == x and x > 0

    assert _is_count(N), 'Hypergeometric total population size "N" must be an integer greater than zero.'
    assert _is_count(n) and n <= N, 'Hypergeometric interest population size "n" must be an integer greater than zero and no more than the total population size.'
    assert _is_count(K) and K <= N, 'Hypergeometric chosen population size "K" must be an integer greater than zero and no more than the total population size.'
    return uv(ss.hypergeom(N, n, K), tag=tag)
A Poisson random variate
Parameters
----------
lamda : scalar
The rate of an occurrence within a specified interval of time or space.
def Poisson(lamda, tag=None):
    """A Poisson random variate.

    Parameters
    ----------
    lamda : scalar
        The rate of an occurrence within a specified interval of time or
        space (must be positive).
    """
    assert lamda > 0, 'Poisson "lamda" must be greater than zero.'
    return uv(ss.poisson(lamda), tag=tag)
Calculate the covariance matrix of uncertain variables, oriented by the
order of the inputs
Parameters
----------
nums_with_uncert : array-like
A list of variables that have an associated uncertainty
Returns
-------
cov_matrix : 2d-array-like
A nested list containing covariance values
Example
-------
>>> x = N(1, 0.1)
>>> y = N(10, 0.1)
>>> z = x + 2*y
>>> covariance_matrix([x,y,z])
[[ 9.99694861e-03 2.54000840e-05 1.00477488e-02]
[ 2.54000840e-05 9.99823207e-03 2.00218642e-02]
[ 1.00477488e-02 2.00218642e-02 5.00914772e-02]]
def covariance_matrix(nums_with_uncert):
    """Compute the covariance matrix of uncertain variables, oriented by
    the order of the inputs.

    Parameters
    ----------
    nums_with_uncert : array-like
        A list of variables that have an associated uncertainty

    Returns
    -------
    cov_matrix : 2d-array-like
        A nested list containing covariance values

    Example
    -------
    >>> x = N(1, 0.1)
    >>> y = N(10, 0.1)
    >>> z = x + 2*y
    >>> covariance_matrix([x,y,z])
    [[ 9.99694861e-03  2.54000840e-05  1.00477488e-02]
     [ 2.54000840e-05  9.99823207e-03  2.00218642e-02]
     [ 1.00477488e-02  2.00218642e-02  5.00914772e-02]]
    """
    ufuncs = [to_uncertain_func(item) for item in nums_with_uncert]
    means = [u.mean for u in ufuncs]
    # build the lower triangle (including the diagonal) row by row
    cov_matrix = []
    for i, expr1 in enumerate(ufuncs):
        row = [
            np.mean((expr1._mcpts - means[i]) * (expr2._mcpts - means[j]))
            for j, expr2 in enumerate(ufuncs[: i + 1])
        ]
        cov_matrix.append(row)
    # mirror the lower triangle into the upper triangle
    for i, row in enumerate(cov_matrix):
        row.extend(cov_matrix[j][i] for j in range(i + 1, len(cov_matrix)))
    return cov_matrix
Calculate the correlation matrix of uncertain variables, oriented by the
order of the inputs
Parameters
----------
nums_with_uncert : array-like
A list of variables that have an associated uncertainty
Returns
-------
corr_matrix : 2d-array-like
A nested list containing covariance values
Example
-------
>>> x = N(1, 0.1)
>>> y = N(10, 0.1)
>>> z = x + 2*y
>>> correlation_matrix([x,y,z])
[[ 0.99969486 0.00254001 0.4489385 ]
[ 0.00254001 0.99982321 0.89458702]
[ 0.4489385 0.89458702 1. ]]
def correlation_matrix(nums_with_uncert):
    """Compute the correlation matrix of uncertain variables, oriented by
    the order of the inputs.

    Parameters
    ----------
    nums_with_uncert : array-like
        A list of variables that have an associated uncertainty

    Returns
    -------
    corr_matrix : 2d-array-like
        A nested list containing correlation values

    Example
    -------
    >>> x = N(1, 0.1)
    >>> y = N(10, 0.1)
    >>> z = x + 2*y
    >>> correlation_matrix([x,y,z])
    [[ 0.99969486  0.00254001  0.4489385 ]
     [ 0.00254001  0.99982321  0.89458702]
     [ 0.4489385   0.89458702  1.        ]]
    """
    ufuncs = [to_uncertain_func(item) for item in nums_with_uncert]
    samples = np.vstack([u._mcpts for u in ufuncs])
    # each row of `samples` is one variable, which is numpy's default
    # orientation (rowvar=True), so no transpose is needed
    return np.corrcoef(samples)
Variance value as a result of an uncertainty calculation
def var(self):
    """
    Variance value as a result of an uncertainty calculation
    (mean squared deviation of the Monte-Carlo samples).
    """
    deviations = self._mcpts - self.mean
    return np.mean(deviations ** 2)
r"""
Skewness coefficient value as a result of an uncertainty calculation,
defined as::
_____ m3
\/beta1 = ------
std**3
where m3 is the third central moment and std is the standard deviation
def skew(self):
    r"""
    Skewness coefficient value as a result of an uncertainty calculation,
    defined as m3 / std**3, where m3 is the third central moment and std
    is the standard deviation.  Returns 0.0 when the standard deviation
    is numerically zero.
    """
    mn = self.mean
    sd = self.std
    if abs(sd) <= 1e-8:
        return 0.0
    return np.mean((self._mcpts - mn) ** 3) / sd ** 3
Kurtosis coefficient value as a result of an uncertainty calculation,
defined as::
m4
beta2 = ------
std**4
where m4 is the fourth central moment and std is the standard deviation
def kurt(self):
    """
    Kurtosis coefficient value as a result of an uncertainty calculation,
    defined as m4 / std**4, where m4 is the fourth central moment and std
    is the standard deviation.  Returns 0.0 when the standard deviation
    is numerically zero.
    """
    mn = self.mean
    sd = self.std
    if abs(sd) <= 1e-8:
        return 0.0
    return np.mean((self._mcpts - mn) ** 4) / sd ** 4
The first four standard moments of a distribution: mean, variance, and
standardized skewness and kurtosis coefficients.
def stats(self):
    """
    The first four standard moments of a distribution: mean, variance, and
    standardized skewness and kurtosis coefficients, in that order.
    """
    return [self.mean, self.var, self.skew, self.kurt]
Get the distribution value at a given percentile or set of percentiles.
This follows the NIST method for calculating percentiles.
Parameters
----------
val : scalar or array
Either a single value or an array of values between 0 and 1.
Returns
-------
out : scalar or array
The actual distribution value that appears at the requested
percentile value or values
def percentile(self, val):
    """
    Get the distribution value at a given percentile or set of percentiles.
    This follows the NIST method for calculating percentiles.

    Parameters
    ----------
    val : scalar or array
        Either a single value or an array of values between 0 and 1.

    Returns
    -------
    out : scalar or array
        The actual distribution value that appears at the requested
        percentile value or values
    """
    try:
        # test to see if an input is given as an array
        out = [self.percentile(vi) for vi in val]
    except (ValueError, TypeError):
        if val <= 0:
            out = float(min(self._mcpts))
        elif val >= 1:
            out = float(max(self._mcpts))
        else:
            tmp = np.sort(self._mcpts)
            num = len(tmp)
            rank = val * (num + 1)
            k, d = int(rank), rank - int(rank)
            # NIST method: rank k (1-based) is tmp[k - 1]; interpolate
            # toward rank k + 1, clamping ranks outside [1, num] to the
            # extreme samples.
            # BUG FIX: the previous code indexed tmp[k]/tmp[k + 1], which
            # was shifted by one rank and raised IndexError for high
            # percentiles (e.g. val=0.95 with 5 samples).
            if k < 1:
                out = float(tmp[0])
            elif k >= num:
                out = float(tmp[-1])
            else:
                out = float(tmp[k - 1] + d * (tmp[k] - tmp[k - 1]))
    if isinstance(val, np.ndarray):
        out = np.array(out)
    return out
Cleanly show what the four displayed distribution moments are:
- Mean
- Variance
- Standardized Skewness Coefficient
- Standardized Kurtosis Coefficient
For a standard Normal distribution, these are [0, 1, 0, 3].
If the object has an associated tag, this is presented. If the optional
``name`` kwarg is utilized, this is presented as with the moments.
Otherwise, no unique name is presented.
Example
=======
::
>>> x = N(0, 1, 'x')
>>> x.describe() # print tag since assigned
MCERP Uncertain Value (x):
...
>>> x.describe('foobar') # 'name' kwarg takes precedence
MCERP Uncertain Value (foobar):
...
>>> y = x**2
>>> y.describe('y') # print name since assigned
MCERP Uncertain Value (y):
...
>>> y.describe() # print nothing since no tag
MCERP Uncertain Value:
...
def describe(self, name=None):
    """
    Cleanly show what the four displayed distribution moments are:
        - Mean
        - Variance
        - Standardized Skewness Coefficient
        - Standardized Kurtosis Coefficient
    For a standard Normal distribution, these are [0, 1, 0, 3].

    The header label is ``name`` when given (it takes precedence),
    otherwise the object's ``tag`` when assigned, otherwise no label.

    Example
    =======
    ::

        >>> x = N(0, 1, 'x')
        >>> x.describe()  # print tag since assigned
        MCERP Uncertain Value (x):
        ...

        >>> x.describe('foobar')  # 'name' kwarg takes precedence
        MCERP Uncertain Value (foobar):
        ...

        >>> y = x**2
        >>> y.describe('y')  # print name since assigned
        MCERP Uncertain Value (y):
        ...

        >>> y.describe()  # print nothing since no tag
        MCERP Uncertain Value:
        ...
    """
    mn, vr, sk, kt = self.stats
    label = name if name is not None else self.tag
    if label is not None:
        header = "MCERP Uncertain Value (" + label + "):\n"
    else:
        header = "MCERP Uncertain Value:\n"
    body = [
        header,
        " > Mean................... {: }\n".format(mn),
        " > Variance............... {: }\n".format(vr),
        " > Skewness Coefficient... {: }\n".format(sk),
        " > Kurtosis Coefficient... {: }\n".format(kt),
    ]
    print("".join(body))
Plot the distribution of the UncertainFunction. By default, the
distribution is shown with a kernel density estimate (kde).
Optional
--------
hist : bool
If true, a density histogram is displayed (histtype='stepfilled')
show : bool
If ``True``, the figure will be displayed after plotting the
distribution. If ``False``, an explicit call to ``plt.show()`` is
required to display the figure.
kwargs : any valid matplotlib.pyplot.plot or .hist kwarg
def plot(self, hist=False, show=False, **kwargs):
    """
    Plot the distribution of the UncertainFunction. By default, the
    distribution is shown with a kernel density estimate (kde).

    Optional
    --------
    hist : bool
        If true, a density histogram is displayed (histtype='stepfilled')
    show : bool
        If ``True``, the figure will be displayed after plotting the
        distribution. If ``False``, an explicit call to ``plt.show()`` is
        required to display the figure.
    kwargs : any valid matplotlib.pyplot.plot or .hist kwarg
    """
    import matplotlib.pyplot as plt

    vals = self._mcpts
    low = min(vals)
    high = max(vals)
    # BUG FIX: ss.kde.gaussian_kde is a deprecated alias; the public name
    # is ss.gaussian_kde.
    p = ss.gaussian_kde(vals)
    xp = np.linspace(low, high, 100)
    if hist:
        h = plt.hist(
            vals,
            bins=int(np.sqrt(len(vals)) + 0.5),
            histtype="stepfilled",
            # BUG FIX: the 'normed' kwarg was removed in matplotlib 3.x;
            # 'density' is the equivalent replacement.
            density=True,
            **kwargs
        )
        plt.ylim(0, 1.1 * h[0].max())
    else:
        plt.plot(xp, p.evaluate(xp), **kwargs)
    plt.xlim(low - (high - low) * 0.1, high + (high - low) * 0.1)
    if show:
        # NOTE(review): delegates to self.show(); confirm the class defines
        # it (plt.show() may have been intended).
        self.show()
Plot the distribution of the UncertainVariable. Continuous
distributions are plotted with a line plot and discrete distributions
are plotted with discrete circles.
Optional
--------
hist : bool
If true, a histogram is displayed
show : bool
If ``True``, the figure will be displayed after plotting the
distribution. If ``False``, an explicit call to ``plt.show()`` is
required to display the figure.
kwargs : any valid matplotlib.pyplot.plot kwarg
def plot(self, hist=False, show=False, **kwargs):
    """
    Plot the distribution of the UncertainVariable. Continuous
    distributions are plotted with a line plot and discrete distributions
    are plotted with discrete circles.

    Optional
    --------
    hist : bool
        If true, a histogram is displayed
    show : bool
        If ``True``, the figure will be displayed after plotting the
        distribution. If ``False``, an explicit call to ``plt.show()`` is
        required to display the figure.
    kwargs : any valid matplotlib.pyplot.plot kwarg
    """
    import matplotlib.pyplot as plt

    if hist:
        vals = self._mcpts
        low = vals.min()
        high = vals.max()
        h = plt.hist(
            vals,
            bins=int(np.sqrt(len(vals)) + 0.5),
            histtype="stepfilled",
            # BUG FIX: the 'normed' kwarg was removed in matplotlib 3.x;
            # 'density' is the equivalent replacement.
            density=True,
            **kwargs
        )
        plt.ylim(0, 1.1 * h[0].max())
    else:
        # evaluate the analytic pdf/pmf over the central 99.98% of the
        # distribution's support
        bound = 0.0001
        low = self.rv.ppf(bound)
        high = self.rv.ppf(1 - bound)
        if hasattr(self.rv.dist, "pmf"):
            # discrete distribution: plot the pmf at integer support points
            low = int(low)
            high = int(high)
            vals = list(range(low, high + 1))
            plt.plot(vals, self.rv.pmf(vals), "o", **kwargs)
        else:
            vals = np.linspace(low, high, 500)
            plt.plot(vals, self.rv.pdf(vals), **kwargs)
    plt.xlim(low - (high - low) * 0.1, high + (high - low) * 0.1)
    if show:
        # NOTE(review): delegates to self.show(); confirm the class defines
        # it (plt.show() may have been intended).
        self.show()
Loads the hat from a picture at path.
Args:
path: The path to load from
Returns:
The hat data.
def load_hat(self, path):  # pylint: disable=no-self-use
    """Loads the hat from a picture at path.

    Args:
        path: The path to load from
    Returns:
        The hat image data with channels reordered from BGRA to RGBA.
    Raises:
        ValueError: if no image could be read from ``path``.
    """
    hat = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    if hat is None:
        raise ValueError('No hat image found at `{}`'.format(path))
    blue, green, red, alpha = cv2.split(hat)
    return cv2.merge((red, green, blue, alpha))
Uses a haarcascade to detect faces inside an image.
Args:
image: The image.
draw_box: If True, the image will be marked with a rectangle.
Return:
The faces as returned by OpenCV's detectMultiScale method for
cascades.
def find_faces(self, image, draw_box=False):
    """Uses a haarcascade to detect faces inside an image.

    Args:
        image: The image.
        draw_box: If True, each detected face is marked with a green
            rectangle drawn onto ``image``.
    Return:
        The faces as returned by OpenCV's detectMultiScale method for
        cascades.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    faces = self.cascade.detectMultiScale(
        gray,
        scaleFactor=1.3,
        minNeighbors=5,
        minSize=(50, 50),
        flags=0)
    if draw_box:
        for x, y, width, height in faces:
            cv2.rectangle(
                image, (x, y), (x + width, y + height), (0, 255, 0), 2)
    return faces
Find instances of `rsrc_type` that match the filter in `**kwargs`
def find_resources(self, rsrc_type, sort=None, yield_pages=False, **kwargs):
    """Find instances of `rsrc_type` that match the filter in `**kwargs`"""
    matches = rsrc_type.find(self, sort=sort, yield_pages=yield_pages, **kwargs)
    return matches
Marks the object as changed.
If a `parent` attribute is set, the `changed()` method on the parent
will be called, propagating the change notification up the chain.
The message (if provided) will be debug logged.
def changed(self, message=None, *args):
    """Marks the object as changed.

    If a `parent` attribute is set, the `changed()` method on the parent
    will be called, propagating the change notification up the chain.
    The message (if provided) is interpolated with ``args`` and debug
    logged.
    """
    log = self.logger.debug
    if message is not None:
        log('%s: %s', self._repr(), message % args)
    log('%s: changed', self._repr())
    parent = self.parent
    if parent is not None:
        parent.changed()
    elif isinstance(self, Mutable):
        super(TrackedObject, self).changed()
Decorator for mutation tracker registration.
The provided `origin_type` is mapped to the decorated class such that
future calls to `convert()` will convert the object of `origin_type`
to an instance of the decorated class.
def register(cls, origin_type):
    """Decorator for mutation tracker registration.

    The provided `origin_type` is mapped to the decorated class such that
    future calls to `convert()` will convert the object of `origin_type`
    to an instance of the decorated class.
    """
    def decorator(tracked_type):
        """Records the decorated class in `_type_mapping` and returns it."""
        cls._type_mapping[origin_type] = tracked_type
        return tracked_type

    return decorator
Converts objects to registered tracked types
This checks the type of the given object against the registered tracked
types. When a match is found, the given object will be converted to the
tracked type, its parent set to the provided parent, and returned.
If its type does not occur in the registered types mapping, the object
is returned unchanged.
def convert(cls, obj, parent):
    """Converts objects to registered tracked types.

    This checks the type of the given object against the registered tracked
    types. When a match is found, the given object will be converted to the
    tracked type, its parent set to the provided parent, and returned.
    If its type does not occur in the registered types mapping, the object
    is returned unchanged.
    """
    tracked_type = cls._type_mapping.get(type(obj))
    if tracked_type is None:
        return obj
    converted = tracked_type(obj)
    converted.parent = parent
    return converted
Generator like `convert_iterable`, but for 2-tuple iterators.
def convert_items(self, items):
    """Generator like `convert_iterable`, but for 2-tuple iterators."""
    return ((k, self.convert(v, self)) for k, v in items)
Convenience method to track either a dict or a 2-tuple iterator.
def convert_mapping(self, mapping):
    """Convenience method to track either a dict or a 2-tuple iterator."""
    pairs = iteritems(mapping) if isinstance(mapping, dict) else mapping
    return self.convert_items(pairs)
If we only have a single preference object redirect to it,
otherwise display listing.
def changelist_view(self, request, extra_context=None):
    """
    If we only have a single preference object redirect to it,
    otherwise display listing.
    """
    model = self.model
    if model.objects.all().count() > 1:
        # BUG FIX: extra_context was accepted but never forwarded to the
        # parent changelist view, silently dropping any caller-supplied
        # context.
        return super(PreferencesAdmin, self).changelist_view(
            request, extra_context
        )
    obj = model.singleton.get()
    return redirect(
        reverse(
            'admin:%s_%s_change' % (
                model._meta.app_label, model._meta.model_name
            ),
            args=(obj.id,)
        )
    )
Only converts headers
def md2rst(md_lines):
    """Only converts headers: '#'-style markdown headers become reST
    headers underlined with '=', '-' or '~' by level; every other line
    passes through unchanged."""
    underline_by_level = {1: '=', 2: '-', 3: '~'}
    for line in md_lines:
        if not line.startswith('#'):
            yield line
            continue
        hashes, text = line.split(' ', 1)
        yield text
        yield underline_by_level[len(hashes)] * len(text)
Function decorator to transform a generator into a list
def aslist(generator):
    """Function decorator to transform a generator into a list.

    The wrapped function calls ``generator`` and materializes the result
    with ``list()``.
    """
    from functools import wraps

    @wraps(generator)  # preserve __name__/__doc__ of the wrapped generator
    def wrapper(*args, **kwargs):
        return list(generator(*args, **kwargs))

    return wrapper
No classifier-based selection of Python packages is currently implemented: for now we don't fetch any .whl or .egg
Eventually, we should select the best release available, based on the classifier & PEP 425: https://www.python.org/dev/peps/pep-0425/
E.g. a wheel when available but NOT for tornado 4.3 for example, where available wheels are only for Windows.
Note also that some packages don't have .whl distributed, e.g. https://bugs.launchpad.net/lxml/+bug/1176147
def get_package_release_from_pypi(pkg_name, version, pypi_json_api_url, allowed_classifiers):
    """
    Pick a release for ``pkg_name``/``version`` from PyPI, preferring
    source distributions.

    No classifier-based selection of Python packages is currently
    implemented: for now we don't fetch any .whl or .egg.  Eventually, we
    should select the best release available, based on the classifier &
    PEP 425: https://www.python.org/dev/peps/pep-0425/ — e.g. a wheel when
    available but NOT for tornado 4.3 for example, where available wheels
    are only for Windows.  Note also that some packages don't have .whl
    distributed, e.g. https://bugs.launchpad.net/lxml/+bug/1176147

    Raises:
        PypiQueryError: when no suitable release can be found.
    """
    matching_releases = get_package_releases_matching_version(pkg_name, version, pypi_json_api_url)
    src_releases = [r for r in matching_releases if r['python_version'] == 'source']
    if src_releases:
        return select_src_release(
            src_releases, pkg_name,
            target_classifiers=('py2.py3-none-any',),
            select_arbitrary_version_if_none_match=True)
    if allowed_classifiers:
        return select_src_release(matching_releases, pkg_name, target_classifiers=allowed_classifiers)
    raise PypiQueryError('No source supported found for package {} version {}'.format(pkg_name, version))
Returns a PEP425-compliant classifier (or 'py2.py3-none-any' if it cannot be extracted),
and the file extension
TODO: return a classifier 3-members namedtuple instead of a single string
def extract_classifier_and_extension(pkg_name, filename):
    """
    Returns a PEP425-compliant classifier (or 'py2.py3-none-any' if it cannot be extracted),
    and the file extension.
    TODO: return a classifier 3-members namedtuple instead of a single string

    Args:
        pkg_name: the package the distribution file belongs to
        filename: the distribution file name, e.g. 'pkg-1.0-py2.py3-none-any.whl'
    """
    basename, _, extension = filename.rpartition('.')
    if extension == 'gz' and filename.endswith('.tar.gz'):
        # treat the compound '.tar.gz' suffix as a single extension
        extension = 'tar.gz'
        basename = filename[:-7]
    # BUG FIX: the previous check indexed basename[len(pkg_name)], which
    # raised IndexError whenever the basename was shorter than the package
    # name; a prefix test expresses the intent safely.
    if not basename.startswith(pkg_name + '-'):
        return 'py2.py3-none-any', extension
    basename = basename[len(pkg_name) + 1:]
    classifier_parts = basename.split('-')
    if len(classifier_parts) < 3:
        return 'py2.py3-none-any', extension
    if len(classifier_parts) == 3:
        # the version and the python tag are dot-joined, e.g. '1.0.py3'
        _, _, classifier_parts[0] = classifier_parts[0].rpartition('.')
    return '-'.join(classifier_parts[-3:]), extension
Convert plain dictionary to NestedMutable.
def coerce(cls, key, value):
    """Convert plain dictionary to NestedMutable.

    ``None`` and already-converted values pass through unchanged; plain
    dicts and lists are converted to their nested-mutable trackers; any
    other type is delegated to the parent class's ``coerce``.
    """
    if value is None:
        return value
    if isinstance(value, cls):
        return value
    if isinstance(value, dict):
        return NestedMutableDict.coerce(key, value)
    if isinstance(value, list):
        return NestedMutableList.coerce(key, value)
    # BUG FIX: ``super(cls)`` creates an *unbound* super object, so the
    # ``.coerce`` attribute lookup fails at runtime; the two-argument form
    # binds the parent classmethod correctly.
    return super(cls, cls).coerce(key, value)
Checks if a function in a module was declared in that module.
http://stackoverflow.com/a/1107150/3004221
Args:
mod: the module
fun: the function
def is_mod_function(mod, fun):
    """Checks if a function in a module was declared in that module.

    http://stackoverflow.com/a/1107150/3004221

    Args:
        mod: the module
        fun: the function
    """
    if not inspect.isfunction(fun):
        return False
    return inspect.getmodule(fun) == mod
Checks if a class in a module was declared in that module.
Args:
mod: the module
cls: the class
def is_mod_class(mod, cls):
    """Checks if a class in a module was declared in that module.

    Args:
        mod: the module
        cls: the class
    """
    if not inspect.isclass(cls):
        return False
    return inspect.getmodule(cls) == mod
Lists all functions declared in a module.
http://stackoverflow.com/a/1107150/3004221
Args:
mod_name: the module name
Returns:
A list of functions declared in that module.
def list_functions(mod_name):
    """Lists all functions declared in a module.

    http://stackoverflow.com/a/1107150/3004221

    Args:
        mod_name: the module name
    Returns:
        A list of names of the functions declared in that module.
    """
    mod = sys.modules[mod_name]
    return [obj.__name__ for obj in vars(mod).values()
            if is_mod_function(mod, obj)]
Lists all classes declared in a module.
Args:
mod_name: the module name
Returns:
A list of functions declared in that module.
def list_classes(mod_name):
    """Lists all classes declared in a module.

    Args:
        mod_name: the module name
    Returns:
        A list of names of the classes declared in that module.
    """
    mod = sys.modules[mod_name]
    return [obj.__name__ for obj in vars(mod).values()
            if is_mod_class(mod, obj)]
Returns a dictionary which maps function names to line numbers.
Args:
functions: a list of function names
module: the module to look the functions up
searchstr: the string to search for
Returns:
A dictionary with functions as keys and their line numbers as values.
def get_linenumbers(functions, module, searchstr='def {}(image):\n'):
    """Returns a dictionary which maps function names to line numbers.

    Args:
        functions: a list of function names
        module: the module to look the functions up
        searchstr: the exact source line to search for, with ``{}``
            replaced by each function name
    Returns:
        A dictionary with functions as keys and their 1-based line numbers
        as values; 0 (with a printed warning) when a name is not found.
    """
    lines = inspect.getsourcelines(module)[0]
    numbers = {}
    for name in functions:
        needle = searchstr.format(name)
        try:
            numbers[name] = lines.index(needle) + 1
        except ValueError:
            print(r'Can not find `{}`'.format(needle))
            numbers[name] = 0
    return numbers
Formats the documentation in a nicer way and for notebook cells.
def format_doc(fun):
    """Formats the documentation in a nicer way and for notebook cells."""
    # Marker inserted between a class docstring and its __init__ docstring.
    SEPARATOR = '============================='
    func = cvloop.functions.__dict__[fun]
    # Strip leading/trailing whitespace from every docstring line.
    doc_lines = ['{}'.format(l).strip() for l in func.__doc__.split('\n')]
    # NOTE(review): hasattr(x, '__init__') is True for plain functions too
    # (every object has __init__) — verify this branch is only meant to
    # fire for classes.
    if hasattr(func, '__init__'):
        doc_lines.append(SEPARATOR)
        doc_lines += ['{}'.format(l).strip() for l in
                      func.__init__.__doc__.split('\n')]
    mod_lines = []
    argblock = False  # currently inside an "Args:" section
    returnblock = False  # currently inside a "Returns:" section
    for line in doc_lines:
        if line == SEPARATOR:
            # Start of the __init__ docstring: emit a markdown sub-heading.
            mod_lines.append('\n#### `{}.__init__(...)`:\n\n'.format(fun))
        elif 'Args:' in line:
            argblock = True
            if GENERATE_ARGS:
                mod_lines.append('**{}**\n'.format(line))
        elif 'Returns:' in line:
            returnblock = True
            mod_lines.append('\n**{}**'.format(line))
        elif not argblock and not returnblock:
            # Plain prose before any Args:/Returns: section.
            mod_lines.append('{}\n'.format(line))
        elif argblock and not returnblock and ':' in line:
            # A "name: description" argument line rendered as a bullet.
            if GENERATE_ARGS:
                mod_lines.append('- *{}:* {}\n'.format(
                    *line.split(':')))
        elif returnblock:
            mod_lines.append(line)
        else:
            mod_lines.append('{}\n'.format(line))
    return mod_lines
Main function creates the cvloop.functions example notebook.
def main():
    """Main function creates the cvloop.functions example notebook.

    Builds an nbformat-4 notebook skeleton with an introductory markdown
    cell, appends a description/code cell pair for every class and
    function declared in ``cvloop.functions``, and writes the result as
    JSON to the path given in ``sys.argv[1]``.
    """
    # Notebook skeleton: one intro cell plus notebook-level metadata.
    notebook = {
        'cells': [
            {
                'cell_type': 'markdown',
                'metadata': {},
                'source': [
                    '# cvloop functions\n\n',
                    'This notebook shows an overview over all cvloop ',
                    'functions provided in the [`cvloop.functions` module](',
                    'https://github.com/shoeffner/cvloop/blob/',
                    'develop/cvloop/functions.py).'
                ]
            },
        ],
        'nbformat': 4,
        'nbformat_minor': 1,
        'metadata': {
            'language_info': {
                'codemirror_mode': {
                    'name': 'ipython',
                    'version': 3
                },
                'file_extension': '.py',
                'mimetype': 'text/x-python',
                'name': 'python',
                'nbconvert_exporter': 'python',
                'pygments_lexer': 'ipython3',
                'version': '3.5.1+'
            }
        }
    }
    # Discover the documented objects and their source line numbers
    # (classes are matched with a 'class {}:' search string, functions with
    # the default 'def {}(image):' pattern).
    classes = list_classes('cvloop.functions')
    functions = list_functions('cvloop.functions')
    line_numbers_cls = get_linenumbers(classes, cvloop.functions,
                                       'class {}:\n')
    line_numbers = get_linenumbers(functions, cvloop.functions)
    # One description cell plus one code cell per documented object.
    for cls in classes:
        line_number = line_numbers_cls[cls]
        notebook['cells'].append(create_description_cell(cls, line_number))
        notebook['cells'].append(create_code_cell(cls, isclass=True))
    for func in functions:
        line_number = line_numbers[func]
        notebook['cells'].append(create_description_cell(func, line_number))
        notebook['cells'].append(create_code_cell(func))
    with open(sys.argv[1], 'w') as nfile:
        json.dump(notebook, nfile, indent=4)
Prepares an axes object for clean plotting.
Removes x and y axes labels and ticks, sets the aspect ratio to be
equal, uses the size to determine the drawing area and fills the image
with random colors as visual feedback.
Creates an AxesImage to be shown inside the axes object and sets the
needed properties.
Args:
axes: The axes object to modify.
title: The title.
size: The size of the expected image.
cmap: The colormap if a custom color map is needed.
(Default: None)
Returns:
The AxesImage's handle.
def prepare_axes(axes, title, size, cmap=None):
    """Prepares an axes object for clean plotting.

    Removes x and y axes labels and ticks, sets the aspect ratio to be
    equal, uses the size to determine the drawing area and fills the image
    with random colors as visual feedback.

    Creates an AxesImage to be shown inside the axes object and sets the
    needed properties.

    Args:
        axes: The axes object to modify (may be None).
        title: The title.
        size: The (height, width) of the expected image.
        cmap: The colormap if a custom color map is needed.
            (Default: None)
    Returns:
        The AxesImage's handle, or None when ``axes`` is None.
    """
    if axes is None:
        return None
    height, width = size[0], size[1]
    # configure the axes frame itself
    axes.set_xlim([0, width])
    axes.set_ylim([height, 0])
    axes.set_aspect('equal')
    axes.axis('off')
    if isinstance(cmap, str):
        title = '{} (cmap: {})'.format(title, cmap)
    axes.set_title(title)
    # install the image artist filled with random placeholder data
    axes_image = image.AxesImage(axes, cmap=cmap,
                                 extent=(0, width, height, 0))
    axes_image.set_data(np.random.random((height, width, 3)))
    axes.add_image(axes_image)
    return axes_image
Connects event handlers to the figure.
def connect_event_handlers(self):
    """Connects event handlers to the figure."""
    connect = self.figure.canvas.mpl_connect
    connect('close_event', self.evt_release)
    connect('pause_event', self.evt_toggle_pause)
Pauses and resumes the video source.
def evt_toggle_pause(self, *args):  # pylint: disable=unused-argument
    """Pauses and resumes the video source."""
    timer = self.event_source._timer  # noqa: e501 pylint: disable=protected-access
    if timer is None:
        self.event_source.start()
    else:
        self.event_source.stop()
Prints information about the unprocessed image.
Reads one frame from the source to determine image colors, dimensions
and data types.
Args:
capture: the source to read from.
def print_info(self, capture):
    """Prints information about the unprocessed image.

    Reads one frame from the source to determine image colors, dimensions
    and data types.

    Args:
        capture: the source to read from.
    """
    self.frame_offset += 1
    success, frame = capture.read()
    if not success:
        print('No source found.')
        return
    channels = frame.shape[2] if len(frame.shape) > 2 else 1
    print('Capture Information')
    print('\tDimensions (HxW): {}x{}'.format(*frame.shape[0:2]))
    print('\tColor channels: {}'.format(channels))
    print('\tColor range: {}-{}'.format(np.min(frame), np.max(frame)))
    print('\tdtype: {}'.format(frame.dtype))
Determines the height and width of the image source.
If no dimensions are available, this method defaults to a resolution of
640x480, thus returns (480, 640).
If capture has a get method it is assumed to understand
`cv2.CAP_PROP_FRAME_WIDTH` and `cv2.CAP_PROP_FRAME_HEIGHT` to get the
information. Otherwise it reads one frame from the source to determine
image dimensions.
Args:
capture: the source to read from.
Returns:
A tuple containing integers of height and width (simple casts).
def determine_size(self, capture):
    """Determines the height and width of the image source.

    If no dimensions are available, this method defaults to a resolution
    of 640x480, thus returns (480, 640).

    If capture has a get method it is assumed to understand
    `cv2.CAP_PROP_FRAME_WIDTH` and `cv2.CAP_PROP_FRAME_HEIGHT` to get the
    information. Otherwise it reads one frame from the source to determine
    image dimensions.

    Args:
        capture: the source to read from.
    Returns:
        A tuple containing integers of height and width (simple casts).
    """
    height, width = 480, 640
    if capture and hasattr(capture, 'get'):
        # query the backend directly instead of consuming a frame
        width = capture.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
    else:
        self.frame_offset += 1
        success, frame = capture.read()
        if success:
            height, width = frame.shape[0], frame.shape[1]
    return (int(height), int(width))
Initializes the drawing of the frames by setting the images to
random colors.
This function is called by TimedAnimation.
def _init_draw(self):
"""Initializes the drawing of the frames by setting the images to
random colors.
This function is called by TimedAnimation.
"""
if self.original is not None:
self.original.set_data(np.random.random((10, 10, 3)))
self.processed.set_data(np.random.random((10, 10, 3))) |
Reads a frame and converts the color if needed.
In case no frame is available, i.e. self.capture.read() returns False
as the first return value, the event_source of the TimedAnimation is
stopped, and if possible the capture source released.
Returns:
None if stopped, otherwise the color converted source image.
def read_frame(self):
    """Reads a frame and converts the color if needed.

    In case no frame is available, i.e. self.capture.read() returns False
    as the first return value, the event_source of the TimedAnimation is
    stopped, and if possible the capture source released.

    Returns:
        None if stopped, otherwise the color converted source image.
    """
    success, frame = self.capture.read()
    if not success:
        self.event_source.stop()
        try:
            self.capture.release()
        except AttributeError:
            pass  # source has no release method; nothing to clean up
        return None
    if self.convert_color != -1 and is_color_image(frame):
        return cv2.cvtColor(frame, self.convert_color)
    return frame
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.