repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
scivision/gridaurora
|
gridaurora/__init__.py
|
chapman_profile
|
python
|
def chapman_profile(Z0: float, zKM: np.ndarray, H: float):
    """Chapman ionization profile vs. altitude, peaking (value 1.0) at Z0.

    Z0: altitude [km] of intensity peak
    zKM: altitude grid [km]
    H: scale height [km]
    """
    zeta = (zKM - Z0) / H  # reduced height
    return np.exp(0.5 * (1.0 - zeta - np.exp(-zeta)))
|
Z0: altitude [km] of intensity peak
zKM: altitude grid [km]
H: scale height [km]
example:
pz = chapman_profile(110,np.arange(90,200,1),20)
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/__init__.py#L64-L73
| null |
from datetime import datetime, date
from dateutil.parser import parse
import numpy as np
import logging
from typing import Union
def toyearmon(time: datetime) -> int:
    """Convert a time-like input to an integer YYYYMM.

    Accepts datetime/date, ISO-ish string, numpy.datetime64, or a sequence
    thereof (only the first element of a sequence is used).
    """
    # %% date handle
    if isinstance(time, (tuple, list, np.ndarray)):
        logging.warning(f'taking only first time {time[0]}, would you like multiple times upgrade to code?')
        time = time[0]

    if isinstance(time, str):
        time = parse(time)
    elif isinstance(time, np.datetime64):
        time = time.astype(datetime)
    elif not isinstance(time, (datetime, date)):
        raise TypeError(f'not sure what to do with type {type(time)}')

    return int(f'{time.year:d}{time.month:02d}')
def to_ut1unix(time: Union[str, datetime, float, np.ndarray]) -> np.ndarray:
    """
    converts time inputs to UT1 seconds since Unix epoch
    """
    # keep this order
    time = totime(time)

    # already numeric: assume it is seconds since epoch
    if isinstance(time, (float, int)):
        return time

    # sequence of datetimes -> elementwise conversion
    if isinstance(time, (tuple, list, np.ndarray)):
        assert isinstance(time[0], datetime), f'expected datetime, not {type(time[0])}'
        return np.array([dt2ut1(t) for t in time])

    assert isinstance(time, datetime)
    return dt2ut1(time)
def dt2ut1(t: datetime) -> float:
    """Seconds elapsed between the Unix epoch (1970-01-01) and datetime t."""
    assert isinstance(t, datetime)
    return (t - datetime(1970, 1, 1)).total_seconds()
def totime(time: Union[str, datetime, np.datetime64]) -> np.ndarray:
    """Normalize scalar or array time input; strings are parsed to datetime.

    Returns a scalar for scalar input, an array otherwise (via squeeze).
    """
    time = np.atleast_1d(time)

    # datetime / np.datetime64 elements pass through unchanged
    if isinstance(time[0], str):
        time = np.atleast_1d([parse(t) for t in time])

    return time.squeeze()[()]
|
scivision/gridaurora
|
gridaurora/calcemissions.py
|
calcemissions
|
python
|
def calcemissions(rates: xarray.DataArray, sim) -> Tuple[xarray.DataArray, np.ndarray, np.ndarray]:
    """Accumulate volume emission rate and column brightness for the requested reactions.

    rates: excitation rates (time x altitude x reaction)
    sim: carries reacreq (reaction names requested) and reactionfn (HDF5 path)
    Returns (DataArray of VER, VER array, column brightness).
    """
    if not sim.reacreq:
        return 0., 0., 0.

    ver = lamb = br = None

    # reaction key -> handler; tuple fixes the accumulation order
    handlers = (('metastable', getMetastable),  # metastable emissions
                ('atomic', getAtomic),          # prompt atomic oxygen
                ('n21ng', getN21NG),            # N2+ first negative
                ('n2meinel', getN2meinel),      # N2+ Meinel band
                ('n22pg', getN22PG),            # N2 2P (after Vallance Jones, 1974)
                ('n21pg', getN21PG))            # N2 1P

    for key, fn in handlers:
        if key in sim.reacreq:
            ver, lamb, br = fn(rates, ver, lamb, br, sim.reactionfn)

    if ver is None:
        raise ValueError('you have not selected any reactions to generate VER')

    # %% sort by wavelength, eliminate NaN
    lamb, ver, br = sortelimlambda(lamb, ver, br)

    # %% assemble output
    dfver = xarray.DataArray(data=ver, coords=[('alt_km', rates.alt_km),
                                               ('wavelength_nm', lamb)])

    return dfver, ver, br
|
Franck-Condon factor
http://chemistry.illinoisstate.edu/standard/che460/handouts/460-Feb28lec-S13.pdf
http://assign3.chem.usyd.edu.au/spectroscopy/index.php
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L20-L59
|
[
"def sortelimlambda(lamb, ver, br):\n assert lamb.ndim == 1\n assert lamb.size == ver.shape[-1]\n# %% eliminate unused wavelengths and Einstein coeff\n mask = np.isfinite(lamb)\n ver = ver[..., mask]\n lamb = lamb[mask]\n br = br[:, mask]\n# %% sort by lambda\n lambSortInd = lamb.argsort() # lamb is made piecemeal and is overall non-monotonic\n\n return lamb[lambSortInd], ver[..., lambSortInd], br[:, lambSortInd] # sort by wavelength ascending order\n",
"def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):\n with h5py.File(reactfn, 'r') as f:\n A = f['/metastable/A'][:]\n lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!\n\n \"\"\"\n concatenate along the reaction dimension, axis=-1\n \"\"\"\n vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],\n A[2:4] * rates.loc[..., 'no1d'].values[:, None],\n A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)\n\n assert vnew.shape == (rates.shape[0], A.size)\n\n return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)\n",
"def getAtomic(rates, ver, lamb, br, reactfn):\n \"\"\" prompt atomic emissions (nm)\n 844.6 777.4\n \"\"\"\n with h5py.File(reactfn, 'r') as f:\n lambnew = f['/atomic/lambda'].value.ravel(order='F') # some are not 1-D!\n\n vnew = np.concatenate((rates.loc[..., 'po3p3p'].values[..., None],\n rates.loc[..., 'po3p5p'].values[..., None]), axis=-1)\n\n return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)\n",
"def getN21NG(rates, ver, lamb, br, reactfn):\n \"\"\"\n excitation Franck-Condon factors (derived from Vallance Jones, 1974)\n \"\"\"\n with h5py.File(str(reactfn), 'r', libver='latest') as f:\n A = f['/N2+1NG/A'].value\n lambdaA = f['/N2+1NG/lambda'].value.ravel(order='F')\n franckcondon = f['/N2+1NG/fc'].value\n\n return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p1ng'], lamb, ver, rates.alt_km, br)\n",
"def getN2meinel(rates, ver, lamb, br, reactfn):\n with h5py.File(str(reactfn), 'r', libver='latest') as f:\n A = f['/N2+Meinel/A'].value\n lambdaA = f['/N2+Meinel/lambda'].value.ravel(order='F')\n franckcondon = f['/N2+Meinel/fc'].value\n # normalize\n franckcondon = franckcondon/franckcondon.sum() # special to this case\n\n return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'pmein'], lamb, ver, rates.alt_km, br)\n",
"def getN22PG(rates, ver, lamb, br, reactfn):\n \"\"\" from Benesch et al, 1966a \"\"\"\n with h5py.File(str(reactfn), 'r', libver='latest') as f:\n A = f['/N2_2PG/A'].value\n lambdaA = f['/N2_2PG/lambda'].value.ravel(order='F')\n franckcondon = f['/N2_2PG/fc'].value\n\n return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p2pg'], lamb, ver, rates.alt_km, br)\n",
"def getN21PG(rates, ver, lamb, br, reactfn):\n\n with h5py.File(str(reactfn), 'r', libver='latest') as fid:\n A = fid['/N2_1PG/A'].value\n lambnew = fid['/N2_1PG/lambda'].value.ravel(order='F')\n franckcondon = fid['/N2_1PG/fc'].value\n\n tau1PG = 1 / np.nansum(A, axis=1)\n \"\"\"\n solve for base concentration\n confac=[1.66;1.56;1.31;1.07;.77;.5;.33;.17;.08;.04;.02;.004;.001]; %Cartwright, 1973b, stop at nuprime==12\n Gattinger and Vallance Jones 1974\n confac=array([1.66,1.86,1.57,1.07,.76,.45,.25,.14,.07,.03,.01,.004,.001])\n \"\"\"\n\n consfac = franckcondon/franckcondon.sum() # normalize\n losscoef = (consfac / tau1PG).sum()\n N01pg = rates.loc[..., 'p1pg'] / losscoef\n\n scalevec = (A * consfac[:, None]).ravel(order='F') # for clarity (verified with matlab)\n\n vnew = scalevec[None, None, :] * N01pg.values[..., None]\n\n return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)\n"
] |
#!/usr/bin/env python
from pathlib import Path
import numpy as np
import h5py
from typing import Tuple
import xarray
"""
inputs:
spec: excitation rates, 3-D , dimensions time x altitude x reaction
output:
ver: a pandas DataFrame, wavelength x altitude
br: flux-tube integrated intensity, dimension lamb
See Eqn 9 of Appendix C of Zettergren PhD thesis 2007 to get a better insight on what this set of functions do.
"""
def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
    """Metastable emissions: scale Einstein coefficients by excitation rates.

    concatenate along the reaction dimension, axis=-1
    """
    with h5py.File(reactfn, 'r') as f:
        A = f['/metastable/A'][:]
        # Dataset.value was removed in h5py 3.0; slice with [:] instead
        lambnew = f['/metastable/lambda'][:].ravel(order='F')  # some are not 1-D!

    vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
                           A[2:4] * rates.loc[..., 'no1d'].values[:, None],
                           A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)

    assert vnew.shape == (rates.shape[0], A.size)

    return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getAtomic(rates, ver, lamb, br, reactfn):
    """ prompt atomic emissions (nm)
    844.6 777.4
    """
    with h5py.File(reactfn, 'r') as f:
        # Dataset.value was removed in h5py 3.0; slice with [:] instead
        lambnew = f['/atomic/lambda'][:].ravel(order='F')  # some are not 1-D!

    vnew = np.concatenate((rates.loc[..., 'po3p3p'].values[..., None],
                           rates.loc[..., 'po3p5p'].values[..., None]), axis=-1)

    return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getN21NG(rates, ver, lamb, br, reactfn):
    """
    excitation Franck-Condon factors (derived from Vallance Jones, 1974)
    """
    with h5py.File(str(reactfn), 'r', libver='latest') as f:
        # Dataset.value was removed in h5py 3.0; slice with [:] instead
        A = f['/N2+1NG/A'][:]
        lambdaA = f['/N2+1NG/lambda'][:].ravel(order='F')
        franckcondon = f['/N2+1NG/fc'][:]

    return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p1ng'], lamb, ver, rates.alt_km, br)
def getN2meinel(rates, ver, lamb, br, reactfn):
    """N2+ Meinel band emissions (Franck-Condon factors normalized to sum 1)."""
    with h5py.File(str(reactfn), 'r', libver='latest') as f:
        # Dataset.value was removed in h5py 3.0; slice with [:] instead
        A = f['/N2+Meinel/A'][:]
        lambdaA = f['/N2+Meinel/lambda'][:].ravel(order='F')
        franckcondon = f['/N2+Meinel/fc'][:]

    # normalize
    franckcondon = franckcondon/franckcondon.sum()  # special to this case

    return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'pmein'], lamb, ver, rates.alt_km, br)
def getN22PG(rates, ver, lamb, br, reactfn):
    """ from Benesch et al, 1966a """
    with h5py.File(str(reactfn), 'r', libver='latest') as f:
        # Dataset.value was removed in h5py 3.0; slice with [:] instead
        A = f['/N2_2PG/A'][:]
        lambdaA = f['/N2_2PG/lambda'][:].ravel(order='F')
        franckcondon = f['/N2_2PG/fc'][:]

    return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p2pg'], lamb, ver, rates.alt_km, br)
def getN21PG(rates, ver, lamb, br, reactfn):
    """N2 first positive band: solves for base concentration, then scales by A."""
    with h5py.File(str(reactfn), 'r', libver='latest') as fid:
        # Dataset.value was removed in h5py 3.0; slice with [:] instead
        A = fid['/N2_1PG/A'][:]
        lambnew = fid['/N2_1PG/lambda'][:].ravel(order='F')
        franckcondon = fid['/N2_1PG/fc'][:]

    tau1PG = 1 / np.nansum(A, axis=1)  # radiative lifetime per upper vib. level
    """
    solve for base concentration
    confac=[1.66;1.56;1.31;1.07;.77;.5;.33;.17;.08;.04;.02;.004;.001]; %Cartwright, 1973b, stop at nuprime==12
    Gattinger and Vallance Jones 1974
    confac=array([1.66,1.86,1.57,1.07,.76,.45,.25,.14,.07,.03,.01,.004,.001])
    """
    consfac = franckcondon/franckcondon.sum()  # normalize
    losscoef = (consfac / tau1PG).sum()
    N01pg = rates.loc[..., 'p1pg'] / losscoef

    scalevec = (A * consfac[:, None]).ravel(order='F')  # for clarity (verified with matlab)

    vnew = scalevec[None, None, :] * N01pg.values[..., None]

    return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):
    """
    ver dimensions: wavelength, altitude, time
    A and lambda dimensions:
    axis 0 is upper state vib. level (nu')
    axis 1 is bottom state vib level (nu'')
    there is a Franck-Condon parameter (variable fc) for each upper state nu'
    """
    # lifetime of each upper vibrational level
    tau = 1 / np.nansum(Aein, axis=1)

    # per-transition weight, flattened Fortran-order to match lambnew ordering
    weights = tau[:, None] * fc[:, None]
    scalevec = (Aein * weights).ravel(order='F')

    vnew = scalevec[None, None, :] * kin.values[..., None]

    return catvl(z, ver, vnew, lamb, lambnew, br)
def catvl(z, ver, vnew, lamb, lambnew, br):
    """
    trapz integrates over altitude axis, axis = -2
    concatenate over reaction dimension, axis = -1
    br: column integrated brightness
    lamb: wavelength [nm]
    ver: volume emission rate [photons / cm^-3 s^-3 ...]
    """
    if ver is None:
        # first contribution: initialize the accumulators
        ver = vnew.copy(order='F')
        lamb = lambnew.copy()
        br = np.trapz(ver, z, axis=-2)
    else:
        # integrate the NEW block before ver is overwritten -- must come first!
        br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1)
        ver = np.concatenate((ver, vnew), axis=-1)
        lamb = np.concatenate((lamb, lambnew))

    return ver, lamb, br
def sortelimlambda(lamb, ver, br):
    """Drop NaN wavelengths, then return (lamb, ver, br) sorted by wavelength."""
    assert lamb.ndim == 1
    assert lamb.size == ver.shape[-1]

    # %% eliminate unused wavelengths and Einstein coeff
    keep = np.isfinite(lamb)
    lamb = lamb[keep]
    ver = ver[..., keep]
    br = br[:, keep]

    # %% sort by lambda (lamb is made piecemeal and is overall non-monotonic)
    order = lamb.argsort()

    # sort by wavelength ascending order
    return lamb[order], ver[..., order], br[:, order]
|
scivision/gridaurora
|
gridaurora/calcemissions.py
|
getMetastable
|
python
|
def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
with h5py.File(reactfn, 'r') as f:
A = f['/metastable/A'][:]
lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!
vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
A[2:4] * rates.loc[..., 'no1d'].values[:, None],
A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)
assert vnew.shape == (rates.shape[0], A.size)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
|
concatenate along the reaction dimension, axis=-1
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L62-L76
|
[
"def catvl(z, ver, vnew, lamb, lambnew, br):\n \"\"\"\n trapz integrates over altitude axis, axis = -2\n concatenate over reaction dimension, axis = -1\n\n br: column integrated brightness\n lamb: wavelength [nm]\n ver: volume emission rate [photons / cm^-3 s^-3 ...]\n \"\"\"\n if ver is not None:\n br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!\n ver = np.concatenate((ver, vnew), axis=-1)\n lamb = np.concatenate((lamb, lambnew))\n else:\n ver = vnew.copy(order='F')\n lamb = lambnew.copy()\n br = np.trapz(ver, z, axis=-2)\n\n return ver, lamb, br\n"
] |
#!/usr/bin/env python
from pathlib import Path
import numpy as np
import h5py
from typing import Tuple
import xarray
"""
inputs:
spec: excitation rates, 3-D , dimensions time x altitude x reaction
output:
ver: a pandas DataFrame, wavelength x altitude
br: flux-tube integrated intensity, dimension lamb
See Eqn 9 of Appendix C of Zettergren PhD thesis 2007 to get a better insight on what this set of functions do.
"""
def calcemissions(rates: xarray.DataArray, sim) -> Tuple[xarray.DataArray, np.ndarray, np.ndarray]:
if not sim.reacreq:
return 0., 0., 0.
ver = None
lamb = None
br = None
"""
Franck-Condon factor
http://chemistry.illinoisstate.edu/standard/che460/handouts/460-Feb28lec-S13.pdf
http://assign3.chem.usyd.edu.au/spectroscopy/index.php
"""
# %% METASTABLE
if 'metastable' in sim.reacreq:
ver, lamb, br = getMetastable(rates, ver, lamb, br, sim.reactionfn)
# %% PROMPT ATOMIC OXYGEN EMISSIONS
if 'atomic' in sim.reacreq:
ver, lamb, br = getAtomic(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1N EMISSIONS
if 'n21ng' in sim.reacreq:
ver, lamb, br = getN21NG(rates, ver, lamb, br, sim.reactionfn)
# %% N2+ Meinel band
if 'n2meinel' in sim.reacreq:
ver, lamb, br = getN2meinel(rates, ver, lamb, br, sim.reactionfn)
# %% N2 2P (after Vallance Jones, 1974)
if 'n22pg' in sim.reacreq:
ver, lamb, br = getN22PG(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1P
if 'n21pg' in sim.reacreq:
ver, lamb, br = getN21PG(rates, ver, lamb, br, sim.reactionfn)
# %% Remove NaN wavelength entries
if ver is None:
raise ValueError('you have not selected any reactions to generate VER')
# %% sort by wavelength, eliminate NaN
lamb, ver, br = sortelimlambda(lamb, ver, br)
# %% assemble output
dfver = xarray.DataArray(data=ver, coords=[('alt_km', rates.alt_km),
('wavelength_nm', lamb)])
return dfver, ver, br
def getAtomic(rates, ver, lamb, br, reactfn):
""" prompt atomic emissions (nm)
844.6 777.4
"""
with h5py.File(reactfn, 'r') as f:
lambnew = f['/atomic/lambda'].value.ravel(order='F') # some are not 1-D!
vnew = np.concatenate((rates.loc[..., 'po3p3p'].values[..., None],
rates.loc[..., 'po3p5p'].values[..., None]), axis=-1)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getN21NG(rates, ver, lamb, br, reactfn):
"""
excitation Franck-Condon factors (derived from Vallance Jones, 1974)
"""
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+1NG/A'].value
lambdaA = f['/N2+1NG/lambda'].value.ravel(order='F')
franckcondon = f['/N2+1NG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p1ng'], lamb, ver, rates.alt_km, br)
def getN2meinel(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+Meinel/A'].value
lambdaA = f['/N2+Meinel/lambda'].value.ravel(order='F')
franckcondon = f['/N2+Meinel/fc'].value
# normalize
franckcondon = franckcondon/franckcondon.sum() # special to this case
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'pmein'], lamb, ver, rates.alt_km, br)
def getN22PG(rates, ver, lamb, br, reactfn):
""" from Benesch et al, 1966a """
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2_2PG/A'].value
lambdaA = f['/N2_2PG/lambda'].value.ravel(order='F')
franckcondon = f['/N2_2PG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p2pg'], lamb, ver, rates.alt_km, br)
def getN21PG(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as fid:
A = fid['/N2_1PG/A'].value
lambnew = fid['/N2_1PG/lambda'].value.ravel(order='F')
franckcondon = fid['/N2_1PG/fc'].value
tau1PG = 1 / np.nansum(A, axis=1)
"""
solve for base concentration
confac=[1.66;1.56;1.31;1.07;.77;.5;.33;.17;.08;.04;.02;.004;.001]; %Cartwright, 1973b, stop at nuprime==12
Gattinger and Vallance Jones 1974
confac=array([1.66,1.86,1.57,1.07,.76,.45,.25,.14,.07,.03,.01,.004,.001])
"""
consfac = franckcondon/franckcondon.sum() # normalize
losscoef = (consfac / tau1PG).sum()
N01pg = rates.loc[..., 'p1pg'] / losscoef
scalevec = (A * consfac[:, None]).ravel(order='F') # for clarity (verified with matlab)
vnew = scalevec[None, None, :] * N01pg.values[..., None]
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):
"""
ver dimensions: wavelength, altitude, time
A and lambda dimensions:
axis 0 is upper state vib. level (nu')
axis 1 is bottom state vib level (nu'')
there is a Franck-Condon parameter (variable fc) for each upper state nu'
"""
tau = 1/np.nansum(Aein, axis=1)
scalevec = (Aein * tau[:, None] * fc[:, None]).ravel(order='F')
vnew = scalevec[None, None, :]*kin.values[..., None]
return catvl(z, ver, vnew, lamb, lambnew, br)
def catvl(z, ver, vnew, lamb, lambnew, br):
"""
trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...]
"""
if ver is not None:
br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!
ver = np.concatenate((ver, vnew), axis=-1)
lamb = np.concatenate((lamb, lambnew))
else:
ver = vnew.copy(order='F')
lamb = lambnew.copy()
br = np.trapz(ver, z, axis=-2)
return ver, lamb, br
def sortelimlambda(lamb, ver, br):
assert lamb.ndim == 1
assert lamb.size == ver.shape[-1]
# %% eliminate unused wavelengths and Einstein coeff
mask = np.isfinite(lamb)
ver = ver[..., mask]
lamb = lamb[mask]
br = br[:, mask]
# %% sort by lambda
lambSortInd = lamb.argsort() # lamb is made piecemeal and is overall non-monotonic
return lamb[lambSortInd], ver[..., lambSortInd], br[:, lambSortInd] # sort by wavelength ascending order
|
scivision/gridaurora
|
gridaurora/calcemissions.py
|
getAtomic
|
python
|
def getAtomic(rates, ver, lamb, br, reactfn):
with h5py.File(reactfn, 'r') as f:
lambnew = f['/atomic/lambda'].value.ravel(order='F') # some are not 1-D!
vnew = np.concatenate((rates.loc[..., 'po3p3p'].values[..., None],
rates.loc[..., 'po3p5p'].values[..., None]), axis=-1)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
|
prompt atomic emissions (nm)
844.6 777.4
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L79-L89
|
[
"def catvl(z, ver, vnew, lamb, lambnew, br):\n \"\"\"\n trapz integrates over altitude axis, axis = -2\n concatenate over reaction dimension, axis = -1\n\n br: column integrated brightness\n lamb: wavelength [nm]\n ver: volume emission rate [photons / cm^-3 s^-3 ...]\n \"\"\"\n if ver is not None:\n br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!\n ver = np.concatenate((ver, vnew), axis=-1)\n lamb = np.concatenate((lamb, lambnew))\n else:\n ver = vnew.copy(order='F')\n lamb = lambnew.copy()\n br = np.trapz(ver, z, axis=-2)\n\n return ver, lamb, br\n"
] |
#!/usr/bin/env python
from pathlib import Path
import numpy as np
import h5py
from typing import Tuple
import xarray
"""
inputs:
spec: excitation rates, 3-D , dimensions time x altitude x reaction
output:
ver: a pandas DataFrame, wavelength x altitude
br: flux-tube integrated intensity, dimension lamb
See Eqn 9 of Appendix C of Zettergren PhD thesis 2007 to get a better insight on what this set of functions do.
"""
def calcemissions(rates: xarray.DataArray, sim) -> Tuple[xarray.DataArray, np.ndarray, np.ndarray]:
if not sim.reacreq:
return 0., 0., 0.
ver = None
lamb = None
br = None
"""
Franck-Condon factor
http://chemistry.illinoisstate.edu/standard/che460/handouts/460-Feb28lec-S13.pdf
http://assign3.chem.usyd.edu.au/spectroscopy/index.php
"""
# %% METASTABLE
if 'metastable' in sim.reacreq:
ver, lamb, br = getMetastable(rates, ver, lamb, br, sim.reactionfn)
# %% PROMPT ATOMIC OXYGEN EMISSIONS
if 'atomic' in sim.reacreq:
ver, lamb, br = getAtomic(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1N EMISSIONS
if 'n21ng' in sim.reacreq:
ver, lamb, br = getN21NG(rates, ver, lamb, br, sim.reactionfn)
# %% N2+ Meinel band
if 'n2meinel' in sim.reacreq:
ver, lamb, br = getN2meinel(rates, ver, lamb, br, sim.reactionfn)
# %% N2 2P (after Vallance Jones, 1974)
if 'n22pg' in sim.reacreq:
ver, lamb, br = getN22PG(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1P
if 'n21pg' in sim.reacreq:
ver, lamb, br = getN21PG(rates, ver, lamb, br, sim.reactionfn)
# %% Remove NaN wavelength entries
if ver is None:
raise ValueError('you have not selected any reactions to generate VER')
# %% sort by wavelength, eliminate NaN
lamb, ver, br = sortelimlambda(lamb, ver, br)
# %% assemble output
dfver = xarray.DataArray(data=ver, coords=[('alt_km', rates.alt_km),
('wavelength_nm', lamb)])
return dfver, ver, br
def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
with h5py.File(reactfn, 'r') as f:
A = f['/metastable/A'][:]
lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!
"""
concatenate along the reaction dimension, axis=-1
"""
vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
A[2:4] * rates.loc[..., 'no1d'].values[:, None],
A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)
assert vnew.shape == (rates.shape[0], A.size)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getN21NG(rates, ver, lamb, br, reactfn):
"""
excitation Franck-Condon factors (derived from Vallance Jones, 1974)
"""
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+1NG/A'].value
lambdaA = f['/N2+1NG/lambda'].value.ravel(order='F')
franckcondon = f['/N2+1NG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p1ng'], lamb, ver, rates.alt_km, br)
def getN2meinel(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+Meinel/A'].value
lambdaA = f['/N2+Meinel/lambda'].value.ravel(order='F')
franckcondon = f['/N2+Meinel/fc'].value
# normalize
franckcondon = franckcondon/franckcondon.sum() # special to this case
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'pmein'], lamb, ver, rates.alt_km, br)
def getN22PG(rates, ver, lamb, br, reactfn):
""" from Benesch et al, 1966a """
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2_2PG/A'].value
lambdaA = f['/N2_2PG/lambda'].value.ravel(order='F')
franckcondon = f['/N2_2PG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p2pg'], lamb, ver, rates.alt_km, br)
def getN21PG(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as fid:
A = fid['/N2_1PG/A'].value
lambnew = fid['/N2_1PG/lambda'].value.ravel(order='F')
franckcondon = fid['/N2_1PG/fc'].value
tau1PG = 1 / np.nansum(A, axis=1)
"""
solve for base concentration
confac=[1.66;1.56;1.31;1.07;.77;.5;.33;.17;.08;.04;.02;.004;.001]; %Cartwright, 1973b, stop at nuprime==12
Gattinger and Vallance Jones 1974
confac=array([1.66,1.86,1.57,1.07,.76,.45,.25,.14,.07,.03,.01,.004,.001])
"""
consfac = franckcondon/franckcondon.sum() # normalize
losscoef = (consfac / tau1PG).sum()
N01pg = rates.loc[..., 'p1pg'] / losscoef
scalevec = (A * consfac[:, None]).ravel(order='F') # for clarity (verified with matlab)
vnew = scalevec[None, None, :] * N01pg.values[..., None]
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):
"""
ver dimensions: wavelength, altitude, time
A and lambda dimensions:
axis 0 is upper state vib. level (nu')
axis 1 is bottom state vib level (nu'')
there is a Franck-Condon parameter (variable fc) for each upper state nu'
"""
tau = 1/np.nansum(Aein, axis=1)
scalevec = (Aein * tau[:, None] * fc[:, None]).ravel(order='F')
vnew = scalevec[None, None, :]*kin.values[..., None]
return catvl(z, ver, vnew, lamb, lambnew, br)
def catvl(z, ver, vnew, lamb, lambnew, br):
"""
trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...]
"""
if ver is not None:
br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!
ver = np.concatenate((ver, vnew), axis=-1)
lamb = np.concatenate((lamb, lambnew))
else:
ver = vnew.copy(order='F')
lamb = lambnew.copy()
br = np.trapz(ver, z, axis=-2)
return ver, lamb, br
def sortelimlambda(lamb, ver, br):
assert lamb.ndim == 1
assert lamb.size == ver.shape[-1]
# %% eliminate unused wavelengths and Einstein coeff
mask = np.isfinite(lamb)
ver = ver[..., mask]
lamb = lamb[mask]
br = br[:, mask]
# %% sort by lambda
lambSortInd = lamb.argsort() # lamb is made piecemeal and is overall non-monotonic
return lamb[lambSortInd], ver[..., lambSortInd], br[:, lambSortInd] # sort by wavelength ascending order
|
scivision/gridaurora
|
gridaurora/calcemissions.py
|
getN21NG
|
python
|
def getN21NG(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+1NG/A'].value
lambdaA = f['/N2+1NG/lambda'].value.ravel(order='F')
franckcondon = f['/N2+1NG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p1ng'], lamb, ver, rates.alt_km, br)
|
excitation Franck-Condon factors (derived from Vallance Jones, 1974)
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L92-L101
|
[
"def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):\n \"\"\"\n ver dimensions: wavelength, altitude, time\n\n A and lambda dimensions:\n axis 0 is upper state vib. level (nu')\n axis 1 is bottom state vib level (nu'')\n there is a Franck-Condon parameter (variable fc) for each upper state nu'\n \"\"\"\n tau = 1/np.nansum(Aein, axis=1)\n\n scalevec = (Aein * tau[:, None] * fc[:, None]).ravel(order='F')\n\n vnew = scalevec[None, None, :]*kin.values[..., None]\n\n return catvl(z, ver, vnew, lamb, lambnew, br)\n"
] |
#!/usr/bin/env python
from pathlib import Path
import numpy as np
import h5py
from typing import Tuple
import xarray
"""
inputs:
spec: excitation rates, 3-D , dimensions time x altitude x reaction
output:
ver: a pandas DataFrame, wavelength x altitude
br: flux-tube integrated intensity, dimension lamb
See Eqn 9 of Appendix C of Zettergren PhD thesis 2007 to get a better insight on what this set of functions do.
"""
def calcemissions(rates: xarray.DataArray, sim) -> Tuple[xarray.DataArray, np.ndarray, np.ndarray]:
if not sim.reacreq:
return 0., 0., 0.
ver = None
lamb = None
br = None
"""
Franck-Condon factor
http://chemistry.illinoisstate.edu/standard/che460/handouts/460-Feb28lec-S13.pdf
http://assign3.chem.usyd.edu.au/spectroscopy/index.php
"""
# %% METASTABLE
if 'metastable' in sim.reacreq:
ver, lamb, br = getMetastable(rates, ver, lamb, br, sim.reactionfn)
# %% PROMPT ATOMIC OXYGEN EMISSIONS
if 'atomic' in sim.reacreq:
ver, lamb, br = getAtomic(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1N EMISSIONS
if 'n21ng' in sim.reacreq:
ver, lamb, br = getN21NG(rates, ver, lamb, br, sim.reactionfn)
# %% N2+ Meinel band
if 'n2meinel' in sim.reacreq:
ver, lamb, br = getN2meinel(rates, ver, lamb, br, sim.reactionfn)
# %% N2 2P (after Vallance Jones, 1974)
if 'n22pg' in sim.reacreq:
ver, lamb, br = getN22PG(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1P
if 'n21pg' in sim.reacreq:
ver, lamb, br = getN21PG(rates, ver, lamb, br, sim.reactionfn)
# %% Remove NaN wavelength entries
if ver is None:
raise ValueError('you have not selected any reactions to generate VER')
# %% sort by wavelength, eliminate NaN
lamb, ver, br = sortelimlambda(lamb, ver, br)
# %% assemble output
dfver = xarray.DataArray(data=ver, coords=[('alt_km', rates.alt_km),
('wavelength_nm', lamb)])
return dfver, ver, br
def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
with h5py.File(reactfn, 'r') as f:
A = f['/metastable/A'][:]
lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!
"""
concatenate along the reaction dimension, axis=-1
"""
vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
A[2:4] * rates.loc[..., 'no1d'].values[:, None],
A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)
assert vnew.shape == (rates.shape[0], A.size)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getAtomic(rates, ver, lamb, br, reactfn):
""" prompt atomic emissions (nm)
844.6 777.4
"""
with h5py.File(reactfn, 'r') as f:
lambnew = f['/atomic/lambda'].value.ravel(order='F') # some are not 1-D!
vnew = np.concatenate((rates.loc[..., 'po3p3p'].values[..., None],
rates.loc[..., 'po3p5p'].values[..., None]), axis=-1)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getN2meinel(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+Meinel/A'].value
lambdaA = f['/N2+Meinel/lambda'].value.ravel(order='F')
franckcondon = f['/N2+Meinel/fc'].value
# normalize
franckcondon = franckcondon/franckcondon.sum() # special to this case
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'pmein'], lamb, ver, rates.alt_km, br)
def getN22PG(rates, ver, lamb, br, reactfn):
""" from Benesch et al, 1966a """
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2_2PG/A'].value
lambdaA = f['/N2_2PG/lambda'].value.ravel(order='F')
franckcondon = f['/N2_2PG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p2pg'], lamb, ver, rates.alt_km, br)
def getN21PG(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as fid:
A = fid['/N2_1PG/A'].value
lambnew = fid['/N2_1PG/lambda'].value.ravel(order='F')
franckcondon = fid['/N2_1PG/fc'].value
tau1PG = 1 / np.nansum(A, axis=1)
"""
solve for base concentration
confac=[1.66;1.56;1.31;1.07;.77;.5;.33;.17;.08;.04;.02;.004;.001]; %Cartwright, 1973b, stop at nuprime==12
Gattinger and Vallance Jones 1974
confac=array([1.66,1.86,1.57,1.07,.76,.45,.25,.14,.07,.03,.01,.004,.001])
"""
consfac = franckcondon/franckcondon.sum() # normalize
losscoef = (consfac / tau1PG).sum()
N01pg = rates.loc[..., 'p1pg'] / losscoef
scalevec = (A * consfac[:, None]).ravel(order='F') # for clarity (verified with matlab)
vnew = scalevec[None, None, :] * N01pg.values[..., None]
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):
"""
ver dimensions: wavelength, altitude, time
A and lambda dimensions:
axis 0 is upper state vib. level (nu')
axis 1 is bottom state vib level (nu'')
there is a Franck-Condon parameter (variable fc) for each upper state nu'
"""
tau = 1/np.nansum(Aein, axis=1)
scalevec = (Aein * tau[:, None] * fc[:, None]).ravel(order='F')
vnew = scalevec[None, None, :]*kin.values[..., None]
return catvl(z, ver, vnew, lamb, lambnew, br)
def catvl(z, ver, vnew, lamb, lambnew, br):
"""
trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...]
"""
if ver is not None:
br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!
ver = np.concatenate((ver, vnew), axis=-1)
lamb = np.concatenate((lamb, lambnew))
else:
ver = vnew.copy(order='F')
lamb = lambnew.copy()
br = np.trapz(ver, z, axis=-2)
return ver, lamb, br
def sortelimlambda(lamb, ver, br):
assert lamb.ndim == 1
assert lamb.size == ver.shape[-1]
# %% eliminate unused wavelengths and Einstein coeff
mask = np.isfinite(lamb)
ver = ver[..., mask]
lamb = lamb[mask]
br = br[:, mask]
# %% sort by lambda
lambSortInd = lamb.argsort() # lamb is made piecemeal and is overall non-monotonic
return lamb[lambSortInd], ver[..., lambSortInd], br[:, lambSortInd] # sort by wavelength ascending order
|
scivision/gridaurora
|
gridaurora/calcemissions.py
|
getN21PG
|
python
|
def getN21PG(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as fid:
A = fid['/N2_1PG/A'].value
lambnew = fid['/N2_1PG/lambda'].value.ravel(order='F')
franckcondon = fid['/N2_1PG/fc'].value
tau1PG = 1 / np.nansum(A, axis=1)
consfac = franckcondon/franckcondon.sum() # normalize
losscoef = (consfac / tau1PG).sum()
N01pg = rates.loc[..., 'p1pg'] / losscoef
scalevec = (A * consfac[:, None]).ravel(order='F') # for clarity (verified with matlab)
vnew = scalevec[None, None, :] * N01pg.values[..., None]
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
|
solve for base concentration
confac=[1.66;1.56;1.31;1.07;.77;.5;.33;.17;.08;.04;.02;.004;.001]; %Cartwright, 1973b, stop at nuprime==12
Gattinger and Vallance Jones 1974
confac=array([1.66,1.86,1.57,1.07,.76,.45,.25,.14,.07,.03,.01,.004,.001])
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L125-L148
|
[
"def catvl(z, ver, vnew, lamb, lambnew, br):\n \"\"\"\n trapz integrates over altitude axis, axis = -2\n concatenate over reaction dimension, axis = -1\n\n br: column integrated brightness\n lamb: wavelength [nm]\n ver: volume emission rate [photons / cm^-3 s^-3 ...]\n \"\"\"\n if ver is not None:\n br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!\n ver = np.concatenate((ver, vnew), axis=-1)\n lamb = np.concatenate((lamb, lambnew))\n else:\n ver = vnew.copy(order='F')\n lamb = lambnew.copy()\n br = np.trapz(ver, z, axis=-2)\n\n return ver, lamb, br\n"
] |
#!/usr/bin/env python
from pathlib import Path
import numpy as np
import h5py
from typing import Tuple
import xarray
"""
inputs:
spec: excitation rates, 3-D , dimensions time x altitude x reaction
output:
ver: a pandas DataFrame, wavelength x altitude
br: flux-tube integrated intensity, dimension lamb
See Eqn 9 of Appendix C of Zettergren PhD thesis 2007 to get a better insight on what this set of functions do.
"""
def calcemissions(rates: xarray.DataArray, sim) -> Tuple[xarray.DataArray, np.ndarray, np.ndarray]:
if not sim.reacreq:
return 0., 0., 0.
ver = None
lamb = None
br = None
"""
Franck-Condon factor
http://chemistry.illinoisstate.edu/standard/che460/handouts/460-Feb28lec-S13.pdf
http://assign3.chem.usyd.edu.au/spectroscopy/index.php
"""
# %% METASTABLE
if 'metastable' in sim.reacreq:
ver, lamb, br = getMetastable(rates, ver, lamb, br, sim.reactionfn)
# %% PROMPT ATOMIC OXYGEN EMISSIONS
if 'atomic' in sim.reacreq:
ver, lamb, br = getAtomic(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1N EMISSIONS
if 'n21ng' in sim.reacreq:
ver, lamb, br = getN21NG(rates, ver, lamb, br, sim.reactionfn)
# %% N2+ Meinel band
if 'n2meinel' in sim.reacreq:
ver, lamb, br = getN2meinel(rates, ver, lamb, br, sim.reactionfn)
# %% N2 2P (after Vallance Jones, 1974)
if 'n22pg' in sim.reacreq:
ver, lamb, br = getN22PG(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1P
if 'n21pg' in sim.reacreq:
ver, lamb, br = getN21PG(rates, ver, lamb, br, sim.reactionfn)
# %% Remove NaN wavelength entries
if ver is None:
raise ValueError('you have not selected any reactions to generate VER')
# %% sort by wavelength, eliminate NaN
lamb, ver, br = sortelimlambda(lamb, ver, br)
# %% assemble output
dfver = xarray.DataArray(data=ver, coords=[('alt_km', rates.alt_km),
('wavelength_nm', lamb)])
return dfver, ver, br
def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
with h5py.File(reactfn, 'r') as f:
A = f['/metastable/A'][:]
lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!
"""
concatenate along the reaction dimension, axis=-1
"""
vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
A[2:4] * rates.loc[..., 'no1d'].values[:, None],
A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)
assert vnew.shape == (rates.shape[0], A.size)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getAtomic(rates, ver, lamb, br, reactfn):
""" prompt atomic emissions (nm)
844.6 777.4
"""
with h5py.File(reactfn, 'r') as f:
lambnew = f['/atomic/lambda'].value.ravel(order='F') # some are not 1-D!
vnew = np.concatenate((rates.loc[..., 'po3p3p'].values[..., None],
rates.loc[..., 'po3p5p'].values[..., None]), axis=-1)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getN21NG(rates, ver, lamb, br, reactfn):
"""
excitation Franck-Condon factors (derived from Vallance Jones, 1974)
"""
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+1NG/A'].value
lambdaA = f['/N2+1NG/lambda'].value.ravel(order='F')
franckcondon = f['/N2+1NG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p1ng'], lamb, ver, rates.alt_km, br)
def getN2meinel(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+Meinel/A'].value
lambdaA = f['/N2+Meinel/lambda'].value.ravel(order='F')
franckcondon = f['/N2+Meinel/fc'].value
# normalize
franckcondon = franckcondon/franckcondon.sum() # special to this case
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'pmein'], lamb, ver, rates.alt_km, br)
def getN22PG(rates, ver, lamb, br, reactfn):
""" from Benesch et al, 1966a """
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2_2PG/A'].value
lambdaA = f['/N2_2PG/lambda'].value.ravel(order='F')
franckcondon = f['/N2_2PG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p2pg'], lamb, ver, rates.alt_km, br)
def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):
"""
ver dimensions: wavelength, altitude, time
A and lambda dimensions:
axis 0 is upper state vib. level (nu')
axis 1 is bottom state vib level (nu'')
there is a Franck-Condon parameter (variable fc) for each upper state nu'
"""
tau = 1/np.nansum(Aein, axis=1)
scalevec = (Aein * tau[:, None] * fc[:, None]).ravel(order='F')
vnew = scalevec[None, None, :]*kin.values[..., None]
return catvl(z, ver, vnew, lamb, lambnew, br)
def catvl(z, ver, vnew, lamb, lambnew, br):
"""
trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...]
"""
if ver is not None:
br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!
ver = np.concatenate((ver, vnew), axis=-1)
lamb = np.concatenate((lamb, lambnew))
else:
ver = vnew.copy(order='F')
lamb = lambnew.copy()
br = np.trapz(ver, z, axis=-2)
return ver, lamb, br
def sortelimlambda(lamb, ver, br):
assert lamb.ndim == 1
assert lamb.size == ver.shape[-1]
# %% eliminate unused wavelengths and Einstein coeff
mask = np.isfinite(lamb)
ver = ver[..., mask]
lamb = lamb[mask]
br = br[:, mask]
# %% sort by lambda
lambSortInd = lamb.argsort() # lamb is made piecemeal and is overall non-monotonic
return lamb[lambSortInd], ver[..., lambSortInd], br[:, lambSortInd] # sort by wavelength ascending order
|
scivision/gridaurora
|
gridaurora/calcemissions.py
|
doBandTrapz
|
python
|
def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):
tau = 1/np.nansum(Aein, axis=1)
scalevec = (Aein * tau[:, None] * fc[:, None]).ravel(order='F')
vnew = scalevec[None, None, :]*kin.values[..., None]
return catvl(z, ver, vnew, lamb, lambnew, br)
|
ver dimensions: wavelength, altitude, time
A and lambda dimensions:
axis 0 is upper state vib. level (nu')
axis 1 is bottom state vib level (nu'')
there is a Franck-Condon parameter (variable fc) for each upper state nu'
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L151-L166
|
[
"def catvl(z, ver, vnew, lamb, lambnew, br):\n \"\"\"\n trapz integrates over altitude axis, axis = -2\n concatenate over reaction dimension, axis = -1\n\n br: column integrated brightness\n lamb: wavelength [nm]\n ver: volume emission rate [photons / cm^-3 s^-3 ...]\n \"\"\"\n if ver is not None:\n br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!\n ver = np.concatenate((ver, vnew), axis=-1)\n lamb = np.concatenate((lamb, lambnew))\n else:\n ver = vnew.copy(order='F')\n lamb = lambnew.copy()\n br = np.trapz(ver, z, axis=-2)\n\n return ver, lamb, br\n"
] |
#!/usr/bin/env python
from pathlib import Path
import numpy as np
import h5py
from typing import Tuple
import xarray
"""
inputs:
spec: excitation rates, 3-D , dimensions time x altitude x reaction
output:
ver: a pandas DataFrame, wavelength x altitude
br: flux-tube integrated intensity, dimension lamb
See Eqn 9 of Appendix C of Zettergren PhD thesis 2007 to get a better insight on what this set of functions do.
"""
def calcemissions(rates: xarray.DataArray, sim) -> Tuple[xarray.DataArray, np.ndarray, np.ndarray]:
if not sim.reacreq:
return 0., 0., 0.
ver = None
lamb = None
br = None
"""
Franck-Condon factor
http://chemistry.illinoisstate.edu/standard/che460/handouts/460-Feb28lec-S13.pdf
http://assign3.chem.usyd.edu.au/spectroscopy/index.php
"""
# %% METASTABLE
if 'metastable' in sim.reacreq:
ver, lamb, br = getMetastable(rates, ver, lamb, br, sim.reactionfn)
# %% PROMPT ATOMIC OXYGEN EMISSIONS
if 'atomic' in sim.reacreq:
ver, lamb, br = getAtomic(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1N EMISSIONS
if 'n21ng' in sim.reacreq:
ver, lamb, br = getN21NG(rates, ver, lamb, br, sim.reactionfn)
# %% N2+ Meinel band
if 'n2meinel' in sim.reacreq:
ver, lamb, br = getN2meinel(rates, ver, lamb, br, sim.reactionfn)
# %% N2 2P (after Vallance Jones, 1974)
if 'n22pg' in sim.reacreq:
ver, lamb, br = getN22PG(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1P
if 'n21pg' in sim.reacreq:
ver, lamb, br = getN21PG(rates, ver, lamb, br, sim.reactionfn)
# %% Remove NaN wavelength entries
if ver is None:
raise ValueError('you have not selected any reactions to generate VER')
# %% sort by wavelength, eliminate NaN
lamb, ver, br = sortelimlambda(lamb, ver, br)
# %% assemble output
dfver = xarray.DataArray(data=ver, coords=[('alt_km', rates.alt_km),
('wavelength_nm', lamb)])
return dfver, ver, br
def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
with h5py.File(reactfn, 'r') as f:
A = f['/metastable/A'][:]
lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!
"""
concatenate along the reaction dimension, axis=-1
"""
vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
A[2:4] * rates.loc[..., 'no1d'].values[:, None],
A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)
assert vnew.shape == (rates.shape[0], A.size)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getAtomic(rates, ver, lamb, br, reactfn):
""" prompt atomic emissions (nm)
844.6 777.4
"""
with h5py.File(reactfn, 'r') as f:
lambnew = f['/atomic/lambda'].value.ravel(order='F') # some are not 1-D!
vnew = np.concatenate((rates.loc[..., 'po3p3p'].values[..., None],
rates.loc[..., 'po3p5p'].values[..., None]), axis=-1)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getN21NG(rates, ver, lamb, br, reactfn):
"""
excitation Franck-Condon factors (derived from Vallance Jones, 1974)
"""
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+1NG/A'].value
lambdaA = f['/N2+1NG/lambda'].value.ravel(order='F')
franckcondon = f['/N2+1NG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p1ng'], lamb, ver, rates.alt_km, br)
def getN2meinel(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+Meinel/A'].value
lambdaA = f['/N2+Meinel/lambda'].value.ravel(order='F')
franckcondon = f['/N2+Meinel/fc'].value
# normalize
franckcondon = franckcondon/franckcondon.sum() # special to this case
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'pmein'], lamb, ver, rates.alt_km, br)
def getN22PG(rates, ver, lamb, br, reactfn):
""" from Benesch et al, 1966a """
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2_2PG/A'].value
lambdaA = f['/N2_2PG/lambda'].value.ravel(order='F')
franckcondon = f['/N2_2PG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p2pg'], lamb, ver, rates.alt_km, br)
def getN21PG(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as fid:
A = fid['/N2_1PG/A'].value
lambnew = fid['/N2_1PG/lambda'].value.ravel(order='F')
franckcondon = fid['/N2_1PG/fc'].value
tau1PG = 1 / np.nansum(A, axis=1)
"""
solve for base concentration
confac=[1.66;1.56;1.31;1.07;.77;.5;.33;.17;.08;.04;.02;.004;.001]; %Cartwright, 1973b, stop at nuprime==12
Gattinger and Vallance Jones 1974
confac=array([1.66,1.86,1.57,1.07,.76,.45,.25,.14,.07,.03,.01,.004,.001])
"""
consfac = franckcondon/franckcondon.sum() # normalize
losscoef = (consfac / tau1PG).sum()
N01pg = rates.loc[..., 'p1pg'] / losscoef
scalevec = (A * consfac[:, None]).ravel(order='F') # for clarity (verified with matlab)
vnew = scalevec[None, None, :] * N01pg.values[..., None]
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def catvl(z, ver, vnew, lamb, lambnew, br):
"""
trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...]
"""
if ver is not None:
br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!
ver = np.concatenate((ver, vnew), axis=-1)
lamb = np.concatenate((lamb, lambnew))
else:
ver = vnew.copy(order='F')
lamb = lambnew.copy()
br = np.trapz(ver, z, axis=-2)
return ver, lamb, br
def sortelimlambda(lamb, ver, br):
assert lamb.ndim == 1
assert lamb.size == ver.shape[-1]
# %% eliminate unused wavelengths and Einstein coeff
mask = np.isfinite(lamb)
ver = ver[..., mask]
lamb = lamb[mask]
br = br[:, mask]
# %% sort by lambda
lambSortInd = lamb.argsort() # lamb is made piecemeal and is overall non-monotonic
return lamb[lambSortInd], ver[..., lambSortInd], br[:, lambSortInd] # sort by wavelength ascending order
|
scivision/gridaurora
|
gridaurora/calcemissions.py
|
catvl
|
python
|
def catvl(z, ver, vnew, lamb, lambnew, br):
if ver is not None:
br = np.concatenate((br, np.trapz(vnew, z, axis=-2)), axis=-1) # must come first!
ver = np.concatenate((ver, vnew), axis=-1)
lamb = np.concatenate((lamb, lambnew))
else:
ver = vnew.copy(order='F')
lamb = lambnew.copy()
br = np.trapz(ver, z, axis=-2)
return ver, lamb, br
|
trapz integrates over altitude axis, axis = -2
concatenate over reaction dimension, axis = -1
br: column integrated brightness
lamb: wavelength [nm]
ver: volume emission rate [photons / cm^-3 s^-3 ...]
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/calcemissions.py#L169-L187
| null |
#!/usr/bin/env python
from pathlib import Path
import numpy as np
import h5py
from typing import Tuple
import xarray
"""
inputs:
spec: excitation rates, 3-D , dimensions time x altitude x reaction
output:
ver: a pandas DataFrame, wavelength x altitude
br: flux-tube integrated intensity, dimension lamb
See Eqn 9 of Appendix C of Zettergren PhD thesis 2007 to get a better insight on what this set of functions do.
"""
def calcemissions(rates: xarray.DataArray, sim) -> Tuple[xarray.DataArray, np.ndarray, np.ndarray]:
if not sim.reacreq:
return 0., 0., 0.
ver = None
lamb = None
br = None
"""
Franck-Condon factor
http://chemistry.illinoisstate.edu/standard/che460/handouts/460-Feb28lec-S13.pdf
http://assign3.chem.usyd.edu.au/spectroscopy/index.php
"""
# %% METASTABLE
if 'metastable' in sim.reacreq:
ver, lamb, br = getMetastable(rates, ver, lamb, br, sim.reactionfn)
# %% PROMPT ATOMIC OXYGEN EMISSIONS
if 'atomic' in sim.reacreq:
ver, lamb, br = getAtomic(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1N EMISSIONS
if 'n21ng' in sim.reacreq:
ver, lamb, br = getN21NG(rates, ver, lamb, br, sim.reactionfn)
# %% N2+ Meinel band
if 'n2meinel' in sim.reacreq:
ver, lamb, br = getN2meinel(rates, ver, lamb, br, sim.reactionfn)
# %% N2 2P (after Vallance Jones, 1974)
if 'n22pg' in sim.reacreq:
ver, lamb, br = getN22PG(rates, ver, lamb, br, sim.reactionfn)
# %% N2 1P
if 'n21pg' in sim.reacreq:
ver, lamb, br = getN21PG(rates, ver, lamb, br, sim.reactionfn)
# %% Remove NaN wavelength entries
if ver is None:
raise ValueError('you have not selected any reactions to generate VER')
# %% sort by wavelength, eliminate NaN
lamb, ver, br = sortelimlambda(lamb, ver, br)
# %% assemble output
dfver = xarray.DataArray(data=ver, coords=[('alt_km', rates.alt_km),
('wavelength_nm', lamb)])
return dfver, ver, br
def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
with h5py.File(reactfn, 'r') as f:
A = f['/metastable/A'][:]
lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!
"""
concatenate along the reaction dimension, axis=-1
"""
vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
A[2:4] * rates.loc[..., 'no1d'].values[:, None],
A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)
assert vnew.shape == (rates.shape[0], A.size)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getAtomic(rates, ver, lamb, br, reactfn):
""" prompt atomic emissions (nm)
844.6 777.4
"""
with h5py.File(reactfn, 'r') as f:
lambnew = f['/atomic/lambda'].value.ravel(order='F') # some are not 1-D!
vnew = np.concatenate((rates.loc[..., 'po3p3p'].values[..., None],
rates.loc[..., 'po3p5p'].values[..., None]), axis=-1)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def getN21NG(rates, ver, lamb, br, reactfn):
"""
excitation Franck-Condon factors (derived from Vallance Jones, 1974)
"""
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+1NG/A'].value
lambdaA = f['/N2+1NG/lambda'].value.ravel(order='F')
franckcondon = f['/N2+1NG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p1ng'], lamb, ver, rates.alt_km, br)
def getN2meinel(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2+Meinel/A'].value
lambdaA = f['/N2+Meinel/lambda'].value.ravel(order='F')
franckcondon = f['/N2+Meinel/fc'].value
# normalize
franckcondon = franckcondon/franckcondon.sum() # special to this case
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'pmein'], lamb, ver, rates.alt_km, br)
def getN22PG(rates, ver, lamb, br, reactfn):
""" from Benesch et al, 1966a """
with h5py.File(str(reactfn), 'r', libver='latest') as f:
A = f['/N2_2PG/A'].value
lambdaA = f['/N2_2PG/lambda'].value.ravel(order='F')
franckcondon = f['/N2_2PG/fc'].value
return doBandTrapz(A, lambdaA, franckcondon, rates.loc[..., 'p2pg'], lamb, ver, rates.alt_km, br)
def getN21PG(rates, ver, lamb, br, reactfn):
with h5py.File(str(reactfn), 'r', libver='latest') as fid:
A = fid['/N2_1PG/A'].value
lambnew = fid['/N2_1PG/lambda'].value.ravel(order='F')
franckcondon = fid['/N2_1PG/fc'].value
tau1PG = 1 / np.nansum(A, axis=1)
"""
solve for base concentration
confac=[1.66;1.56;1.31;1.07;.77;.5;.33;.17;.08;.04;.02;.004;.001]; %Cartwright, 1973b, stop at nuprime==12
Gattinger and Vallance Jones 1974
confac=array([1.66,1.86,1.57,1.07,.76,.45,.25,.14,.07,.03,.01,.004,.001])
"""
consfac = franckcondon/franckcondon.sum() # normalize
losscoef = (consfac / tau1PG).sum()
N01pg = rates.loc[..., 'p1pg'] / losscoef
scalevec = (A * consfac[:, None]).ravel(order='F') # for clarity (verified with matlab)
vnew = scalevec[None, None, :] * N01pg.values[..., None]
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br)
def doBandTrapz(Aein, lambnew, fc, kin, lamb, ver, z, br):
"""
ver dimensions: wavelength, altitude, time
A and lambda dimensions:
axis 0 is upper state vib. level (nu')
axis 1 is bottom state vib level (nu'')
there is a Franck-Condon parameter (variable fc) for each upper state nu'
"""
tau = 1/np.nansum(Aein, axis=1)
scalevec = (Aein * tau[:, None] * fc[:, None]).ravel(order='F')
vnew = scalevec[None, None, :]*kin.values[..., None]
return catvl(z, ver, vnew, lamb, lambnew, br)
def sortelimlambda(lamb, ver, br):
assert lamb.ndim == 1
assert lamb.size == ver.shape[-1]
# %% eliminate unused wavelengths and Einstein coeff
mask = np.isfinite(lamb)
ver = ver[..., mask]
lamb = lamb[mask]
br = br[:, mask]
# %% sort by lambda
lambSortInd = lamb.argsort() # lamb is made piecemeal and is overall non-monotonic
return lamb[lambSortInd], ver[..., lambSortInd], br[:, lambSortInd] # sort by wavelength ascending order
|
scivision/gridaurora
|
gridaurora/solarangle.py
|
solarzenithangle
|
python
|
def solarzenithangle(time: datetime, glat: float, glon: float, alt_m: float) -> tuple:
time = totime(time)
obs = EarthLocation(lat=glat*u.deg, lon=glon*u.deg, height=alt_m*u.m)
times = Time(time, scale='ut1')
sun = get_sun(times)
sunobs = sun.transform_to(AltAz(obstime=times, location=obs))
return 90 - sunobs.alt.degree, sun, sunobs
|
Input:
t: scalar or array of datetime
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/solarangle.py#L8-L21
|
[
"def totime(time: Union[str, datetime, np.datetime64]) -> np.ndarray:\n time = np.atleast_1d(time)\n\n if isinstance(time[0], (datetime, np.datetime64)):\n pass\n elif isinstance(time[0], str):\n time = np.atleast_1d(list(map(parse, time)))\n\n return time.squeeze()[()]\n"
] |
from datetime import datetime
import astropy.units as u
from astropy.coordinates import get_sun, EarthLocation, AltAz
from astropy.time import Time
from . import totime
|
scivision/gridaurora
|
gridaurora/eFluxGen.py
|
maxwellian
|
python
|
def maxwellian(E: np.ndarray, E0: np.ndarray, Q0: np.ndarray) -> Tuple[np.ndarray, float]:
E0 = np.atleast_1d(E0)
Q0 = np.atleast_1d(Q0)
assert E0.ndim == Q0.ndim == 1
assert (Q0.size == 1 or Q0.size == E0.size)
Phi = Q0/(2*pi*E0**3) * E[:, None] * np.exp(-E[:, None]/E0)
Q = np.trapz(Phi, E, axis=0)
logging.info('total maxwellian flux Q: ' + (' '.join('{:.1e}'.format(q) for q in Q)))
return Phi, Q
|
input:
------
E: 1-D vector of energy bins [eV]
E0: characteristic energy (scalar or vector) [eV]
Q0: flux coefficient (scalar or vector) (to yield overall flux Q)
output:
-------
Phi: differential number flux
Q: total flux
Tanaka 2006 Eqn. 1
http://odin.gi.alaska.edu/lumm/Papers/Tanaka_2006JA011744.pdf
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/eFluxGen.py#L14-L39
| null |
"""
Michael Hirsch
based on Strickland 1993
"""
import logging
from pathlib import Path
import numpy as np
import h5py
from typing import Tuple
pi = np.pi
def fluxgen(E, E0, Q0, Wbc, bl, bm, bh, Bm, Bhf, verbose: int = 0) -> tuple:
Wb = Wbc*E0
isimE0 = abs(E - E0).argmin()
base = gaussflux(E, Wb, E0, Q0)
diffnumflux = base.copy()
low = letail(E, E0, Q0, bl, verbose)
diffnumflux += low # intermediate result
mid = midtail(E, E0, bm, Bm)
diffnumflux += mid # intermediate result
hi = hitail(E, diffnumflux, isimE0, E0, Bhf, bh, verbose)
diffnumflux += hi
if verbose > 0:
diprat(E0, diffnumflux, isimE0)
Q = np.trapz(diffnumflux, E, axis=0)
if verbose > 0:
print('total flux Q: ' + (' '.join('{:.1e}'.format(q) for q in Q)))
return np.asfortranarray(diffnumflux), low, mid, hi, base, Q
def letail(E: np.ndarray, E0: float, Q0: float, bl: float, verbose: int = 0) -> np.ndarray:
# for LET, 1<b<2
# Bl = 8200. #820 (typo?)
Bl = 0.4*Q0/(2*pi*E0**2) * np.exp(-1)
# bl = 1.0 #1
low = Bl * (E[:, None]/E0)**-bl
low[E[:, None] > E0] = 0.
if verbose > 0:
print('Bl: ' + (' '.join('{:0.1f}'.format(b) for b in Bl)))
return low
def midtail(E: np.ndarray, E0: np.ndarray, bm: float, Bm: float):
# Bm = 1.8e4 #1.8e4
# bm = 3. #3
mid = Bm*(E[:, None]/E0)**bm
mid[E[:, None] > E0] = 0.
return mid
def hitail(E: np.ndarray, diffnumflux: np.ndarray, isimE0: np.ndarray, E0: np.ndarray,
Bhf: np.ndarray, bh: float, verbose: int = 0):
"""
strickland 1993 said 0.2, but 0.145 gives better match to peak flux at 2500 = E0
"""
Bh = np.empty_like(E0)
for iE0 in np.arange(E0.size):
Bh[iE0] = Bhf[iE0]*diffnumflux[isimE0[iE0], iE0] # 4100.
# bh = 4 #2.9
het = Bh*(E[:, None] / E0)**-bh
het[E[:, None] < E0] = 0.
if verbose > 0:
print('Bh: ' + (' '.join('{:0.1f}'.format(b) for b in Bh)))
return het
def diprat(E0: np.ndarray, arc: np.ndarray, isimE0: np.ndarray):
dipratio = np.empty_like(E0)
for iE0 in np.arange(E0.size):
idip = arc[:isimE0[iE0], iE0].argmin(axis=0)
dipratio[iE0] = arc[idip, iE0]/arc[isimE0[iE0], iE0]
print('dipratio: ' + (' '.join(f'{d:0.2f}' for d in dipratio)))
# if not all(0.2<dipratio<0.5):
# warn('dipratio outside of 0.2<dipratio<0.5')
def gaussflux(E, Wb, E0, Q0):
Qc = Q0/(pi**(3/2) * Wb*E0)
return Qc * np.exp(-((E[:, None]-E0) / Wb)**2)
def writeh5(h5fn: Path, Phi: np.ndarray, E, fp):
if h5fn:
with h5py.File(h5fn, 'w') as f:
f.create_dataset('/diffnumflux', data=Phi)
hE = f.create_dataset('/E', data=E)
hE.attrs['Units'] = 'eV'
f.create_dataset('/diffnumflux_params', data=fp)
|
scivision/gridaurora
|
gridaurora/eFluxGen.py
|
hitail
|
python
|
def hitail(E: np.ndarray, diffnumflux: np.ndarray, isimE0: np.ndarray, E0: np.ndarray,
Bhf: np.ndarray, bh: float, verbose: int = 0):
Bh = np.empty_like(E0)
for iE0 in np.arange(E0.size):
Bh[iE0] = Bhf[iE0]*diffnumflux[isimE0[iE0], iE0] # 4100.
# bh = 4 #2.9
het = Bh*(E[:, None] / E0)**-bh
het[E[:, None] < E0] = 0.
if verbose > 0:
print('Bh: ' + (' '.join('{:0.1f}'.format(b) for b in Bh)))
return het
|
strickland 1993 said 0.2, but 0.145 gives better match to peak flux at 2500 = E0
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/eFluxGen.py#L90-L103
| null |
"""
Michael Hirsch
based on Strickland 1993
"""
import logging
from pathlib import Path
import numpy as np
import h5py
from typing import Tuple
pi = np.pi
def maxwellian(E: np.ndarray, E0: np.ndarray, Q0: np.ndarray) -> Tuple[np.ndarray, float]:
"""
input:
------
E: 1-D vector of energy bins [eV]
E0: characteristic energy (scalar or vector) [eV]
Q0: flux coefficient (scalar or vector) (to yield overall flux Q)
output:
-------
Phi: differential number flux
Q: total flux
Tanaka 2006 Eqn. 1
http://odin.gi.alaska.edu/lumm/Papers/Tanaka_2006JA011744.pdf
"""
E0 = np.atleast_1d(E0)
Q0 = np.atleast_1d(Q0)
assert E0.ndim == Q0.ndim == 1
assert (Q0.size == 1 or Q0.size == E0.size)
Phi = Q0/(2*pi*E0**3) * E[:, None] * np.exp(-E[:, None]/E0)
Q = np.trapz(Phi, E, axis=0)
logging.info('total maxwellian flux Q: ' + (' '.join('{:.1e}'.format(q) for q in Q)))
return Phi, Q
def fluxgen(E, E0, Q0, Wbc, bl, bm, bh, Bm, Bhf, verbose: int = 0) -> tuple:
Wb = Wbc*E0
isimE0 = abs(E - E0).argmin()
base = gaussflux(E, Wb, E0, Q0)
diffnumflux = base.copy()
low = letail(E, E0, Q0, bl, verbose)
diffnumflux += low # intermediate result
mid = midtail(E, E0, bm, Bm)
diffnumflux += mid # intermediate result
hi = hitail(E, diffnumflux, isimE0, E0, Bhf, bh, verbose)
diffnumflux += hi
if verbose > 0:
diprat(E0, diffnumflux, isimE0)
Q = np.trapz(diffnumflux, E, axis=0)
if verbose > 0:
print('total flux Q: ' + (' '.join('{:.1e}'.format(q) for q in Q)))
return np.asfortranarray(diffnumflux), low, mid, hi, base, Q
def letail(E: np.ndarray, E0: float, Q0: float, bl: float, verbose: int = 0) -> np.ndarray:
# for LET, 1<b<2
# Bl = 8200. #820 (typo?)
Bl = 0.4*Q0/(2*pi*E0**2) * np.exp(-1)
# bl = 1.0 #1
low = Bl * (E[:, None]/E0)**-bl
low[E[:, None] > E0] = 0.
if verbose > 0:
print('Bl: ' + (' '.join('{:0.1f}'.format(b) for b in Bl)))
return low
def midtail(E: np.ndarray, E0: np.ndarray, bm: float, Bm: float):
# Bm = 1.8e4 #1.8e4
# bm = 3. #3
mid = Bm*(E[:, None]/E0)**bm
mid[E[:, None] > E0] = 0.
return mid
def diprat(E0: np.ndarray, arc: np.ndarray, isimE0: np.ndarray):
dipratio = np.empty_like(E0)
for iE0 in np.arange(E0.size):
idip = arc[:isimE0[iE0], iE0].argmin(axis=0)
dipratio[iE0] = arc[idip, iE0]/arc[isimE0[iE0], iE0]
print('dipratio: ' + (' '.join(f'{d:0.2f}' for d in dipratio)))
# if not all(0.2<dipratio<0.5):
# warn('dipratio outside of 0.2<dipratio<0.5')
def gaussflux(E, Wb, E0, Q0):
Qc = Q0/(pi**(3/2) * Wb*E0)
return Qc * np.exp(-((E[:, None]-E0) / Wb)**2)
def writeh5(h5fn: Path, Phi: np.ndarray, E, fp):
if h5fn:
with h5py.File(h5fn, 'w') as f:
f.create_dataset('/diffnumflux', data=Phi)
hE = f.create_dataset('/E', data=E)
hE.attrs['Units'] = 'eV'
f.create_dataset('/diffnumflux_params', data=fp)
|
scivision/gridaurora
|
gridaurora/plots.py
|
plotOptMod
|
python
|
def plotOptMod(verNObg3gray, VERgray):
if VERgray is None and verNObg3gray is None:
return
fg = figure()
ax2 = fg.gca() # summed (as camera would see)
if VERgray is not None:
z = VERgray.alt_km
Ek = VERgray.energy_ev.values
# ax1.semilogx(VERgray, z, marker='',label='filt', color='b')
props = {'boxstyle': 'round', 'facecolor': 'wheat', 'alpha': 0.5}
fgs, axs = fg.subplots(6, 6, sharex=True, sharey='row')
axs = axs.ravel() # for convenient iteration
fgs.subplots_adjust(hspace=0, wspace=0)
fgs.suptitle('filtered VER/flux')
fgs.text(0.04, 0.5, 'Altitude [km]', va='center', rotation='vertical')
fgs.text(0.5, 0.04, 'Beam energy [eV]', ha='center')
for i, e in enumerate(Ek):
axs[i].semilogx(VERgray.loc[:, e], z)
axs[i].set_xlim((1e-3, 1e4))
# place a text box in upper left in axes coords
axs[i].text(0.95, 0.95, '{:0.0f}'.format(e)+'eV',
transform=axs[i].transAxes, fontsize=12,
va='top', ha='right', bbox=props)
for i in range(33, 36):
axs[i].axis('off')
ax2.semilogx(VERgray.sum(axis=1), z, label='filt', color='b')
# specific to energies
ax = figure().gca()
for e in Ek:
ax.semilogx(VERgray.loc[:, e], z, marker='', label='{:.0f} eV'.format(e))
ax.set_title('filtered VER/flux')
ax.set_xlabel('VER/flux')
ax.set_ylabel('altitude [km]')
ax.legend(loc='best', fontsize=8)
ax.set_xlim((1e-5, 1e5))
ax.grid(True)
if verNObg3gray is not None:
ax1 = figure().gca() # overview
z = verNObg3gray.alt_km
Ek = verNObg3gray.energy_ev.values
ax1.semilogx(verNObg3gray, z, marker='', label='unfilt', color='r')
ax2.semilogx(verNObg3gray.sum(axis=1), z, label='unfilt', color='r')
ax = figure().gca()
for e in Ek:
ax.semilogx(verNObg3gray.loc[:, e], z, marker='', label='{:.0f} eV'.format(e))
ax.set_title('UNfiltered VER/flux')
ax.set_xlabel('VER/flux')
ax.set_ylabel('altitude [km]')
ax.legend(loc='best', fontsize=8)
ax.set_xlim((1e-5, 1e5))
ax.grid(True)
ax1.set_title('VER/flux, one profile per beam')
ax1.set_xlabel('VER/flux')
ax1.set_ylabel('altitude [km]')
ax1.grid(True)
ax2.set_xlabel('VER/flux')
ax2.set_ylabel('altitude [km]')
ax2.set_title('VER/flux summed over all energy beams \n (as the camera would see)')
ax2.legend(loc='best')
ax2.grid(True)
|
called from either readTranscar.py or hist-feasibility/plotsnew.py
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/plots.py#L260-L331
| null |
import logging
from datetime import datetime
from pathlib import Path
import h5py
import xarray
from numpy.ma import masked_invalid # for pcolormesh, which doesn't like NaN
from matplotlib.pyplot import figure, draw, close
from matplotlib.colors import LogNorm
from matplotlib.ticker import MultipleLocator
from matplotlib.dates import SecondLocator, DateFormatter, MinuteLocator
from typing import List
import numpy as np
import os
import gridaurora.ztanh as ga
import gridaurora.opticalmod as gao
if os.name == 'nt':
import pathvalidate
else:
pathvalidate = None
# IEEE Transactions requires 600 dpi
dymaj = 100
dymin = 20
def writeplots(fg, plotprefix, tind=None, odir=None, fmt='.png', anno=None, dpi=None, facecolor=None, doclose=True):
    """Save a matplotlib figure to disk; silently no-op when fg or odir is None.

    fg: matplotlib Figure to save (None -> no-op)
    plotprefix: filename stem
    tind: time index/stamp appended to the stem (formatted by nametime())
    odir: output directory (None -> no-op)
    fmt: file extension, including the leading dot
    anno: optional annotation text drawn onto the figure before saving
    dpi, facecolor: forwarded to Figure.savefig
    doclose: close the figure after saving to free memory

    Any exception is logged and swallowed so a single plotting failure
    cannot abort a long simulation run.
    """
    try:
        if fg is None or odir is None:
            return
# %%
        draw()  # Must have this here or plot doesn't update in animation multiplot mode!
        # TIF was not faster and was 100 times the file size!
        # PGF is slow and big file,
        # RAW crashes
        # JPG no faster than PNG
        suff = nametime(tind)
        if anno:
            fg.text(0.15, 0.8, anno, fontsize='x-large')
        if pathvalidate is not None:
            # Windows only (see module-level import): strip characters illegal in filenames
            cn = Path(odir).expanduser() / pathvalidate.sanitize_filename(plotprefix + suff + fmt)
        else:
            cn = Path(odir).expanduser() / (plotprefix + suff + fmt)
        print('write', cn)
        if facecolor is None:
            facecolor = fg.get_facecolor()
        fg.savefig(cn, bbox_inches='tight', dpi=dpi, facecolor=facecolor, edgecolor='none')
        if doclose:
            close(fg)
    except Exception as e:
        logging.error(f'{e} when plotting {plotprefix}')
def nametime(tind):
    """Format a time index into a filename suffix.

    None -> '' ; small int -> zero-padded 3 digits ;
    datetime -> ISO string truncated by 3 chars (millisecond digits, arbitrary) ;
    anything else -> str(tind).
    """
    if tind is None:
        return ''
    if isinstance(tind, datetime):
        # drop the trailing 3 characters so only millisecond digits remain (arbitrary)
        return tind.isoformat()[:-3]
    if isinstance(tind, int) and tind < 1e6:
        return f'{tind:03d}'
    return str(tind)
# %%
def plotflux(E, E0, arc, base=None, hi=None, low=None, mid=None, ttxt='Differential Number Flux'):
    """Plot a differential number flux spectrum, with optional debug plots
    of its constituent pieces (base Gaussian, low/mid/hi tails).

    E: energy grid [eV]
    E0: characteristic energy/energies [eV] (scalar or array); used for legend labels
    arc: composite flux to plot
    base, hi, low, mid: optional component curves (plotted when provided)
    ttxt: main plot title
    """
    FMAX = 1e6
    FMIN = 1e2
    # BUGFIX: np.atleast_1d lets a scalar E0 produce a one-entry legend;
    # the original list comprehension raised TypeError when E0 was scalar,
    # even though the branch below explicitly supports np.isscalar(E0).
    lblstr = ['{:.0f}'.format(e0) for e0 in np.atleast_1d(E0)]
    ax = figure().gca()
    if np.isscalar(E0) and mid is not None:
        # overlay the components as dotted black curves behind the composite
        ax.loglog(E, hi, 'k:')
        ax.loglog(E, low, 'k:')
        ax.loglog(E, mid, 'k:')
        ax.loglog(E, base, color='k')
    ax.loglog(E, arc, linewidth=2)
    ax.grid(True, which='both')
    ax.set_xlabel('Electron Energy [eV]')  # ,fontsize=afs,labelpad=-2)
    ax.set_ylabel('Flux [cm$^{-2}$s$^{-1}$eV$^{-1}$sr$^{-1}$]')  # ,fontsize=afs)
    ax.set_title(ttxt)
    # ax.tick_params(axis='both', which='both')
    ax.autoscale(True, tight=True)
    ax.set_ylim((1e2, FMAX))
    ax.legend(lblstr, loc='best')  # ,prop={'size':'large'})
    # ax.set_xlim((1e2,1e4))
    # sns.despine(ax=ax)
    if base is not None:
        # one debug figure per component curve
        ax = figure().gca()
        ax.loglog(E, base)
        ax.set_ylim((FMIN, FMAX))
        # ax.set_xlim((1e2,1e4))
        ax.set_title('arc Gaussian base function, E0=' + str(E0) + '[eV]' +
                     '\n Wbc: width, Q0: height')
        ax.set_xlabel('Electron Energy [eV]')
        ax.set_ylabel('Flux [cm$^{-2}$s$^{-1}$eV$^{-1}$sr$^{-1}$]')
        ax.legend(lblstr)

        ax = figure().gca()
        ax.loglog(E, low)
        ax.set_ylim((FMIN, FMAX))
        ax.set_title('arc low (E<E0). Bl: height, bh: slope')
        ax.set_xlabel('Electron Energy [eV]')
        ax.set_ylabel('Flux [cm$^{-2}$s$^{-1}$eV$^{-1}$sr$^{-1}$]')

        ax = figure().gca()
        ax.loglog(E, mid)
        ax.set_ylim((FMIN, FMAX))
        ax.set_title('arc mid. Bm:height, bm: slope')
        ax.set_xlabel('Electron Energy [eV]')
        ax.set_ylabel('Flux [cm$^{-2}$s$^{-1}$eV$^{-1}$sr$^{-1}$]')

        ax = figure().gca()
        ax.loglog(E, hi)
        ax.set_ylim((FMIN, FMAX))
        ax.set_title('arc hi (E>E0). Bhf: height, bh: slope')
        ax.set_xlabel('Electron Energy [eV]')
        ax.set_ylabel('Flux [cm$^{-2}$s$^{-1}$eV$^{-1}$sr$^{-1}$]')
# %%
def ploteigver(EKpcolor, zKM, eigenprofile,
               vlim=(None,)*6, sim=None, tInd=None, makeplot=None, prefix=None, progms=None):
    """Pseudocolor plot of one VER eigenprofile vs. beam energy and altitude.

    EKpcolor: energy bin edges [eV] for pcolormesh
    zKM: altitude grid [km]
    eigenprofile: 2-D VER array (alt x energy); NaNs are masked before plotting
    vlim: 6-tuple of plot limits; [2:4] = altitude y-limits, [4:6] = color limits
    sim: optional object with .opticalfilter and .reacreq used in the title
    tInd: optional time index/stamp for title and output filename
    makeplot, prefix, progms: forwarded to writeplots()

    Exceptions are logged and swallowed so one bad frame does not stop a run.
    """
    try:
        fg = figure()
        ax = fg.gca()
        # pcolormesh canNOT handle nan at all
        pcm = ax.pcolormesh(EKpcolor, zKM, masked_invalid(eigenprofile),
                            edgecolors='none',  # cmap=pcmcmap,
                            norm=LogNorm(),
                            vmin=vlim[4], vmax=vlim[5])
        ax.set_xlabel('Energy [eV]')
        ax.set_ylabel(r'$B_\parallel$ [km]')
        ax.autoscale(True, tight=True)
        ax.set_xscale('log')
        ax.yaxis.set_major_locator(MultipleLocator(dymaj))
        ax.yaxis.set_minor_locator(MultipleLocator(dymin))
# %% title
        if tInd is not None:
            mptitle = str(tInd)
        else:
            mptitle = ''
        mptitle += '$P_{{eig}}$'
        if sim:
            mptitle += ', filter: {}'.format(sim.opticalfilter)
            mptitle += str(sim.reacreq)

        ax.set_title(mptitle)  # ,fontsize=tfs)
# %% colorbar
        cbar = fg.colorbar(pcm, ax=ax)
        cbar.set_label('[photons cm$^{-3}$s$^{-1}$]', labelpad=0)  # ,fontsize=afs)
        # cbar.ax.tick_params(labelsize=afs)
        # cbar.ax.yaxis.get_offset_text().set_size(afs)
# %% ticks,lim
        ax.tick_params(axis='both', which='both', direction='out')
        ax.set_ylim(vlim[2:4])
# %%
        # NOTE(review): positionally this passes makeplot as writeplots' odir
        # and progms as its fmt — looks mismatched with writeplots' signature; confirm.
        writeplots(fg, prefix, tInd, makeplot, progms)
    except Exception as e:
        logging.error('tind {} {}'.format(tInd, e))
def plotT(T, mmsl):
    """Plot component and combined system transmittance curves on two figures.

    T: dataset holding 'filter', 'window', 'qe', 'atm' plus combined
       'sys' and 'sysNObg3' variables on a wavelength_nm coordinate
    mmsl: sequence whose first two entries bound the component-plot x-axis
    """
    wl = T.wavelength_nm

    ax_comp = figure().gca()
    for name in ('filter', 'window', 'qe', 'atm'):
        ax_comp.plot(wl, T[name], label=name)
    ax_comp.set_xlim(mmsl[:2])
    ax_comp.set_title(f'{T.filename} Component transmittance')

    ax_sys = figure().gca()
    for name in ('sys', 'sysNObg3'):
        ax_sys.plot(wl, T[name], label=name)
    ax_sys.set_title(f'{T.filename} System Transmittance')

    for axis in (ax_comp, ax_sys):
        niceTax(axis)
def niceTax(a):
    """Apply the shared cosmetic styling used by the transmittance axes."""
    a.grid(True, which='both')
    a.invert_xaxis()
    a.set_xlabel('wavelength (nm)')
    a.set_ylabel('Transmittance (unitless)')
    # a.set_yscale('log')
    # a.set_ylim(1e-2,1)
    a.legend(loc='best')
def comparefilters(Ts):
    """Overlay each filter's transmittance in its own subplot, marking
    reference auroral emission lines.

    Ts: sequence of transmission datasets/arrays, each with a wavelength_nm
        coordinate and a filename attribute; each either has a 'filter'
        variable or is itself a bare 1-D transmittance curve.
    """
    fg = figure()
    axs = fg.subplots(len(Ts), 1, sharex=True, sharey=True)
    for T, ax in zip(Ts, axs):
        try:
            ax.plot(T.wavelength_nm, T['filter'], label=T.filename)
        except ValueError:  # just a plain filter
            assert T.ndim == 1
            ax.plot(T.wavelength_nm, T, label=T.filename)
        # reference auroral emission line wavelengths [nm]
        forbidden = [630., 555.7, ]
        permitted = [391.4, 427.8, 844.6, 777.4]
        for l in forbidden:
            ax.axvline(l, linestyle='--', color='darkred', alpha=0.8)
        for l in permitted:
            ax.axvline(l, linestyle='--', color='darkgreen', alpha=0.8)
        ax.set_title(f'{T.filename}')
    fg.suptitle('Transmittance')
    # these act on the last subplot; x-limits are shared across all via sharex
    ax.set_ylim((0, 1))
    ax.set_xlim(T.wavelength_nm[[-1, 0]])
    ax.set_xlabel('wavelength [nm]')
def plotz(z: np.ndarray):
    """Plot grid spacing versus altitude, marking the median spacing."""
    spacing = np.gradient(z, edge_order=2)  # second-order accurate (numpy>=1.9.1)
    ax = figure().gca()
    ax.plot(spacing, z)
    ax.axvline(np.median(spacing), color='r', linestyle='--', label='median')
    ax.set_title('$N_p=$'+str(z.shape[0]))
    ax.set_xlabel('grid spacing [km]')
    ax.set_ylabel('altitude [km]')
    ax.legend(loc='best')
    ax.grid(True)
if __name__ == '__main__':
    # demo CLI: build a continuously-varying (tanh-tapered) altitude grid
    # and plot its spacing vs. altitude
    from matplotlib.pyplot import show
    from argparse import ArgumentParser
    p = ArgumentParser(description='create continuously step sized grid')
    p.add_argument('-n', '--np', help='number of points in grid', type=int, default=300)
    p.add_argument('--zmin', help='bottom of grid', type=float, default=90)
    p.add_argument('--gmin', help='minimum grid spacing', type=float, default=1.5)
    p.add_argument('--gmax', help='max grid spacing', type=float, default=10.575)
    a = p.parse_args()

    zgrid = ga.setupz(a.np, a.zmin, a.gmin, a.gmax)
    plotz(zgrid)
    print(zgrid[-1])  # top altitude of the generated grid [km]
    show()
# %% HIST
def comparejgr2013(altkm, zenang, bg3fn, windfn, qefn):
    """Compare this package's system transmission model against the
    precomputed JGR2013 curve shipped in precompute/trans_jgr2013a.h5.

    altkm: observer altitude [km]
    zenang: zenith angle [deg]
    bg3fn, windfn, qefn: filter / window / quantum-efficiency HDF5 files,
        forwarded to gao.getSystemT
    """
    R = Path(__file__).parent
    with h5py.File(R / 'precompute/trans_jgr2013a.h5', 'r') as f:
        reqLambda = f['/lambda'][:]
        Tjgr2013 = f['/T'][:]

    optT = gao.getSystemT(reqLambda, bg3fn, windfn, qefn, altkm, zenang)

    ax = figure().gca()
    ax.semilogy(reqLambda, optT['sys'], 'b', label='HST')
    ax.semilogy(reqLambda, Tjgr2013, 'r', label='JGR2013')
    ax.set_xlabel('wavelength [nm]')
    ax.set_ylabel('T')
    ax.grid(True)
    ax.legend(loc='best')
    # BUGFIX: the title was set twice; the first call
    # ('Comparision of Transmission models...') was immediately
    # overwritten and thus dead code — only the final title is kept.
    ax.set_title('System Transmission + Atmospheric Absorption')
    ax.set_ylim(1e-10, 1)
def plotAllTrans(optT, log):
    """Plot optics, atmosphere, and combined transmission vs. wavelength.

    optT: dataset with 'sys' and 'atm' variables on a wavelength_nm coordinate
    log: if truthy, use a log y-scale floored at 1e-5
    returns the matplotlib Figure
    """
    wl = optT.wavelength_nm
    fig = figure(figsize=(7, 5))
    ax = fig.gca()
    for var, lbl in (('sys', 'optics'), ('atm', 'atmosphere')):
        ax.plot(wl, optT[var], label=lbl)
    ax.plot(wl, optT['sys']*optT['atm'], label='total', linewidth=2)
    if log:
        ax.set_yscale('log')
        ax.set_ylim(bottom=1e-5)
    ax.set_title('System Transmission')
    ax.set_xlabel('wavelength [nm]')
    ax.set_ylabel('Transmission [dimensionless]')
    ax.grid(True, 'both')
    ax.invert_xaxis()
    ax.xaxis.set_major_locator(MultipleLocator(100))
    ax.legend(loc='center', bbox_to_anchor=(0.3, 0.15))
    return fig
def plotPeigen(Peigen):
    """Pseudocolor of the VER eigenprofile matrix (alt x energy).

    Silently returns unless Peigen is an xarray.DataArray with
    energy_ev and alt_km coordinates.
    """
    if not isinstance(Peigen, xarray.DataArray):
        return
    fig = figure()
    ax = fig.gca()
    mesh = ax.pcolormesh(Peigen.energy_ev,
                         Peigen.alt_km,
                         Peigen.values)
    ax.autoscale(True, tight=True)
    ax.set_xscale('log')
    ax.set_xlabel('beam energy [eV]')
    ax.set_ylabel('altitude [km]')
    ax.set_title('Volume Emission Rate per unit diff num flux')
    fig.colorbar(mesh)
def showIncrVER(tTC: np.ndarray, tReqInd: int, tctime: np.ndarray,
                ver: xarray.DataArray, tver: xarray.DataArray,
                titxt: str, makePlots: List[str]):
    """Plot time-dependent VER/flux: a time-altitude pseudocolor ('eigtime')
    and/or per-wavelength altitude profiles at one time ('eigtime1d').

    tTC: time axis for the pseudocolor plot
    tReqInd: index into tTC of the requested time (marked / titled)
    tctime: mapping-like with keys 'tstartPrecip', 'tendPrecip',
            'latgeo_ini', 'longeo_ini', 'dayofsim'
    ver: VER/flux DataArray with alt_km and wavelength coordinates
    tver: time-dependent VER; summed over axis 0 (wavelength) for the pcolormesh
    titxt: title prefix (also sanitized into the output filename)
    makePlots: list of plot names selecting which figures to draw
    """
    saveplot = False
    z = ver.alt_km
    lamb = ver.wavelength
    # if 'spectra1d' in makePlots:
    #     b = np.trapz(ver, z, axis=1)  # integrate along z, looking up magnetic zenith
    #     plotspectra(b, lamb)
    if 'eigtime' in makePlots:
        fg = figure(figsize=(11, 8), dpi=100, tight_layout=True)
        ax = fg.gca()
        pcm = ax.pcolormesh(tTC, z, tver.sum(axis=0),  # sum over wavelength
                            edgecolors='none', cmap=None, norm=None,
                            vmin=0, vmax=1e3)
        ax.axvline(tTC[tReqInd], color='white', linestyle='--', label='Req. Time')
        ax.axvline(tctime['tstartPrecip'], color='red', linestyle='--', label='Precip. Start')
        ax.axvline(tctime['tendPrecip'], color='red', linestyle='--', label='Precip. End')
        titlemean = titxt + (r'\n VER/flux: $\lambda \in$' +
                             str(lamb) + ' [nm]' +
                             '\n geodetic lat:' + str(tctime['latgeo_ini'])
                             + ' lon:' + str(tctime['longeo_ini']) +
                             ' date: ' + tctime['dayofsim'].strftime('%Y-%m-%d'))
        # make room for long title
        fg.subplots_adjust(top=0.8)
        ax.set_title(titlemean, fontsize=9)
        ax.yaxis.set_major_locator(MultipleLocator(100))
        ax.yaxis.set_minor_locator(MultipleLocator(20))
        # ax.xaxis.set_major_locator(MinuteLocator(interval=10))
        ax.xaxis.set_major_locator(MinuteLocator(interval=1))
        ax.xaxis.set_minor_locator(SecondLocator(interval=10))
        ax.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
        ax.tick_params(axis='both', which='both', direction='out', labelsize=12)
        ax.autoscale(True, tight=True)
        cbar = fg.colorbar(pcm)
        cbar.set_label('VER/flux', labelpad=0)
        ax.set_xlabel('Time [UTC]')
        ax.set_ylabel('altitude [km]')
        if saveplot:
            sfn = ''.join(e for e in titxt if e.isalnum() or e == '.')  # remove special characters
            fg.savefig('out/VER' + sfn + '.png', dpi=150, bbox_inches='tight')
            close(fg)
    if 'eigtime1d' in makePlots:
        fg = figure(figsize=(11, 8), dpi=100)
        ax = fg.gca()
        # fg.subplots_adjust(top=0.85)
        thistitle = titxt + ': {:d} emission lines\n VER/flux: geodetic lat: {} lon: {} {}'.format(
            ver.shape[0], tctime['latgeo_ini'], tctime['longeo_ini'], tTC[tReqInd])
        ax.set_title(thistitle, fontsize=12)
        ax.set_xlabel('VER/flux')
        ax.set_ylabel('altitude [km]')
        for ifg, clamb in enumerate(lamb):
            # NOTE(review): .iloc is a pandas accessor, not xarray — this looks
            # like it expects ver to behave like a DataFrame here; confirm.
            ax.semilogx(ver.iloc[ifg, :], z, label=str(clamb))
        ax.yaxis.set_major_locator(MultipleLocator(100))
        ax.yaxis.set_minor_locator(MultipleLocator(20))
        ax.grid(True)
        if ver.shape[0] < 20:
            # only label lines when the legend stays readable
            ax.legend(loc='upper center', bbox_to_anchor=(1.05, .95),
                      ncol=1, fancybox=True, shadow=True, fontsize=9)
        ax.tick_params(axis='both', which='both', direction='in', labelsize=12)
        ax.set_xlim(1e-9, 1e3)
        ax.set_ylim((z[0], z[-1]))
        if saveplot:
            sfn = ''.join(e for e in titxt if e.isalnum())  # remove special characters
            fg.savefig('out/VER' + sfn + '.png', dpi=150, bbox_inches='tight')
            close(fg)
def plotspectra(br, optT: xarray.DataArray, E: float, lambminmax: tuple):
    """Stem plots of the auroral line spectrum with and without the BG3 filter.

    br: line brightnesses integrated along the flux tube
    optT: system transmission dataset with 'sys' and 'sysNObg3' variables
    E: beam energy [eV]; used only in the titles
    lambminmax: (min, max) wavelength [nm] x-axis limits
    returns the matplotlib Figure
    """
    spectraAminmax = (1e-1, 8e5)  # for plotting
    spectrallines = (391.44, 427.81, 557.7, 630.0, 777.4, 844.6)  # 297.2, 636.4,762.0, #for plotting

    lamb = optT.wavelength_nm

    def _plotspectrasub(ax, bf, txt):
        # shared axis cosmetics plus a rotated wavelength label at each line
        ax.set_yscale('log')
        ax.set_title('Auroral spectrum, ' + txt +
                     f',integrated along flux tube: $E_0$ = {E:.0f} eV')
        ax.set_ylabel('optical intensity')
        ax.set_xlim(lambminmax)
        ax.set_ylim(spectraAminmax)
        ax.xaxis.set_major_locator(MultipleLocator(100))
        # ax.invert_xaxis()
        for l in spectrallines:
            ax.text(l, bf[l]*1.7, '{:.1f}'.format(l),
                    ha='center', va='bottom', fontsize='medium', rotation=60)
# %%
    # BUGFIX: figsize is a Figure() argument; Figure.subplots() does not
    # accept it and would raise TypeError.
    fg = figure(figsize=(10, 8))
    ax1, ax2 = fg.subplots(2, 1, sharex=True)

    bf = br*optT['sysNObg3']
    ax1.stem(lamb, bf)
    _plotspectrasub(ax1, bf, 'no filter')

    bf = br*optT['sys']
    ax2.stem(lamb, bf)
    _plotspectrasub(ax2, bf, 'BG3 filter')
    ax2.set_xlabel('wavelength [nm]')

    return fg
|
scivision/gridaurora
|
gridaurora/opticalmod.py
|
opticalModel
|
python
|
def opticalModel(sim, ver: xarray.DataArray, obsAlt_km: float, zenithang: float):
    """Apply the optical system transmission to a VER spectrum and collapse
    it over wavelength ("grayscale" brightness as the camera would see).

    sim: object with .bg3fn, .windowfn, .qefn (filter files) and
         .opticalfilter ('bg3' or 'none'; anything else falls back to 'none')
    ver: Nalt x Nwavelength VER DataArray
    obsAlt_km: observer altitude [km]
    zenithang: zenith angle [deg]
    returns: VER weighted by transmission, summed over wavelength_nm
    """
    assert isinstance(ver, xarray.DataArray)
# %% get system optical transmission T
    optT = getSystemT(ver.wavelength_nm, sim.bg3fn, sim.windowfn, sim.qefn, obsAlt_km, zenithang)
# %% first multiply VER by T, THEN sum overall wavelengths
    if sim.opticalfilter == 'bg3':
        VERgray = (ver*optT['sys'].values[None, :]).sum('wavelength_nm')
    elif sim.opticalfilter == 'none':
        VERgray = (ver*optT['sysNObg3'].values[None, :]).sum('wavelength_nm')
    else:
        logging.warning(f'unknown OpticalFilter type: {sim.opticalfilter}'
                        ' falling back to using no filter at all')
        VERgray = (ver*optT['sysNObg3'].values[None, :]).sum('wavelength_nm')

    return VERgray
|
ver: Nalt x Nwavelength
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/opticalmod.py#L7-L25
|
[
"def getSystemT(newLambda, bg3fn: Path, windfn: Path, qefn: Path,\n obsalt_km, zenang_deg, verbose: bool = False) -> xarray.Dataset:\n\n bg3fn = Path(bg3fn).expanduser()\n windfn = Path(windfn).expanduser()\n qefn = Path(qefn).expanduser()\n\n newLambda = np.asarray(newLambda)\n# %% atmospheric absorption\n if lowtran is not None:\n c1 = {'model': 5, 'h1': obsalt_km, 'angle': zenang_deg,\n 'wlshort': newLambda[0], 'wllong': newLambda[-1]}\n if verbose:\n print('loading LOWTRAN7 atmosphere model...')\n atmT = lowtran.transmittance(c1)['transmission'].squeeze()\n try:\n atmTcleaned = atmT.values.squeeze()\n atmTcleaned[atmTcleaned == 0] = np.spacing(1) # to avoid log10(0)\n fwl = interp1d(atmT.wavelength_nm, np.log(atmTcleaned), axis=0)\n except AttributeError: # problem with lowtran\n fwl = interp1d(newLambda, np.log(np.ones_like(newLambda)), kind='linear')\n else:\n fwl = interp1d(newLambda, np.log(np.ones_like(newLambda)), kind='linear')\n\n atmTinterp = np.exp(fwl(newLambda))\n if not np.isfinite(atmTinterp).all():\n logging.error('problem in computing LOWTRAN atmospheric attenuation, results are suspect!')\n# %% BG3 filter\n with h5py.File(bg3fn, 'r') as f:\n try:\n assert isinstance(f['/T'], h5py.Dataset), 'we only allow one transmission curve per file' # simple legacy behavior\n fbg3 = interp1d(f['/wavelength'], np.log(f['/T']), kind='linear', bounds_error=False)\n except KeyError:\n raise KeyError('could not find /wavelength in {}'.format(f.filename))\n\n try:\n fname = f['T'].attrs['name'].item()\n if isinstance(fname, bytes):\n fname = fname.decode('utf8')\n except KeyError:\n fname = ''\n# %% camera window\n with h5py.File(windfn, 'r') as f:\n fwind = interp1d(f['/lamb'], np.log(f['/T']), kind='linear')\n# %% quantum efficiency\n with h5py.File(qefn, 'r') as f:\n fqe = interp1d(f['/lamb'], np.log(f['/QE']), kind='linear')\n# %% collect results into DataArray\n\n T = xarray.Dataset({'filter': ('wavelength_nm', np.exp(fbg3(newLambda))),\n 'window': 
('wavelength_nm', np.exp(fwind(newLambda))),\n 'qe': ('wavelength_nm', np.exp(fqe(newLambda))),\n 'atm': ('wavelength_nm', atmTinterp), },\n coords={'wavelength_nm': newLambda},\n attrs={'filename': fname})\n\n T['sysNObg3'] = T['window'] * T['qe'] * T['atm']\n T['sys'] = T['sysNObg3'] * T['filter']\n\n return T\n"
] |
#!/usr/bin/env python
import logging
import xarray
from .filterload import getSystemT
|
scivision/gridaurora
|
MakeIonoEigenprofile.py
|
main
|
python
|
def main():
    """CLI entry point: build a unit differential-number-flux grid, run the
    chosen auroral model (glow / rees / transcar), then write and plot the
    resulting ionospheric eigenprofiles.
    """
    p = ArgumentParser(description='Makes unit flux eV^-1 as input to GLOW or Transcar to create ionospheric eigenprofiles')
    p.add_argument('-i', '--inputgridfn', help='original Zettergren input flux grid to base off of', default='zettflux.csv')
    p.add_argument('-o', '--outfn', help='hdf5 file to write with ionospheric response (eigenprofiles)')
    p.add_argument('-t', '--simtime', help='yyyy-mm-ddTHH:MM:SSZ time of sim', nargs='+', default=['1999-12-21T00:00:00Z'])
    p.add_argument('-c', '--latlon', help='geodetic latitude/longitude (deg)', type=float, nargs=2, default=[65, -148.])
    # BUGFIX: this option had been commented out, but p.makeplot is referenced
    # in the 'glow' branch below, which raised AttributeError.
    p.add_argument('-m', '--makeplot', help='show to show plots, png to save pngs of plots', nargs='+', default=['show'])
    p.add_argument('-M', '--model', help='specify auroral model (glow,rees,transcar)', default='glow')
    p.add_argument('-z', '--zlim', help='minimum,maximum altitude [km] to plot', nargs=2, default=(None, None), type=float)
    p.add_argument('--isotropic', help='(rees model only) isotropic or non-isotropic pitch angle', action='store_true')
    p.add_argument('--vlim', help='plotting limits on energy dep and production plots', nargs=2, type=float, default=(1e-7, 1e1))
    p = p.parse_args()

    if not p.outfn:
        print('you have not specified an output file with -o options, so I will only plot and not save result')

    # one parsed time, or an hourly range between two given times
    if len(p.simtime) == 1:
        T = [parse(p.simtime[0])]
    elif len(p.simtime) == 2:
        T = list(rrule.rrule(rrule.HOURLY,
                             dtstart=parse(p.simtime[0]),
                             until=parse(p.simtime[1])))
# %% input unit flux
    Egrid = loadregress(Path(p.inputgridfn).expanduser())
    Ebins = makebin(Egrid)[:3]
    EKpcolor, EK, diffnumflux = ekpcolor(Ebins)
# %% ionospheric response
    model = p.model.lower()
    glat, glon = p.latlon
    if model == 'glow':
        ver, photIon, isr, phitop, zceta, sza, prates, lrates, tezs, sion = makeeigen(EK, diffnumflux, T, p.latlon,
                                                                                     p.makeplot, p.outfn, p.zlim)

        writeeigen(p.outfn, EKpcolor, T, ver.z_km, diffnumflux, ver, prates, lrates, tezs, p.latlon)
# %% plots
        # input
        doplot(p.inputgridfn, Ebins)
        # output
        sim = namedtuple('sim', ['reacreq', 'opticalfilter'])
        sim.reacreq = sim.opticalfilter = ''

        for t in ver:  # TODO for each time
            # VER eigenprofiles, summed over wavelength
            ploteigver(EKpcolor, ver.z_km, ver.sum('wavelength_nm'),
                       (None,)*6, sim,
                       '{} Vol. Emis. Rate '.format(t))
            # volume production rate, summed over reaction
            plotprodloss(prates.loc[:, 'final', ...].sum('reaction'),
                         lrates.loc[:, 'final', ...].sum('reaction'),
                         t, glat, glon, p.zlim)
            # energy deposition
            plotenerdep(tezs, t, glat, glon, p.zlim)
    elif model == 'rees':
        assert len(T) == 1, 'only one time with rees for now.'
        z = glowalt()
        q = reesiono(T, z, Ebins.loc[:, 'low'], glat, glon, p.isotropic)
        writeeigen(p.outfn, Ebins, T, z, prates=q, tezs=None, latlon=(glat, glon))
        plotA(q, 'Volume Production Rate {} {} {}'.format(T, glat, glon), p.vlim)
    elif model == 'transcar':
        raise NotImplementedError('Transcar by request')
    else:
        raise NotImplementedError('I am not yet able to handle your model {}'.format(model))
# %% plots
    show()
|
three output eigenprofiles
1) ver (optical emissions) 4-D array: time x energy x altitude x wavelength
2) prates (production) 4-D array: time x energy x altitude x reaction
3) lrates (loss) 4-D array: time x energy x altitude x reaction
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/MakeIonoEigenprofile.py#L35-L112
|
[
"def ploteigver(EKpcolor, zKM, eigenprofile,\n vlim=(None,)*6, sim=None, tInd=None, makeplot=None, prefix=None, progms=None):\n try:\n fg = figure()\n ax = fg.gca()\n # pcolormesh canNOT handle nan at all\n pcm = ax.pcolormesh(EKpcolor, zKM, masked_invalid(eigenprofile),\n edgecolors='none', # cmap=pcmcmap,\n norm=LogNorm(),\n vmin=vlim[4], vmax=vlim[5])\n ax.set_xlabel('Energy [eV]')\n ax.set_ylabel(r'$B_\\parallel$ [km]')\n ax.autoscale(True, tight=True)\n ax.set_xscale('log')\n ax.yaxis.set_major_locator(MultipleLocator(dymaj))\n ax.yaxis.set_minor_locator(MultipleLocator(dymin))\n# %% title\n if tInd is not None:\n mptitle = str(tInd)\n else:\n mptitle = ''\n mptitle += '$P_{{eig}}$'\n if sim:\n mptitle += ', filter: {}'.format(sim.opticalfilter)\n mptitle += str(sim.reacreq)\n\n ax.set_title(mptitle) # ,fontsize=tfs)\n# %% colorbar\n cbar = fg.colorbar(pcm, ax=ax)\n cbar.set_label('[photons cm$^{-3}$s$^{-1}$]', labelpad=0) # ,fontsize=afs)\n # cbar.ax.tick_params(labelsize=afs)\n # cbar.ax.yaxis.get_offset_text().set_size(afs)\n# %% ticks,lim\n ax.tick_params(axis='both', which='both', direction='out')\n ax.set_ylim(vlim[2:4])\n# %%\n writeplots(fg, prefix, tInd, makeplot, progms)\n except Exception as e:\n logging.error('tind {} {}'.format(tInd, e))\n",
"def doplot(fn: Path, bins: xarray.DataArray, Egrid: np.ndarray = None, debug: bool = False):\n # %% main plot\n ax = figure().gca()\n ax.bar(left=bins.loc[:, 'low'],\n height=bins.loc[:, 'flux'],\n width=bins.loc[:, 'high']-bins.loc[:, 'low'])\n ax.set_yscale('log')\n ax.set_xscale('log')\n ax.set_ylabel('flux [s$^{-1}$ sr$^{-1}$ cm$^{-2}$ eV$^{-1}$]')\n ax.set_xlabel('bin energy [eV]')\n ax.set_title(f'Input flux used to generate eigenprofiles, based on {fn}')\n\n# %% debug plots\n if debug:\n ax = figure().gca()\n bins[['low', 'high']].plot(logy=True, ax=ax, marker='.')\n ax.set_xlabel('bin number')\n ax.set_ylabel('bin energy [eV]')\n\n ax = figure().gca()\n bins['flux'].plot(logy=True, ax=ax, marker='.')\n ax.set_xlabel('bin number')\n ax.set_ylabel('flux [s$^{-1}$ sr$^{-1}$ cm$^{-2}$ eV$^{-1}$]')\n\n if Egrid is not None:\n ax = figure().gca()\n ax.plot(Egrid, marker='.')\n # ax.plot(Ematt,marker='.',color='k')\n ax.set_yscale('log')\n ax.set_ylabel('eV')\n ax.legend(['E1', 'E2', 'pr1', 'pr2'], loc='best')\n",
"def loadregress(fn: Path):\n # %%\n Egrid = np.loadtxt(Path(fn).expanduser(), delimiter=',')\n# Ematt = asarray([logspace(1.7220248253079387,4.2082263059355824,num=Nold,base=10),\n# #[logspace(3.9651086925197356,9.689799159992674,num=33,base=exp(1)),\n# logspace(1.8031633895706722,4.2851520785250914,num=Nold,base=10)]).T\n# %% log-lin regression\n Enew = np.empty((Nnew, 4))\n Enew[:Nold, :] = Egrid\n for k in range(4):\n s, i = linregress(range(Nold), np.log10(Egrid[:, k]))[:2]\n Enew[Nold:, k] = 10**(np.arange(Nold, Nnew)*s+i)\n\n return Enew\n",
"def makebin(Egrid: np.ndarray):\n E1 = Egrid[:, 0]\n E2 = Egrid[:, 1]\n pr1 = Egrid[:, 2]\n pr2 = Egrid[:, 3]\n\n dE = E2-E1\n Esum = E2+E1\n flux = flux0 / 0.5 / Esum / dE\n Elow = E1 - 0.5*(E1 - pr1)\n Ehigh = E2 - 0.5*(E2 - pr2)\n\n E = np.column_stack((Elow, Ehigh, flux))\n\n Ed = xarray.DataArray(data=E, dims=['energy', 'type'])\n Ed['type'] = ['low', 'high', 'flux']\n\n return Ed\n",
"def writeeigen(fn: Path, Ebins, t, z, diffnumflux=None, ver=None, prates=None, lrates=None,\n tezs=None, latlon=None):\n if not fn:\n return\n\n fn = Path(fn).expanduser()\n\n if fn.suffix != '.h5':\n return\n\n print('writing to', fn)\n\n ut1_unix = to_ut1unix(t)\n\n with h5py.File(fn, 'w') as f:\n bdt = h5py.special_dtype(vlen=bytes)\n d = f.create_dataset('/sensorloc', data=latlon)\n d.attrs['unit'] = 'degrees'\n d.attrs['description'] = 'geographic coordinates'\n# %% input precipitation flux\n d = f.create_dataset('/Ebins', data=Ebins)\n d.attrs['unit'] = 'eV'\n d.attrs['description'] = 'Energy bin edges'\n d = f.create_dataset('/altitude', data=z)\n d.attrs['unit'] = 'km'\n\n d = f.create_dataset('/ut1_unix', data=ut1_unix)\n d.attrs['unit'] = 'sec. since Jan 1, 1970 midnight' # float\n\n if diffnumflux is not None:\n d = f.create_dataset('/diffnumflux', data=diffnumflux)\n d.attrs['unit'] = 'cm^-2 s^-1 eV^-1'\n d.attrs['description'] = 'primary electron flux at \"top\" of modeled ionosphere'\n# %% VER\n if isinstance(ver, DataArray):\n d = f.create_dataset('/ver/eigenprofile', data=ver.values, compression='gzip')\n d.attrs['unit'] = 'photons cm^-3 sr^-1 s^-1'\n d.attrs['size'] = 'Ntime x NEnergy x Nalt x Nwavelength'\n\n d = f.create_dataset('/ver/wavelength', data=ver.wavelength_nm)\n d.attrs['unit'] = 'Angstrom'\n# %% prod\n if isinstance(prates, DataArray):\n d = f.create_dataset('/prod/eigenprofile', data=prates.values, compression='gzip')\n d.attrs['unit'] = 'particle cm^-3 sr^-1 s^-1'\n if prates.ndim == 3:\n d.attrs['size'] = 'Ntime x NEnergy x Nalt'\n else: # ndim==4\n d.attrs['size'] = 'Ntime x NEnergy x Nalt x Nreaction'\n d = f.create_dataset('/prod/reaction', data=prates.reaction, dtype=bdt)\n d.attrs['description'] = 'reaction species state'\n# %% loss\n if isinstance(lrates, DataArray):\n d = f.create_dataset('/loss/eigenprofiles', data=lrates.values, compression='gzip')\n d.attrs['unit'] = 'particle cm^-3 sr^-1 s^-1'\n d.attrs['size'] = 'Ntime 
x NEnergy x Nalt x Nreaction'\n d = f.create_dataset('/loss/reaction', data=lrates.reaction, dtype=bdt)\n d.attrs['description'] = 'reaction species state'\n# %% energy deposition\n if isinstance(tezs, DataArray):\n d = f.create_dataset('/energydeposition', data=tezs.values, compression='gzip')\n d.attrs['unit'] = 'ergs cm^-3 s^-1'\n d.attrs['size'] = 'Ntime x Nalt x NEnergies'\n",
"def glowalt() -> np.ndarray:\n # z = range(80,110+1,1)\n z = np.arange(30., 110+1., 1.)\n z = np.append(z, [111.5, 113., 114.5, 116.])\n z = np.append(z, np.arange(118, 150+2, 2.))\n z = np.append(z, np.arange(153, 168+3, 3.))\n z = np.append(z, np.arange(172, 180+4, 4.))\n z = np.append(z, np.arange(185, 205+5, 5))\n z = np.append(z, np.arange(211, 223+6, 6))\n z = np.append(z, np.arange(230, 244+7, 7))\n z = np.append(z, np.arange(252, 300+8, 8))\n z = np.append(z, np.arange(309, 345+9, 9))\n z = np.append(z, np.arange(355, 395+10, 10))\n z = np.append(z, np.arange(406, 428+11, 11))\n z = np.append(z, [440., 453, 467, 482, 498, 515, 533, 551])\n z = np.append(z, np.arange(570, 950+20, 20))\n\n return z\n"
] |
#!/usr/bin/env python
"""
Computes Eigenprofiles of Ionospheric response to flux tube input via the following steps:
1. Generate unit input differential number flux vs. energy
2. Compute ionospheric energy deposition and hence production/loss rates for the modeled kinetic chemistries (12 in total)
unverified for proper scaling, fitted exponential curve to extrapolate original
Zettergren grid from 50eV-18keV up to 100MeV
example:
python MakeIonoEigenprofile.py -t 2013-01-31T09:00:00Z -c 65 -148 -o ~/data/eigen.h5
Michael Hirsch
"""
from argparse import ArgumentParser
from gridaurora.loadtranscargrid import loadregress, makebin, doplot
from gridaurora.writeeigen import writeeigen
from gridaurora.zglow import glowalt
from glowaurora.eigenprof import makeeigen, ekpcolor
from glowaurora.plots import plotprodloss, plotenerdep
from gridaurora.plots import ploteigver
from reesaurora.rees_model import reesiono
from reesaurora.plots import plotA
from pathlib import Path
from collections import namedtuple
from matplotlib.pyplot import show
from dateutil import rrule
from dateutil.parser import parse
import seaborn as sns # optional pretty plots
sns.color_palette(sns.color_palette("cubehelix"))
sns.set(context='talk', style='whitegrid')
sns.set(rc={'image.cmap': 'cubehelix_r'}) # for contour
if __name__ == '__main__':
main()
|
scivision/gridaurora
|
gridaurora/ztanh.py
|
setupz
|
python
|
def setupz(Np: int, zmin: float, gridmin: float, gridmax: float) -> np.ndarray:
    """Build an Np-point altitude grid [km] starting at zmin, with tanh-tapered
    step sizes produced by _ztanh (gridmin = smallest step, gridmax = taper scale).
    """
    steps = _ztanh(Np, gridmin, gridmax)
    grid = zmin + np.cumsum(steps)
    return np.insert(grid, 0, zmin)[:-1]
|
np: number of grid points
zmin: minimum STEP SIZE at minimum grid altitude [km]
gridmin: minimum altitude of grid [km]
gridmax: maximum altitude of grid [km]
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/ztanh.py#L9-L19
|
[
"def _ztanh(Np: int, gridmin: float, gridmax: float) -> np.ndarray:\n \"\"\"\n typically call via setupz instead\n \"\"\"\n x0 = np.linspace(0, 3.14, Np) # arbitrarily picking 3.14 as where tanh gets to 99% of asymptote\n return np.tanh(x0)*gridmax+gridmin\n"
] |
#!/usr/bin/env python
"""
inspired by Matt Zettergren
Michael Hirsch
"""
import numpy as np
def _ztanh(Np: int, gridmin: float, gridmax: float) -> np.ndarray:
"""
typically call via setupz instead
"""
x0 = np.linspace(0, 3.14, Np) # arbitrarily picking 3.14 as where tanh gets to 99% of asymptote
return np.tanh(x0)*gridmax+gridmin
# def zexp(np,gridmin):
# x0 = linspace(0, 1, np)
# return exp(x0)**2+(gridmin-1)
|
scivision/gridaurora
|
gridaurora/ztanh.py
|
_ztanh
|
python
|
def _ztanh(Np: int, gridmin: float, gridmax: float) -> np.ndarray:
x0 = np.linspace(0, 3.14, Np) # arbitrarily picking 3.14 as where tanh gets to 99% of asymptote
return np.tanh(x0)*gridmax+gridmin
|
typically call via setupz instead
|
train
|
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/ztanh.py#L22-L27
| null |
#!/usr/bin/env python
"""
inspired by Matt Zettergren
Michael Hirsch
"""
import numpy as np
def setupz(Np: int, zmin: float, gridmin: float, gridmax: float) -> np.ndarray:
    """
    Np: number of grid points
    zmin: minimum altitude of grid [km] (bottom of grid)
    gridmin: minimum step size, at the bottom of the grid [km]
    gridmax: step-size taper scale [km]; steps grow toward roughly
             gridmin + gridmax (see _ztanh)

    Note: the original docstring swapped the meanings of zmin and gridmin;
    the CLI in this package passes zmin='bottom of grid' and
    gmin/gmax='grid spacing', matching the usage here.
    """
    dz = _ztanh(Np, gridmin, gridmax)
    return np.insert(np.cumsum(dz)+zmin, 0, zmin)[:-1]
# def zexp(np,gridmin):
# x0 = linspace(0, 1, np)
# return exp(x0)**2+(gridmin-1)
|
jacebrowning/comparable
|
comparable/tools.py
|
match_similar
|
python
|
def match_similar(base, items):
    """Return the item from *items* most similar to *base*, or None when
    nothing is similar at all.
    """
    matches = list(find_similar(base, items))
    return max(matches, key=base.similarity) if matches else None  # TODO: make O(n)
|
Get the most similar matching item from a list of items.
@param base: base item to locate best match
@param items: list of items for comparison
@return: most similar matching item or None
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/tools.py#L40-L52
|
[
"def find_similar(base, items):\n \"\"\"Get an iterator of items similar to the base.\n\n @param base: base item to locate best match\n @param items: list of items for comparison\n @return: generator of similar items\n\n \"\"\"\n return (item for item in items if base.similarity(item))\n"
] |
"""Functions to utilize lists of Comparable objects."""
def find_equal(base, items):
    """Yield every item that the base considers equal.

    @param base: base item to find equality
    @param items: list of items for comparison
    @return: generator of equal items
    """
    for candidate in items:
        if base.equality(candidate):
            yield candidate
def match_equal(base, items):
    """Return the first item equivalent to the base, or None.

    @param base: base item to find equality
    @param items: list of items for comparison
    @return: first equivalent item or None
    """
    return next(find_equal(base, items), None)
def find_similar(base, items):
    """Yield every item with a truthy similarity to the base.

    @param base: base item to locate best match
    @param items: list of items for comparison
    @return: generator of similar items
    """
    for candidate in items:
        if base.similarity(candidate):
            yield candidate
def duplicates(base, items):
    """Yield items that are similar to, but not equal to, the base.

    @param base: base item to perform comparison against
    @param items: list of items to compare to the base
    @return: generator of similar-but-unequal items
    """
    return (it for it in items if it.similarity(base) and not it.equality(base))
def sort(base, items):
    """Return items ordered by descending similarity to the base.

    @param base: base item to perform comparison against
    @param items: list of items to compare to the base
    @return: new list, most similar first (stable for ties)
    """
    rank = base.similarity
    return sorted(items, key=rank, reverse=True)
|
jacebrowning/comparable
|
comparable/tools.py
|
duplicates
|
python
|
def duplicates(base, items):
    """Yield each item that is similar to, yet not equal to, the base."""
    for candidate in items:
        if not candidate.similarity(base):
            continue
        if candidate.equality(base):
            continue
        yield candidate
|
Get an iterator of items similar but not equal to the base.
@param base: base item to perform comparison against
@param items: list of items to compare to the base
@return: generator of items sorted by similarity to the base
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/tools.py#L55-L65
| null |
"""Functions to utilize lists of Comparable objects."""
def find_equal(base, items):
    """Lazily produce the items the base reports as equal.

    @param base: base item to find equality
    @param items: list of items for comparison
    @return: generator of equal items
    """
    for other in items:
        if base.equality(other):
            yield other
def match_equal(base, items):
"""Get the first item that is equivalent to the base.
@param base: base item to find equality
@param items: list of items for comparison
@return: first equivalent item or None
"""
for item in find_equal(base, items):
return item
return None
def find_similar(base, items):
"""Get an iterator of items similar to the base.
@param base: base item to locate best match
@param items: list of items for comparison
@return: generator of similar items
"""
return (item for item in items if base.similarity(item))
def match_similar(base, items):
"""Get the most similar matching item from a list of items.
@param base: base item to locate best match
@param items: list of items for comparison
@return: most similar matching item or None
"""
finds = list(find_similar(base, items))
if finds:
return max(finds, key=base.similarity) # TODO: make O(n)
return None
def sort(base, items):
"""Get a sorted list of items ranked in descending similarity.
@param base: base item to perform comparison against
@param items: list of items to compare to the base
@return: list of items sorted by similarity to the base
"""
return sorted(items, key=base.similarity, reverse=True)
|
jacebrowning/comparable
|
comparable/tools.py
|
sort
|
python
|
def sort(base, items):
return sorted(items, key=base.similarity, reverse=True)
|
Get a sorted list of items ranked in descending similarity.
@param base: base item to perform comparison against
@param items: list of items to compare to the base
@return: list of items sorted by similarity to the base
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/tools.py#L68-L76
| null |
"""Functions to utilize lists of Comparable objects."""
def find_equal(base, items):
"""Get an iterator of items equal to the base.
@param base: base item to find equality
@param items: list of items for comparison
@return: generator of equal items
"""
return (item for item in items if base.equality(item))
def match_equal(base, items):
"""Get the first item that is equivalent to the base.
@param base: base item to find equality
@param items: list of items for comparison
@return: first equivalent item or None
"""
for item in find_equal(base, items):
return item
return None
def find_similar(base, items):
"""Get an iterator of items similar to the base.
@param base: base item to locate best match
@param items: list of items for comparison
@return: generator of similar items
"""
return (item for item in items if base.similarity(item))
def match_similar(base, items):
"""Get the most similar matching item from a list of items.
@param base: base item to locate best match
@param items: list of items for comparison
@return: most similar matching item or None
"""
finds = list(find_similar(base, items))
if finds:
return max(finds, key=base.similarity) # TODO: make O(n)
return None
def duplicates(base, items):
"""Get an iterator of items similar but not equal to the base.
@param base: base item to perform comparison against
@param items: list of items to compare to the base
@return: generator of items sorted by similarity to the base
"""
for item in items:
if item.similarity(base) and not item.equality(base):
yield item
|
jacebrowning/comparable
|
comparable/simple.py
|
Number.similarity
|
python
|
def similarity(self, other):
numerator, denominator = sorted((self.value, other.value))
try:
ratio = float(numerator) / denominator
except ZeroDivisionError:
ratio = 0.0 if numerator else 1.0
similarity = self.Similarity(ratio)
return similarity
|
Get similarity as a ratio of the two numbers.
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L44-L52
|
[
"def Similarity(self, value=None): # pylint: disable=C0103\n \"\"\"Constructor for new default Similarities.\"\"\"\n if value is None:\n value = 0.0\n return Similarity(value, threshold=self.threshold)\n"
] |
class Number(_Simple):
"""Comparable positive number."""
threshold = 0.999 # 99.9% similar
def __init__(self, value):
super().__init__(value)
if value < 0:
raise ValueError("Number objects can only be positive")
def equality(self, other):
"""Get equality using floating point equality."""
return float(self) == float(other)
|
jacebrowning/comparable
|
comparable/simple.py
|
Text.similarity
|
python
|
def similarity(self, other):
ratio = SequenceMatcher(a=self.value, b=other.value).ratio()
similarity = self.Similarity(ratio)
return similarity
|
Get similarity as a ratio of the two texts.
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L65-L69
|
[
"def Similarity(self, value=None): # pylint: disable=C0103\n \"\"\"Constructor for new default Similarities.\"\"\"\n if value is None:\n value = 0.0\n return Similarity(value, threshold=self.threshold)\n"
] |
class Text(_Simple):
"""Comparable generic text."""
threshold = 0.83 # "Hello, world!" ~ "hello world"
def equality(self, other):
"""Get equality using string comparison."""
return str(self) == str(other)
|
jacebrowning/comparable
|
comparable/simple.py
|
TextEnum.similarity
|
python
|
def similarity(self, other):
ratio = 1.0 if (str(self).lower() == str(other).lower()) else 0.0
similarity = self.Similarity(ratio)
return similarity
|
Get similarity as a discrete ratio (1.0 or 0.0).
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L78-L82
|
[
"def Similarity(self, value=None): # pylint: disable=C0103\n \"\"\"Constructor for new default Similarities.\"\"\"\n if value is None:\n value = 0.0\n return Similarity(value, threshold=self.threshold)\n"
] |
class TextEnum(Text):
"""Comparable case-insensitive textual enumeration."""
threshold = 1.0 # enumerations must match
|
jacebrowning/comparable
|
comparable/simple.py
|
TextTitle._strip
|
python
|
def _strip(text):
text = text.strip()
text = text.replace(' ', ' ') # remove duplicate spaces
text = text.lower()
for joiner in TextTitle.JOINERS:
text = text.replace(joiner, 'and')
for article in TextTitle.ARTICLES:
if text.startswith(article + ' '):
text = text[len(article) + 1:]
break
return text
|
Strip articles/whitespace and remove case.
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L100-L111
| null |
class TextTitle(Text):
"""Comparable case-insensitive textual titles."""
threshold = 0.93 # "The Cat and the Hat" ~ "cat an' the hat"
ARTICLES = 'a', 'an', 'the' # stripped from the front
JOINERS = '&', '+' # replaced with 'and'
def __init__(self, value):
super().__init__(value)
self.stripped = self._strip(self.value)
logging.debug("stripped %r to %r", self.value, self.stripped)
@staticmethod
def similarity(self, other):
"""Get similarity as a ratio of the stripped text."""
logging.debug("comparing %r and %r...", self.stripped, other.stripped)
ratio = SequenceMatcher(a=self.stripped, b=other.stripped).ratio()
similarity = self.Similarity(ratio)
return similarity
|
jacebrowning/comparable
|
comparable/simple.py
|
TextTitle.similarity
|
python
|
def similarity(self, other):
logging.debug("comparing %r and %r...", self.stripped, other.stripped)
ratio = SequenceMatcher(a=self.stripped, b=other.stripped).ratio()
similarity = self.Similarity(ratio)
return similarity
|
Get similarity as a ratio of the stripped text.
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/simple.py#L113-L118
|
[
"def Similarity(self, value=None): # pylint: disable=C0103\n \"\"\"Constructor for new default Similarities.\"\"\"\n if value is None:\n value = 0.0\n return Similarity(value, threshold=self.threshold)\n"
] |
class TextTitle(Text):
"""Comparable case-insensitive textual titles."""
threshold = 0.93 # "The Cat and the Hat" ~ "cat an' the hat"
ARTICLES = 'a', 'an', 'the' # stripped from the front
JOINERS = '&', '+' # replaced with 'and'
def __init__(self, value):
super().__init__(value)
self.stripped = self._strip(self.value)
logging.debug("stripped %r to %r", self.value, self.stripped)
@staticmethod
def _strip(text):
"""Strip articles/whitespace and remove case."""
text = text.strip()
text = text.replace(' ', ' ') # remove duplicate spaces
text = text.lower()
for joiner in TextTitle.JOINERS:
text = text.replace(joiner, 'and')
for article in TextTitle.ARTICLES:
if text.startswith(article + ' '):
text = text[len(article) + 1:]
break
return text
|
jacebrowning/comparable
|
comparable/base.py
|
equal
|
python
|
def equal(obj1, obj2):
Comparable.log(obj1, obj2, '==')
equality = obj1.equality(obj2)
Comparable.log(obj1, obj2, '==', result=equality)
return equality
|
Calculate equality between two (Comparable) objects.
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L127-L132
|
[
"def equality(self, other):\n \"\"\"Compare two objects for equality.\n\n @param self: first object to compare\n @param other: second object to compare\n\n @return: boolean result of comparison\n\n \"\"\"\n # Compare specified attributes for equality\n cname = self.__class__.__name__\n for aname in self.attributes:\n try:\n attr1 = getattr(self, aname)\n attr2 = getattr(other, aname)\n except AttributeError as error:\n logging.debug(\"%s.%s: %s\", cname, aname, error)\n return False\n self.log(attr1, attr2, '==', cname=cname, aname=aname)\n eql = (attr1 == attr2)\n self.log(attr1, attr2, '==', cname=cname, aname=aname, result=eql)\n if not eql:\n return False\n\n return True\n",
"def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913\n \"\"\"Log the objects being compared and the result.\n\n When no result object is specified, subsequence calls will have an\n increased indentation level. The indentation level is decreased\n once a result object is provided.\n\n @param obj1: first object\n @param obj2: second object\n @param sym: operation being performed ('==' or '%')\n @param cname: name of class (when attributes are being compared)\n @param aname: name of attribute (when attributes are being compared)\n @param result: outcome of comparison\n\n \"\"\"\n fmt = \"{o1} {sym} {o2} : {r}\"\n if cname or aname:\n assert cname and aname # both must be specified\n fmt = \"{c}.{a}: \" + fmt\n\n if result is None:\n result = '...'\n fmt = _Indent.indent(fmt)\n _Indent.more()\n else:\n _Indent.less()\n fmt = _Indent.indent(fmt)\n\n msg = fmt.format(o1=repr(obj1), o2=repr(obj2),\n c=cname, a=aname, sym=sym, r=result)\n logging.info(msg)\n"
] |
"""Abstract base class and similarity functions."""
import logging
from collections import OrderedDict
from abc import ABCMeta, abstractmethod, abstractproperty # pylint: disable=W0611
class _Base(object): # pylint: disable=R0903
"""Shared base class."""
def _repr(self, *args, **kwargs):
"""Return a __repr__ string from the arguments provided to __init__.
@param args: list of arguments to __init__
@param kwargs: dictionary of keyword arguments to __init__
@return: __repr__ string
"""
# Remove unnecessary empty keywords arguments and sort the arguments
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs = OrderedDict(sorted(kwargs.items()))
# Build the __repr__ string pieces
args_repr = ', '.join(repr(arg) for arg in args)
kwargs_repr = ', '.join(k + '=' + repr(v) for k, v in kwargs.items())
if args_repr and kwargs_repr:
kwargs_repr = ', ' + kwargs_repr
name = self.__class__.__name__
return "{}({}{})".format(name, args_repr, kwargs_repr)
class Similarity(_Base): # pylint: disable=R0903
"""Represents the similarity between two objects."""
def __init__(self, value, threshold=1.0):
self.value = float(value)
self.threshold = float(threshold)
def __repr__(self):
return self._repr(self.value, threshold=self.threshold)
def __str__(self):
return "{:.1%} similar".format(self.value)
def __eq__(self, other):
return abs(float(self) - float(other)) < 0.001
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return float(self) < float(other)
def __gt__(self, other):
return float(self) > float(other)
def __bool__(self):
"""In boolean scenarios, similarity is True if the threshold is met."""
return self.value >= self.threshold
def __float__(self):
"""In non-boolean scenarios, similarity is treated like a float."""
return self.value
def __add__(self, other):
return Similarity(self.value + float(other), threshold=self.threshold)
def __radd__(self, other):
return Similarity(float(other) + self.value, threshold=self.threshold)
def __iadd__(self, other):
self.value += float(other)
return self
def __sub__(self, other):
return Similarity(self.value - float(other), threshold=self.threshold)
def __rsub__(self, other):
return Similarity(float(other) - self.value, threshold=self.threshold)
def __isub__(self, other):
self.value -= float(other)
return self
def __mul__(self, other):
return Similarity(self.value * float(other), threshold=self.threshold)
def __rmul__(self, other):
return Similarity(float(other) * self.value, threshold=self.threshold)
def __imul__(self, other):
self.value *= float(other)
return self
def __abs__(self):
return Similarity(abs(self.value), threshold=self.threshold)
def __round__(self, digits):
return Similarity(round(self.value, digits), threshold=self.threshold)
class _Indent(object):
"""Indent formatter for logging calls."""
level = 0
@classmethod
def more(cls):
"""Increase the indent level."""
cls.level += 1
@classmethod
def less(cls):
"""Decrease the indent level."""
cls.level = max(cls.level - 1, 0)
@classmethod
def indent(cls, fmt):
"""Get a new format string with indentation."""
return '| ' * cls.level + fmt
def similar(obj1, obj2):
"""Calculate similarity between two (Comparable) objects."""
Comparable.log(obj1, obj2, '%')
similarity = obj1.similarity(obj2)
Comparable.log(obj1, obj2, '%', result=similarity)
return similarity
class Comparable(_Base, metaclass=ABCMeta):
"""Abstract Base Class for objects that are comparable.
Subclasses directly comparable must override the 'equality' and
'similarity' methods to return a bool and 'Similarity' object,
respectively.
Subclasses comparable by attributes must override the
'attributes' property to define which (Comparable) attributes
should be considered.
Both types of subclasses may also override the 'threshold'
attribute to change the default similarity threshold.
"""
def __eq__(self, other):
"""Map the '==' operator to be a shortcut for "equality"."""
return equal(self, other)
def __ne__(self, other):
return not self == other
def __mod__(self, other):
"""Map the '%' operator to be a shortcut for "similarity"."""
return similar(self, other)
@abstractproperty
def attributes(self): # pragma: no cover, abstract
"""Get an attribute {name: weight} dictionary for comparisons."""
return {}
threshold = 1.0 # ratio for two objects to be considered "similar"
@abstractmethod
def equality(self, other):
"""Compare two objects for equality.
@param self: first object to compare
@param other: second object to compare
@return: boolean result of comparison
"""
# Compare specified attributes for equality
cname = self.__class__.__name__
for aname in self.attributes:
try:
attr1 = getattr(self, aname)
attr2 = getattr(other, aname)
except AttributeError as error:
logging.debug("%s.%s: %s", cname, aname, error)
return False
self.log(attr1, attr2, '==', cname=cname, aname=aname)
eql = (attr1 == attr2)
self.log(attr1, attr2, '==', cname=cname, aname=aname, result=eql)
if not eql:
return False
return True
@abstractmethod
def similarity(self, other):
"""Compare two objects for similarity.
@param self: first object to compare
@param other: second object to compare
@return: L{Similarity} result of comparison
"""
sim = self.Similarity()
total = 0.0
# Calculate similarity ratio for each attribute
cname = self.__class__.__name__
for aname, weight in self.attributes.items():
attr1 = getattr(self, aname, None)
attr2 = getattr(other, aname, None)
self.log(attr1, attr2, '%', cname=cname, aname=aname)
# Similarity is ignored if None on both objects
if attr1 is None and attr2 is None:
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes are both None")
continue
# Similarity is 0 if either attribute is non-Comparable
if not all((isinstance(attr1, Comparable),
isinstance(attr2, Comparable))):
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes not Comparable")
total += weight
continue
# Calculate similarity between the attributes
attr_sim = (attr1 % attr2)
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result=attr_sim)
# Add the similarity to the total
sim += attr_sim * weight
total += weight
# Scale the similarity so the total is 1.0
if total:
sim *= (1.0 / total)
return sim
def Similarity(self, value=None): # pylint: disable=C0103
"""Constructor for new default Similarities."""
if value is None:
value = 0.0
return Similarity(value, threshold=self.threshold)
@staticmethod
def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913
"""Log the objects being compared and the result.
        When no result object is specified, subsequent calls will have an
increased indentation level. The indentation level is decreased
once a result object is provided.
@param obj1: first object
@param obj2: second object
@param sym: operation being performed ('==' or '%')
@param cname: name of class (when attributes are being compared)
@param aname: name of attribute (when attributes are being compared)
@param result: outcome of comparison
"""
fmt = "{o1} {sym} {o2} : {r}"
if cname or aname:
assert cname and aname # both must be specified
fmt = "{c}.{a}: " + fmt
if result is None:
result = '...'
fmt = _Indent.indent(fmt)
_Indent.more()
else:
_Indent.less()
fmt = _Indent.indent(fmt)
msg = fmt.format(o1=repr(obj1), o2=repr(obj2),
c=cname, a=aname, sym=sym, r=result)
logging.info(msg)
class SimpleComparable(Comparable): # pylint: disable=W0223
"""Abstract Base Class for objects that are directly comparable.
Subclasses directly comparable must override the 'equality' and
'similarity' methods to return a bool and 'Similarity' object,
respectively. They may also override the 'threshold' attribute
to change the default similarity threshold.
"""
@property
def attributes(self): # pragma: no cover, abstract
"""A simple comparable does not use the attributes property."""
raise AttributeError()
class CompoundComparable(Comparable): # pylint: disable=W0223
"""Abstract Base Class for objects that are comparable by attributes.
Subclasses comparable by attributes must override the
'attributes' property to define which (Comparable) attributes
should be considered. They may also override the 'threshold'
attribute to change the default similarity threshold.
"""
def equality(self, other):
"""A compound comparable's equality is based on attributes."""
return super().equality(other)
def similarity(self, other):
"""A compound comparable's similarity is based on attributes."""
return super().similarity(other)
|
jacebrowning/comparable
|
comparable/base.py
|
similar
|
python
|
def similar(obj1, obj2):
Comparable.log(obj1, obj2, '%')
similarity = obj1.similarity(obj2)
Comparable.log(obj1, obj2, '%', result=similarity)
return similarity
|
Calculate similarity between two (Comparable) objects.
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L135-L140
|
[
"def similarity(self, other):\n \"\"\"Compare two objects for similarity.\n\n @param self: first object to compare\n @param other: second object to compare\n\n @return: L{Similarity} result of comparison\n\n \"\"\"\n sim = self.Similarity()\n total = 0.0\n\n # Calculate similarity ratio for each attribute\n cname = self.__class__.__name__\n for aname, weight in self.attributes.items():\n\n attr1 = getattr(self, aname, None)\n attr2 = getattr(other, aname, None)\n self.log(attr1, attr2, '%', cname=cname, aname=aname)\n\n # Similarity is ignored if None on both objects\n if attr1 is None and attr2 is None:\n self.log(attr1, attr2, '%', cname=cname, aname=aname,\n result=\"attributes are both None\")\n continue\n\n # Similarity is 0 if either attribute is non-Comparable\n if not all((isinstance(attr1, Comparable),\n isinstance(attr2, Comparable))):\n self.log(attr1, attr2, '%', cname=cname, aname=aname,\n result=\"attributes not Comparable\")\n total += weight\n continue\n\n # Calculate similarity between the attributes\n attr_sim = (attr1 % attr2)\n self.log(attr1, attr2, '%', cname=cname, aname=aname,\n result=attr_sim)\n\n # Add the similarity to the total\n sim += attr_sim * weight\n total += weight\n\n # Scale the similarity so the total is 1.0\n if total:\n sim *= (1.0 / total)\n\n return sim\n",
"def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913\n \"\"\"Log the objects being compared and the result.\n\n When no result object is specified, subsequence calls will have an\n increased indentation level. The indentation level is decreased\n once a result object is provided.\n\n @param obj1: first object\n @param obj2: second object\n @param sym: operation being performed ('==' or '%')\n @param cname: name of class (when attributes are being compared)\n @param aname: name of attribute (when attributes are being compared)\n @param result: outcome of comparison\n\n \"\"\"\n fmt = \"{o1} {sym} {o2} : {r}\"\n if cname or aname:\n assert cname and aname # both must be specified\n fmt = \"{c}.{a}: \" + fmt\n\n if result is None:\n result = '...'\n fmt = _Indent.indent(fmt)\n _Indent.more()\n else:\n _Indent.less()\n fmt = _Indent.indent(fmt)\n\n msg = fmt.format(o1=repr(obj1), o2=repr(obj2),\n c=cname, a=aname, sym=sym, r=result)\n logging.info(msg)\n"
] |
"""Abstract base class and similarity functions."""
import logging
from collections import OrderedDict
from abc import ABCMeta, abstractmethod, abstractproperty # pylint: disable=W0611
class _Base(object): # pylint: disable=R0903
"""Shared base class."""
def _repr(self, *args, **kwargs):
"""Return a __repr__ string from the arguments provided to __init__.
@param args: list of arguments to __init__
@param kwargs: dictionary of keyword arguments to __init__
@return: __repr__ string
"""
# Remove unnecessary empty keywords arguments and sort the arguments
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs = OrderedDict(sorted(kwargs.items()))
# Build the __repr__ string pieces
args_repr = ', '.join(repr(arg) for arg in args)
kwargs_repr = ', '.join(k + '=' + repr(v) for k, v in kwargs.items())
if args_repr and kwargs_repr:
kwargs_repr = ', ' + kwargs_repr
name = self.__class__.__name__
return "{}({}{})".format(name, args_repr, kwargs_repr)
class Similarity(_Base): # pylint: disable=R0903
"""Represents the similarity between two objects."""
def __init__(self, value, threshold=1.0):
self.value = float(value)
self.threshold = float(threshold)
def __repr__(self):
return self._repr(self.value, threshold=self.threshold)
def __str__(self):
return "{:.1%} similar".format(self.value)
def __eq__(self, other):
return abs(float(self) - float(other)) < 0.001
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return float(self) < float(other)
def __gt__(self, other):
return float(self) > float(other)
def __bool__(self):
"""In boolean scenarios, similarity is True if the threshold is met."""
return self.value >= self.threshold
def __float__(self):
"""In non-boolean scenarios, similarity is treated like a float."""
return self.value
def __add__(self, other):
return Similarity(self.value + float(other), threshold=self.threshold)
def __radd__(self, other):
return Similarity(float(other) + self.value, threshold=self.threshold)
def __iadd__(self, other):
self.value += float(other)
return self
def __sub__(self, other):
return Similarity(self.value - float(other), threshold=self.threshold)
def __rsub__(self, other):
return Similarity(float(other) - self.value, threshold=self.threshold)
def __isub__(self, other):
self.value -= float(other)
return self
def __mul__(self, other):
return Similarity(self.value * float(other), threshold=self.threshold)
def __rmul__(self, other):
return Similarity(float(other) * self.value, threshold=self.threshold)
def __imul__(self, other):
self.value *= float(other)
return self
def __abs__(self):
return Similarity(abs(self.value), threshold=self.threshold)
def __round__(self, digits):
return Similarity(round(self.value, digits), threshold=self.threshold)
class _Indent(object):
"""Indent formatter for logging calls."""
level = 0
@classmethod
def more(cls):
"""Increase the indent level."""
cls.level += 1
@classmethod
def less(cls):
"""Decrease the indent level."""
cls.level = max(cls.level - 1, 0)
@classmethod
def indent(cls, fmt):
"""Get a new format string with indentation."""
return '| ' * cls.level + fmt
def equal(obj1, obj2):
"""Calculate equality between two (Comparable) objects."""
Comparable.log(obj1, obj2, '==')
equality = obj1.equality(obj2)
Comparable.log(obj1, obj2, '==', result=equality)
return equality
class Comparable(_Base, metaclass=ABCMeta):
"""Abstract Base Class for objects that are comparable.
Subclasses directly comparable must override the 'equality' and
'similarity' methods to return a bool and 'Similarity' object,
respectively.
Subclasses comparable by attributes must override the
'attributes' property to define which (Comparable) attributes
should be considered.
Both types of subclasses may also override the 'threshold'
attribute to change the default similarity threshold.
"""
def __eq__(self, other):
"""Map the '==' operator to be a shortcut for "equality"."""
return equal(self, other)
def __ne__(self, other):
return not self == other
def __mod__(self, other):
"""Map the '%' operator to be a shortcut for "similarity"."""
return similar(self, other)
@abstractproperty
def attributes(self): # pragma: no cover, abstract
"""Get an attribute {name: weight} dictionary for comparisons."""
return {}
threshold = 1.0 # ratio for two objects to be considered "similar"
@abstractmethod
def equality(self, other):
"""Compare two objects for equality.
@param self: first object to compare
@param other: second object to compare
@return: boolean result of comparison
"""
# Compare specified attributes for equality
cname = self.__class__.__name__
for aname in self.attributes:
try:
attr1 = getattr(self, aname)
attr2 = getattr(other, aname)
except AttributeError as error:
logging.debug("%s.%s: %s", cname, aname, error)
return False
self.log(attr1, attr2, '==', cname=cname, aname=aname)
eql = (attr1 == attr2)
self.log(attr1, attr2, '==', cname=cname, aname=aname, result=eql)
if not eql:
return False
return True
@abstractmethod
def similarity(self, other):
"""Compare two objects for similarity.
@param self: first object to compare
@param other: second object to compare
@return: L{Similarity} result of comparison
"""
sim = self.Similarity()
total = 0.0
# Calculate similarity ratio for each attribute
cname = self.__class__.__name__
for aname, weight in self.attributes.items():
attr1 = getattr(self, aname, None)
attr2 = getattr(other, aname, None)
self.log(attr1, attr2, '%', cname=cname, aname=aname)
# Similarity is ignored if None on both objects
if attr1 is None and attr2 is None:
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes are both None")
continue
# Similarity is 0 if either attribute is non-Comparable
if not all((isinstance(attr1, Comparable),
isinstance(attr2, Comparable))):
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes not Comparable")
total += weight
continue
# Calculate similarity between the attributes
attr_sim = (attr1 % attr2)
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result=attr_sim)
# Add the similarity to the total
sim += attr_sim * weight
total += weight
# Scale the similarity so the total is 1.0
if total:
sim *= (1.0 / total)
return sim
def Similarity(self, value=None): # pylint: disable=C0103
"""Constructor for new default Similarities."""
if value is None:
value = 0.0
return Similarity(value, threshold=self.threshold)
@staticmethod
def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913
"""Log the objects being compared and the result.
        When no result object is specified, subsequent calls will have an
increased indentation level. The indentation level is decreased
once a result object is provided.
@param obj1: first object
@param obj2: second object
@param sym: operation being performed ('==' or '%')
@param cname: name of class (when attributes are being compared)
@param aname: name of attribute (when attributes are being compared)
@param result: outcome of comparison
"""
fmt = "{o1} {sym} {o2} : {r}"
if cname or aname:
assert cname and aname # both must be specified
fmt = "{c}.{a}: " + fmt
if result is None:
result = '...'
fmt = _Indent.indent(fmt)
_Indent.more()
else:
_Indent.less()
fmt = _Indent.indent(fmt)
msg = fmt.format(o1=repr(obj1), o2=repr(obj2),
c=cname, a=aname, sym=sym, r=result)
logging.info(msg)
class SimpleComparable(Comparable): # pylint: disable=W0223
"""Abstract Base Class for objects that are directly comparable.
Subclasses directly comparable must override the 'equality' and
'similarity' methods to return a bool and 'Similarity' object,
respectively. They may also override the 'threshold' attribute
to change the default similarity threshold.
"""
@property
def attributes(self): # pragma: no cover, abstract
"""A simple comparable does not use the attributes property."""
raise AttributeError()
class CompoundComparable(Comparable): # pylint: disable=W0223
"""Abstract Base Class for objects that are comparable by attributes.
Subclasses comparable by attributes must override the
'attributes' property to define which (Comparable) attributes
should be considered. They may also override the 'threshold'
attribute to change the default similarity threshold.
"""
def equality(self, other):
"""A compound comparable's equality is based on attributes."""
return super().equality(other)
def similarity(self, other):
"""A compound comparable's similarity is based on attributes."""
return super().similarity(other)
|
jacebrowning/comparable
|
comparable/base.py
|
_Base._repr
|
python
|
def _repr(self, *args, **kwargs):
# Remove unnecessary empty keywords arguments and sort the arguments
kwargs = {k: v for k, v in kwargs.items() if v is not None}
kwargs = OrderedDict(sorted(kwargs.items()))
# Build the __repr__ string pieces
args_repr = ', '.join(repr(arg) for arg in args)
kwargs_repr = ', '.join(k + '=' + repr(v) for k, v in kwargs.items())
if args_repr and kwargs_repr:
kwargs_repr = ', ' + kwargs_repr
name = self.__class__.__name__
return "{}({}{})".format(name, args_repr, kwargs_repr)
|
Return a __repr__ string from the arguments provided to __init__.
@param args: list of arguments to __init__
@param kwargs: dictionary of keyword arguments to __init__
@return: __repr__ string
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L12-L31
| null |
class _Base(object): # pylint: disable=R0903
"""Shared base class."""
|
jacebrowning/comparable
|
comparable/base.py
|
Comparable.equality
|
python
|
def equality(self, other):
# Compare specified attributes for equality
cname = self.__class__.__name__
for aname in self.attributes:
try:
attr1 = getattr(self, aname)
attr2 = getattr(other, aname)
except AttributeError as error:
logging.debug("%s.%s: %s", cname, aname, error)
return False
self.log(attr1, attr2, '==', cname=cname, aname=aname)
eql = (attr1 == attr2)
self.log(attr1, attr2, '==', cname=cname, aname=aname, result=eql)
if not eql:
return False
return True
|
Compare two objects for equality.
@param self: first object to compare
@param other: second object to compare
@return: boolean result of comparison
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L179-L203
|
[
"def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913\n \"\"\"Log the objects being compared and the result.\n\n When no result object is specified, subsequence calls will have an\n increased indentation level. The indentation level is decreased\n once a result object is provided.\n\n @param obj1: first object\n @param obj2: second object\n @param sym: operation being performed ('==' or '%')\n @param cname: name of class (when attributes are being compared)\n @param aname: name of attribute (when attributes are being compared)\n @param result: outcome of comparison\n\n \"\"\"\n fmt = \"{o1} {sym} {o2} : {r}\"\n if cname or aname:\n assert cname and aname # both must be specified\n fmt = \"{c}.{a}: \" + fmt\n\n if result is None:\n result = '...'\n fmt = _Indent.indent(fmt)\n _Indent.more()\n else:\n _Indent.less()\n fmt = _Indent.indent(fmt)\n\n msg = fmt.format(o1=repr(obj1), o2=repr(obj2),\n c=cname, a=aname, sym=sym, r=result)\n logging.info(msg)\n"
] |
class Comparable(_Base, metaclass=ABCMeta):
"""Abstract Base Class for objects that are comparable.
Subclasses directly comparable must override the 'equality' and
'similarity' methods to return a bool and 'Similarity' object,
respectively.
Subclasses comparable by attributes must override the
'attributes' property to define which (Comparable) attributes
should be considered.
Both types of subclasses may also override the 'threshold'
attribute to change the default similarity threshold.
"""
def __eq__(self, other):
"""Map the '==' operator to be a shortcut for "equality"."""
return equal(self, other)
def __ne__(self, other):
return not self == other
def __mod__(self, other):
"""Map the '%' operator to be a shortcut for "similarity"."""
return similar(self, other)
@abstractproperty
def attributes(self): # pragma: no cover, abstract
"""Get an attribute {name: weight} dictionary for comparisons."""
return {}
threshold = 1.0 # ratio for two objects to be considered "similar"
@abstractmethod
@abstractmethod
def similarity(self, other):
"""Compare two objects for similarity.
@param self: first object to compare
@param other: second object to compare
@return: L{Similarity} result of comparison
"""
sim = self.Similarity()
total = 0.0
# Calculate similarity ratio for each attribute
cname = self.__class__.__name__
for aname, weight in self.attributes.items():
attr1 = getattr(self, aname, None)
attr2 = getattr(other, aname, None)
self.log(attr1, attr2, '%', cname=cname, aname=aname)
# Similarity is ignored if None on both objects
if attr1 is None and attr2 is None:
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes are both None")
continue
# Similarity is 0 if either attribute is non-Comparable
if not all((isinstance(attr1, Comparable),
isinstance(attr2, Comparable))):
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes not Comparable")
total += weight
continue
# Calculate similarity between the attributes
attr_sim = (attr1 % attr2)
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result=attr_sim)
# Add the similarity to the total
sim += attr_sim * weight
total += weight
# Scale the similarity so the total is 1.0
if total:
sim *= (1.0 / total)
return sim
def Similarity(self, value=None): # pylint: disable=C0103
"""Constructor for new default Similarities."""
if value is None:
value = 0.0
return Similarity(value, threshold=self.threshold)
@staticmethod
def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913
"""Log the objects being compared and the result.
When no result object is specified, subsequence calls will have an
increased indentation level. The indentation level is decreased
once a result object is provided.
@param obj1: first object
@param obj2: second object
@param sym: operation being performed ('==' or '%')
@param cname: name of class (when attributes are being compared)
@param aname: name of attribute (when attributes are being compared)
@param result: outcome of comparison
"""
fmt = "{o1} {sym} {o2} : {r}"
if cname or aname:
assert cname and aname # both must be specified
fmt = "{c}.{a}: " + fmt
if result is None:
result = '...'
fmt = _Indent.indent(fmt)
_Indent.more()
else:
_Indent.less()
fmt = _Indent.indent(fmt)
msg = fmt.format(o1=repr(obj1), o2=repr(obj2),
c=cname, a=aname, sym=sym, r=result)
logging.info(msg)
|
jacebrowning/comparable
|
comparable/base.py
|
Comparable.similarity
|
python
|
def similarity(self, other):
sim = self.Similarity()
total = 0.0
# Calculate similarity ratio for each attribute
cname = self.__class__.__name__
for aname, weight in self.attributes.items():
attr1 = getattr(self, aname, None)
attr2 = getattr(other, aname, None)
self.log(attr1, attr2, '%', cname=cname, aname=aname)
# Similarity is ignored if None on both objects
if attr1 is None and attr2 is None:
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes are both None")
continue
# Similarity is 0 if either attribute is non-Comparable
if not all((isinstance(attr1, Comparable),
isinstance(attr2, Comparable))):
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes not Comparable")
total += weight
continue
# Calculate similarity between the attributes
attr_sim = (attr1 % attr2)
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result=attr_sim)
# Add the similarity to the total
sim += attr_sim * weight
total += weight
# Scale the similarity so the total is 1.0
if total:
sim *= (1.0 / total)
return sim
|
Compare two objects for similarity.
@param self: first object to compare
@param other: second object to compare
@return: L{Similarity} result of comparison
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L206-L253
|
[
"def Similarity(self, value=None): # pylint: disable=C0103\n \"\"\"Constructor for new default Similarities.\"\"\"\n if value is None:\n value = 0.0\n return Similarity(value, threshold=self.threshold)\n",
"def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913\n \"\"\"Log the objects being compared and the result.\n\n When no result object is specified, subsequence calls will have an\n increased indentation level. The indentation level is decreased\n once a result object is provided.\n\n @param obj1: first object\n @param obj2: second object\n @param sym: operation being performed ('==' or '%')\n @param cname: name of class (when attributes are being compared)\n @param aname: name of attribute (when attributes are being compared)\n @param result: outcome of comparison\n\n \"\"\"\n fmt = \"{o1} {sym} {o2} : {r}\"\n if cname or aname:\n assert cname and aname # both must be specified\n fmt = \"{c}.{a}: \" + fmt\n\n if result is None:\n result = '...'\n fmt = _Indent.indent(fmt)\n _Indent.more()\n else:\n _Indent.less()\n fmt = _Indent.indent(fmt)\n\n msg = fmt.format(o1=repr(obj1), o2=repr(obj2),\n c=cname, a=aname, sym=sym, r=result)\n logging.info(msg)\n"
] |
class Comparable(_Base, metaclass=ABCMeta):
"""Abstract Base Class for objects that are comparable.
Subclasses directly comparable must override the 'equality' and
'similarity' methods to return a bool and 'Similarity' object,
respectively.
Subclasses comparable by attributes must override the
'attributes' property to define which (Comparable) attributes
should be considered.
Both types of subclasses may also override the 'threshold'
attribute to change the default similarity threshold.
"""
def __eq__(self, other):
"""Map the '==' operator to be a shortcut for "equality"."""
return equal(self, other)
def __ne__(self, other):
return not self == other
def __mod__(self, other):
"""Map the '%' operator to be a shortcut for "similarity"."""
return similar(self, other)
@abstractproperty
def attributes(self): # pragma: no cover, abstract
"""Get an attribute {name: weight} dictionary for comparisons."""
return {}
threshold = 1.0 # ratio for two objects to be considered "similar"
@abstractmethod
def equality(self, other):
"""Compare two objects for equality.
@param self: first object to compare
@param other: second object to compare
@return: boolean result of comparison
"""
# Compare specified attributes for equality
cname = self.__class__.__name__
for aname in self.attributes:
try:
attr1 = getattr(self, aname)
attr2 = getattr(other, aname)
except AttributeError as error:
logging.debug("%s.%s: %s", cname, aname, error)
return False
self.log(attr1, attr2, '==', cname=cname, aname=aname)
eql = (attr1 == attr2)
self.log(attr1, attr2, '==', cname=cname, aname=aname, result=eql)
if not eql:
return False
return True
@abstractmethod
def Similarity(self, value=None): # pylint: disable=C0103
"""Constructor for new default Similarities."""
if value is None:
value = 0.0
return Similarity(value, threshold=self.threshold)
@staticmethod
def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913
"""Log the objects being compared and the result.
When no result object is specified, subsequence calls will have an
increased indentation level. The indentation level is decreased
once a result object is provided.
@param obj1: first object
@param obj2: second object
@param sym: operation being performed ('==' or '%')
@param cname: name of class (when attributes are being compared)
@param aname: name of attribute (when attributes are being compared)
@param result: outcome of comparison
"""
fmt = "{o1} {sym} {o2} : {r}"
if cname or aname:
assert cname and aname # both must be specified
fmt = "{c}.{a}: " + fmt
if result is None:
result = '...'
fmt = _Indent.indent(fmt)
_Indent.more()
else:
_Indent.less()
fmt = _Indent.indent(fmt)
msg = fmt.format(o1=repr(obj1), o2=repr(obj2),
c=cname, a=aname, sym=sym, r=result)
logging.info(msg)
|
jacebrowning/comparable
|
comparable/base.py
|
Comparable.Similarity
|
python
|
def Similarity(self, value=None): # pylint: disable=C0103
if value is None:
value = 0.0
return Similarity(value, threshold=self.threshold)
|
Constructor for new default Similarities.
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L255-L259
| null |
class Comparable(_Base, metaclass=ABCMeta):
"""Abstract Base Class for objects that are comparable.
Subclasses directly comparable must override the 'equality' and
'similarity' methods to return a bool and 'Similarity' object,
respectively.
Subclasses comparable by attributes must override the
'attributes' property to define which (Comparable) attributes
should be considered.
Both types of subclasses may also override the 'threshold'
attribute to change the default similarity threshold.
"""
def __eq__(self, other):
"""Map the '==' operator to be a shortcut for "equality"."""
return equal(self, other)
def __ne__(self, other):
return not self == other
def __mod__(self, other):
"""Map the '%' operator to be a shortcut for "similarity"."""
return similar(self, other)
@abstractproperty
def attributes(self): # pragma: no cover, abstract
"""Get an attribute {name: weight} dictionary for comparisons."""
return {}
threshold = 1.0 # ratio for two objects to be considered "similar"
@abstractmethod
def equality(self, other):
"""Compare two objects for equality.
@param self: first object to compare
@param other: second object to compare
@return: boolean result of comparison
"""
# Compare specified attributes for equality
cname = self.__class__.__name__
for aname in self.attributes:
try:
attr1 = getattr(self, aname)
attr2 = getattr(other, aname)
except AttributeError as error:
logging.debug("%s.%s: %s", cname, aname, error)
return False
self.log(attr1, attr2, '==', cname=cname, aname=aname)
eql = (attr1 == attr2)
self.log(attr1, attr2, '==', cname=cname, aname=aname, result=eql)
if not eql:
return False
return True
@abstractmethod
def similarity(self, other):
"""Compare two objects for similarity.
@param self: first object to compare
@param other: second object to compare
@return: L{Similarity} result of comparison
"""
sim = self.Similarity()
total = 0.0
# Calculate similarity ratio for each attribute
cname = self.__class__.__name__
for aname, weight in self.attributes.items():
attr1 = getattr(self, aname, None)
attr2 = getattr(other, aname, None)
self.log(attr1, attr2, '%', cname=cname, aname=aname)
# Similarity is ignored if None on both objects
if attr1 is None and attr2 is None:
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes are both None")
continue
# Similarity is 0 if either attribute is non-Comparable
if not all((isinstance(attr1, Comparable),
isinstance(attr2, Comparable))):
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes not Comparable")
total += weight
continue
# Calculate similarity between the attributes
attr_sim = (attr1 % attr2)
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result=attr_sim)
# Add the similarity to the total
sim += attr_sim * weight
total += weight
# Scale the similarity so the total is 1.0
if total:
sim *= (1.0 / total)
return sim
@staticmethod
def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913
"""Log the objects being compared and the result.
When no result object is specified, subsequence calls will have an
increased indentation level. The indentation level is decreased
once a result object is provided.
@param obj1: first object
@param obj2: second object
@param sym: operation being performed ('==' or '%')
@param cname: name of class (when attributes are being compared)
@param aname: name of attribute (when attributes are being compared)
@param result: outcome of comparison
"""
fmt = "{o1} {sym} {o2} : {r}"
if cname or aname:
assert cname and aname # both must be specified
fmt = "{c}.{a}: " + fmt
if result is None:
result = '...'
fmt = _Indent.indent(fmt)
_Indent.more()
else:
_Indent.less()
fmt = _Indent.indent(fmt)
msg = fmt.format(o1=repr(obj1), o2=repr(obj2),
c=cname, a=aname, sym=sym, r=result)
logging.info(msg)
|
jacebrowning/comparable
|
comparable/base.py
|
Comparable.log
|
python
|
def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913
fmt = "{o1} {sym} {o2} : {r}"
if cname or aname:
assert cname and aname # both must be specified
fmt = "{c}.{a}: " + fmt
if result is None:
result = '...'
fmt = _Indent.indent(fmt)
_Indent.more()
else:
_Indent.less()
fmt = _Indent.indent(fmt)
msg = fmt.format(o1=repr(obj1), o2=repr(obj2),
c=cname, a=aname, sym=sym, r=result)
logging.info(msg)
|
Log the objects being compared and the result.
When no result object is specified, subsequence calls will have an
increased indentation level. The indentation level is decreased
once a result object is provided.
@param obj1: first object
@param obj2: second object
@param sym: operation being performed ('==' or '%')
@param cname: name of class (when attributes are being compared)
@param aname: name of attribute (when attributes are being compared)
@param result: outcome of comparison
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/base.py#L262-L292
|
[
"def more(cls):\n \"\"\"Increase the indent level.\"\"\"\n cls.level += 1\n",
"def less(cls):\n \"\"\"Decrease the indent level.\"\"\"\n cls.level = max(cls.level - 1, 0)\n",
"def indent(cls, fmt):\n \"\"\"Get a new format string with indentation.\"\"\"\n return '| ' * cls.level + fmt\n"
] |
class Comparable(_Base, metaclass=ABCMeta):
"""Abstract Base Class for objects that are comparable.
Subclasses directly comparable must override the 'equality' and
'similarity' methods to return a bool and 'Similarity' object,
respectively.
Subclasses comparable by attributes must override the
'attributes' property to define which (Comparable) attributes
should be considered.
Both types of subclasses may also override the 'threshold'
attribute to change the default similarity threshold.
"""
def __eq__(self, other):
"""Map the '==' operator to be a shortcut for "equality"."""
return equal(self, other)
def __ne__(self, other):
return not self == other
def __mod__(self, other):
"""Map the '%' operator to be a shortcut for "similarity"."""
return similar(self, other)
@abstractproperty
def attributes(self): # pragma: no cover, abstract
"""Get an attribute {name: weight} dictionary for comparisons."""
return {}
threshold = 1.0 # ratio for two objects to be considered "similar"
@abstractmethod
def equality(self, other):
"""Compare two objects for equality.
@param self: first object to compare
@param other: second object to compare
@return: boolean result of comparison
"""
# Compare specified attributes for equality
cname = self.__class__.__name__
for aname in self.attributes:
try:
attr1 = getattr(self, aname)
attr2 = getattr(other, aname)
except AttributeError as error:
logging.debug("%s.%s: %s", cname, aname, error)
return False
self.log(attr1, attr2, '==', cname=cname, aname=aname)
eql = (attr1 == attr2)
self.log(attr1, attr2, '==', cname=cname, aname=aname, result=eql)
if not eql:
return False
return True
@abstractmethod
def similarity(self, other):
"""Compare two objects for similarity.
@param self: first object to compare
@param other: second object to compare
@return: L{Similarity} result of comparison
"""
sim = self.Similarity()
total = 0.0
# Calculate similarity ratio for each attribute
cname = self.__class__.__name__
for aname, weight in self.attributes.items():
attr1 = getattr(self, aname, None)
attr2 = getattr(other, aname, None)
self.log(attr1, attr2, '%', cname=cname, aname=aname)
# Similarity is ignored if None on both objects
if attr1 is None and attr2 is None:
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes are both None")
continue
# Similarity is 0 if either attribute is non-Comparable
if not all((isinstance(attr1, Comparable),
isinstance(attr2, Comparable))):
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result="attributes not Comparable")
total += weight
continue
# Calculate similarity between the attributes
attr_sim = (attr1 % attr2)
self.log(attr1, attr2, '%', cname=cname, aname=aname,
result=attr_sim)
# Add the similarity to the total
sim += attr_sim * weight
total += weight
# Scale the similarity so the total is 1.0
if total:
sim *= (1.0 / total)
return sim
def Similarity(self, value=None): # pylint: disable=C0103
"""Constructor for new default Similarities."""
if value is None:
value = 0.0
return Similarity(value, threshold=self.threshold)
@staticmethod
|
jacebrowning/comparable
|
comparable/compound.py
|
Group.equality
|
python
|
def equality(self, other):
if not len(self) == len(other):
return False
return super().equality(other)
|
Calculate equality based on equality of all group items.
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/compound.py#L42-L46
|
[
"def equality(self, other):\n \"\"\"A compound comparable's equality is based on attributes.\"\"\"\n return super().equality(other)\n"
] |
class Group(CompoundComparable): # pylint: disable=W0223
"""Comparable list of Comparable items."""
attributes = None # created dynamically
def __init__(self, items):
self.items = items
names = ("item{0}".format(n + 1) for n in range(len(items)))
self.attributes = {name: 1 for name in names}
def __repr__(self):
return self._repr(self.items)
def __getattr__(self, name):
"""Allow self.items[<i>] to be accessed as self.item<i+1>."""
if name.startswith('item'):
try:
index = int(name[4:]) - 1 # "item<n>" -> <n>-1
return self[index]
except ValueError:
logging.debug("%s is not in the form 'item<n>'", name)
except IndexError:
logging.debug("item index %s is out of range", index)
raise AttributeError
def __len__(self):
return len(self.items)
def __getitem__(self, index):
return self.items[index]
def similarity(self, other):
"""Calculate similarity based on best matching permutation of items."""
# Select the longer list as the basis for comparison
if len(self.items) > len(other.items):
first, second = self, other
else:
first, second = other, self
items = list(first.items) # backup items list
length = len(items)
sim = self.Similarity(0.0 if length else 1.0)
# Calculate the similarity for each permutation of items
cname = self.__class__.__name__
for num, perm in enumerate(permutations(items, length), start=1):
first.items = perm
aname = 'items-p{}'.format(num)
self.log(first, second, '%', cname=cname, aname=aname)
permutation_sim = super(Group, first).similarity(second)
self.log(first, second, '%', cname=cname, aname=aname,
result=permutation_sim)
sim = max(sim, permutation_sim)
logging.debug("highest similarity: %s", sim)
first.items = items # restore original items list
return sim
|
jacebrowning/comparable
|
comparable/compound.py
|
Group.similarity
|
python
|
def similarity(self, other):
# Select the longer list as the basis for comparison
if len(self.items) > len(other.items):
first, second = self, other
else:
first, second = other, self
items = list(first.items) # backup items list
length = len(items)
sim = self.Similarity(0.0 if length else 1.0)
# Calculate the similarity for each permutation of items
cname = self.__class__.__name__
for num, perm in enumerate(permutations(items, length), start=1):
first.items = perm
aname = 'items-p{}'.format(num)
self.log(first, second, '%', cname=cname, aname=aname)
permutation_sim = super(Group, first).similarity(second)
self.log(first, second, '%', cname=cname, aname=aname,
result=permutation_sim)
sim = max(sim, permutation_sim)
logging.debug("highest similarity: %s", sim)
first.items = items # restore original items list
return sim
|
Calculate similarity based on best matching permutation of items.
|
train
|
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/compound.py#L48-L75
|
[
"def Similarity(self, value=None): # pylint: disable=C0103\n \"\"\"Constructor for new default Similarities.\"\"\"\n if value is None:\n value = 0.0\n return Similarity(value, threshold=self.threshold)\n",
"def log(obj1, obj2, sym, cname=None, aname=None, result=None): # pylint: disable=R0913\n \"\"\"Log the objects being compared and the result.\n\n When no result object is specified, subsequence calls will have an\n increased indentation level. The indentation level is decreased\n once a result object is provided.\n\n @param obj1: first object\n @param obj2: second object\n @param sym: operation being performed ('==' or '%')\n @param cname: name of class (when attributes are being compared)\n @param aname: name of attribute (when attributes are being compared)\n @param result: outcome of comparison\n\n \"\"\"\n fmt = \"{o1} {sym} {o2} : {r}\"\n if cname or aname:\n assert cname and aname # both must be specified\n fmt = \"{c}.{a}: \" + fmt\n\n if result is None:\n result = '...'\n fmt = _Indent.indent(fmt)\n _Indent.more()\n else:\n _Indent.less()\n fmt = _Indent.indent(fmt)\n\n msg = fmt.format(o1=repr(obj1), o2=repr(obj2),\n c=cname, a=aname, sym=sym, r=result)\n logging.info(msg)\n",
"def similarity(self, other):\n \"\"\"A compound comparable's similarity is based on attributes.\"\"\"\n return super().similarity(other)\n"
] |
class Group(CompoundComparable): # pylint: disable=W0223
"""Comparable list of Comparable items."""
attributes = None # created dynamically
def __init__(self, items):
self.items = items
names = ("item{0}".format(n + 1) for n in range(len(items)))
self.attributes = {name: 1 for name in names}
def __repr__(self):
return self._repr(self.items)
def __getattr__(self, name):
"""Allow self.items[<i>] to be accessed as self.item<i+1>."""
if name.startswith('item'):
try:
index = int(name[4:]) - 1 # "item<n>" -> <n>-1
return self[index]
except ValueError:
logging.debug("%s is not in the form 'item<n>'", name)
except IndexError:
logging.debug("item index %s is out of range", index)
raise AttributeError
def __len__(self):
return len(self.items)
def __getitem__(self, index):
return self.items[index]
def equality(self, other):
"""Calculate equality based on equality of all group items."""
if not len(self) == len(other):
return False
return super().equality(other)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
double_lorgauss
|
python
|
def double_lorgauss(x,p):
mu,sig1,sig2,gam1,gam2,G1,G2 = p
gam1 = float(gam1)
gam2 = float(gam2)
G1 = abs(G1)
G2 = abs(G2)
sig1 = abs(sig1)
sig2 = abs(sig2)
gam1 = abs(gam1)
gab2 = abs(gam2)
L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) -
(gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) +
(gam2/gam1)*(4-G1-G2))
L1 = 4 - G1 - G2 - L2
#print G1,G2,L1,L2
y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\
L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2)
y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\
L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2)
lo = (x < mu)
hi = (x >= mu)
return y1*lo + y2*hi
|
Evaluates a normalized distribution that is a mixture of a double-sided Gaussian and Double-sided Lorentzian.
Parameters
----------
x : float or array-like
Value(s) at which to evaluate distribution
p : array-like
Input parameters: mu (mode of distribution),
sig1 (LH Gaussian width),
sig2 (RH Gaussian width),
gam1 (LH Lorentzian width),
gam2 (RH Lorentzian width),
G1 (LH Gaussian "strength"),
G2 (RH Gaussian "strength").
Returns
-------
values : float or array-like
Double LorGauss distribution evaluated at input(s). If single value provided,
single value returned.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L615-L664
| null |
from __future__ import absolute_import, division, print_function
__author__ = 'Timothy D. Morton <tim.morton@gmail.com>'
"""
Defines objects useful for describing probability distributions.
"""
import numpy as np
import matplotlib.pyplot as plt
import logging
from scipy.interpolate import UnivariateSpline as interpolate
from scipy.integrate import quad
import numpy.random as rand
from scipy.special import erf
from scipy.optimize import leastsq
import pandas as pd
from plotutils import setfig
from .kde import KDE
#figure this generic loading thing out; draft stage currently
def load_distribution(filename,path=''):
fns = pd.read_hdf(filename,path)
store = pd.HDFStore(filename)
if '{}/samples'.format(path) in store:
samples = pd.read_hdf(filename,path+'/samples')
samples = np.array(samples)
minval = fns['vals'].iloc[0]
maxval = fns['vals'].iloc[-1]
pdf = interpolate(fns['vals'],fns['pdf'],s=0)
cdf = interpolate(fns['vals'],fns['cdf'],s=0)
attrs = store.get_storer('{}/fns'.format(path)).attrs
keywords = attrs.keywords
t = attrs.disttype
store.close()
return t.__init__()
class Distribution(object):
"""Base class to describe probability distribution.
Has some minimal functional overlap with scipy.stats random variates
(e.g. `ppf`, `rvs`)
Parameters
----------
pdf : callable
The probability density function to be used. Does not have to be
normalized, but must be non-negative.
cdf : callable, optional
The cumulative distribution function. If not provided, this will
be tabulated from the pdf, as long as minval and maxval are also provided
name : string, optional
The name of the distribution (will be used, for example, to label a plot).
Default is empty string.
minval,maxval : float, optional
The minimum and maximum values of the distribution. The Distribution will
evaluate to zero outside these ranges, and this will also define the range
of the CDF. Defaults are -np.inf and +np.inf. If these are not explicity
provided, then a CDF function must be provided.
norm : float, optional
If not provided, this will be calculated by integrating the pdf from
minval to maxval so that the Distribution is a proper PDF that integrates
to unity. `norm` can be non-unity if desired, but beware, as this will
cause some things to act unexpectedly.
cdf_pts : int, optional
Number of points to tabulate in order to calculate CDF, if not provided.
Default is 500.
keywords : dict, optional
Optional dictionary of keywords; these will be saved with the distribution
when `save_hdf` is called.
Raises
------
ValueError
If `cdf` is not provided and minval or maxval are infinity.
"""
def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
cdf_pts=500,keywords=None):
self.name = name
self.pdf = pdf
self.cdf = cdf
self.minval = minval
self.maxval = maxval
if keywords is None:
self.keywords = {}
else:
self.keywords = keywords
self.keywords['name'] = name
self.keywords['minval'] = minval
self.keywords['maxval'] = maxval
if norm is None:
self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
else:
self.norm = norm
if cdf is None and (minval == -np.inf or maxval == np.inf):
raise ValueError('must provide either explicit cdf function or explicit min/max values')
else: #tabulate & interpolate CDF.
pts = np.linspace(minval,maxval,cdf_pts)
pdfgrid = self(pts)
cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
def cdf(x):
x = np.atleast_1d(x)
y = np.atleast_1d(cdf_fn(x))
y[np.where(x < self.minval)] = 0
y[np.where(x > self.maxval)] = 1
return y
self.cdf = cdf
#define minval_cdf, maxval_cdf
zero_mask = cdfgrid==0
one_mask = cdfgrid==1
if zero_mask.sum()>0:
self.minval_cdf = pts[zero_mask][-1] #last 0 value
if one_mask.sum()>0:
self.maxval_cdf = pts[one_mask][0] #first 1 value
def pctile(self,pct,res=1000):
"""Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float
"""
grid = np.linspace(self.minval,self.maxval,res)
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
ppf = pctile
def save_hdf(self,filename,path='',res=1000,logspace=False):
"""Saves distribution to an HDF5 file.
Saves a pandas `dataframe` object containing tabulated pdf and cdf
values at a specfied resolution. After saving to a particular path, a
distribution may be regenerated using the `Distribution_FromH5` subclass.
Parameters
----------
filename : string
File in which to save the distribution. Should end in .h5.
path : string, optional
Path in which to save the distribution within the .h5 file. By
default this is an empty string, which will lead to saving the
`fns` dataframe at the root level of the file.
res : int, optional
Resolution at which to grid the distribution for saving.
logspace : bool, optional
Sets whether the tabulated function should be gridded with log or
linear spacing. Default will be logspace=False, corresponding
to linear gridding.
"""
if logspace:
vals = np.logspace(np.log10(self.minval),
np.log10(self.maxval),
res)
else:
vals = np.linspace(self.minval,self.maxval,res)
d = {'vals':vals,
'pdf':self(vals),
'cdf':self.cdf(vals)}
df = pd.DataFrame(d)
df.to_hdf(filename,path+'/fns')
if hasattr(self,'samples'):
s = pd.Series(self.samples)
s.to_hdf(filename,path+'/samples')
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/fns'.format(path)).attrs
attrs.keywords = self.keywords
attrs.disttype = type(self)
store.close()
def __call__(self,x):
"""
Evaluates pdf. Forces zero outside of (self.minval,self.maxval). Will return
Parameters
----------
x : float, array-like
Value(s) at which to evaluate PDF.
Returns
-------
pdf : float, array-like
Probability density (or re-normalized density if self.norm was explicity
provided.
"""
y = self.pdf(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
y[(x < self.minval) | (x > self.maxval)] = 0
y /= self.norm
if np.size(x)==1:
return y[0]
else:
return y
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,
self.pctile(0.5),
self.pctile(0.84)-self.pctile(0.5),
self.pctile(0.5)-self.pctile(0.16))
def __repr__(self):
return '<%s object: %s>' % (type(self),str(self))
def plot(self,minval=None,maxval=None,fig=None,log=False,
npts=500,**kwargs):
"""
Plots distribution.
Parameters
----------
minval : float,optional
minimum value to plot. Required if minval of Distribution is
`-np.inf`.
maxval : float, optional
maximum value to plot. Required if maxval of Distribution is
`np.inf`.
fig : None or int, optional
Parameter to pass to `setfig`. If `None`, then a new figure is
created; if a non-zero integer, the plot will go to that figure
(clearing everything first), if zero, then will overplot on
current axes.
log : bool, optional
If `True`, the x-spacing of the points to plot will be logarithmic.
npoints : int, optional
Number of points to plot.
kwargs
Keyword arguments are passed to plt.plot
Raises
------
ValueError
If finite lower and upper bounds are not provided.
"""
if minval is None:
minval = self.minval
if maxval is None:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')
if log:
xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
else:
xs = np.linspace(minval,maxval,npts)
setfig(fig)
plt.plot(xs,self(xs),**kwargs)
plt.xlabel(self.name)
plt.ylim(ymin=0,ymax=self(xs).max()*1.2)
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
    """Returns random samples generated according to the distribution.

    Mirrors basic functionality of the `rvs` method of `scipy.stats`
    random variates.  Implemented by mapping uniform random numbers onto
    the inverse CDF using a closest-matching grid approach.

    Parameters
    ----------
    N : int
        Number of samples to return.
    minval,maxval : float, optional
        Minimum/maximum values to resample.  Should both usually just be
        `None`, which defaults to the tabulated-CDF bounds if defined,
        else `self.minval`/`self.maxval`.
    log : bool, optional
        Whether the inversion grid should be log- or linear-spaced.
    res : int, optional
        Resolution of the CDF grid used.

    Returns
    -------
    values : ndarray
        N samples.

    Raises
    ------
    ValueError
        If maxval/minval are +/- infinity; the grid-based approach
        requires finite bounds.
    """
    N = int(N)
    res = int(res)  # np.linspace/np.logspace require an integer point count
    if minval is None:
        # prefer the bound where the tabulated CDF is exactly 0, if known
        minval = getattr(self, 'minval_cdf', self.minval)
    if maxval is None:
        maxval = getattr(self, 'maxval_cdf', self.maxval)
    if maxval==np.inf or minval==-np.inf:
        raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
    u = rand.random(size=N)
    if log:
        # BUGFIX: was bare `log10`, which is not defined in this module (NameError)
        vals = np.logspace(np.log10(minval), np.log10(maxval), res)
    else:
        vals = np.linspace(minval, maxval, res)
    # the tabulated CDF may have flat stretches; keep only unique y-values
    # so digitize sees a strictly increasing sequence
    ys,yinds = np.unique(self.cdf(vals), return_index=True)
    vals = vals[yinds]
    inds = np.digitize(u,ys)
    return vals[inds]
def rvs(self, *args, **kwargs):
    """Alias for :meth:`resample` (scipy.stats-style name)."""
    return self.resample(*args, **kwargs)
class Distribution_FromH5(Distribution):
    """Creates a Distribution object from one saved to an HDF file.

    File must have a `DataFrame` saved under [path]/fns in the .h5 file,
    containing 'vals', 'pdf', and 'cdf' columns.  If samples are saved in
    the HDF store, they will be restored to this object; so will any saved
    keyword attributes.  These appropriate .h5 files will be created by a
    call to the `save_hdf` method of the generic `Distribution` class.

    Parameters
    ----------
    filename : string
        .h5 file where the distribution is saved.
    path : string, optional
        Path within the .h5 file where the distribution is saved.  By
        default this will be the root level, but can be anywhere.
    kwargs
        Keyword arguments are passed to the `Distribution` constructor.
    """
    def __init__(self,filename,path='',**kwargs):
        store = pd.HDFStore(filename,'r')
        try:
            fns = store[path+'/fns']
            if '{}/samples'.format(path) in store:
                self.samples = np.array(store[path+'/samples'])
            minval = fns['vals'].iloc[0]
            maxval = fns['vals'].iloc[-1]
            pdf = interpolate(fns['vals'],fns['pdf'],s=0,k=1)
            # Check that the tabulated CDF is monotonically increasing.
            # BUGFIX: use positional differences; subtracting the two Series
            # slices aligns on index labels, so the old check compared
            # (mostly) identical elements and never fired.
            d_cdf = np.diff(np.asarray(fns['cdf']))
            if np.any(d_cdf < 0):
                logging.warning('tabulated CDF in {} is not strictly increasing. Recalculating CDF from PDF'.format(filename))
                cdf = None  # Distribution will re-tabulate the CDF from the pdf
            else:
                cdf = interpolate(fns['vals'],fns['cdf'],s=0,k=1)
            Distribution.__init__(self,pdf,cdf,minval=minval,maxval=maxval,
                                  **kwargs)
            try:
                # restore any keyword attributes stored by save_hdf
                keywords = store.get_storer('{}/fns'.format(path)).attrs.keywords
                for kw,val in keywords.items():  # BUGFIX: iteritems() is Py2-only
                    setattr(self,kw,val)
            except AttributeError:
                logging.warning('saved distribution {} does not have keywords or disttype saved; perhaps this distribution was written with an older version.'.format(filename))
        finally:
            store.close()  # BUGFIX: the store handle was previously leaked/reopened
class Empirical_Distribution(Distribution):
    """Distribution defined by a tabulated PDF.

    Parameters
    ----------
    xs : array-like
        x-values at which the PDF is evaluated.
    pdf : array-like
        Values of the pdf at the provided x-values.
    smooth : int or float
        Smoothing parameter used by the spline interpolation.
    kwargs
        Keyword arguments passed to the `Distribution` constructor.
    """
    def __init__(self, xs, pdf, smooth=0, **kwargs):
        pdf /= np.trapz(pdf, xs)  # normalize to unit area before interpolating
        spline = interpolate(xs, pdf, s=smooth)
        Distribution.__init__(self, spline,
                              minval=xs.min(), maxval=xs.max(),
                              keywords={'smooth': smooth}, **kwargs)
class Gaussian_Distribution(Distribution):
    """Normal distribution with the given mu, sigma.

    ***It's probably better to use scipy.stats.norm rather than this
    if you care about numerical precision/speed and don't care about the
    plotting bells/whistles etc. the `Distribution` class provides.***

    Parameters
    ----------
    mu : float
        Mean of the normal distribution.
    sig : float
        Width of the normal distribution.
    kwargs
        Keyword arguments passed to the `Distribution` constructor.
    """
    def __init__(self, mu, sig, **kwargs):
        self.mu = mu
        self.sig = sig
        two_var = 2. * sig**2  # shared by pdf and cdf

        def pdf(x):
            return np.exp(-(x - mu)**2 / two_var) / np.sqrt(np.pi * two_var)

        def cdf(x):
            return 0.5 * (1 + erf((x - mu) / np.sqrt(two_var)))

        # default plotting/tabulation support: +/- 10 sigma around the mean
        kwargs.setdefault('minval', mu - 10 * sig)
        kwargs.setdefault('maxval', mu + 10 * sig)
        Distribution.__init__(self, pdf, cdf,
                              keywords={'mu': self.mu, 'sig': self.sig},
                              **kwargs)

    def __str__(self):
        return '%s = %.2f +/- %.2f' % (self.name, self.mu, self.sig)

    def resample(self, N, **kwargs):
        """Draw N samples directly from the normal distribution."""
        return self.mu + self.sig * rand.normal(size=int(N))
class Hist_Distribution(Distribution):
    """Distribution generated from a histogram of provided samples.

    Uses `np.histogram` to create a histogram using the `bins` keyword,
    then interpolates this histogram to create the pdf passed to the
    `Distribution` constructor.

    Parameters
    ----------
    samples : array-like
        The samples used to create the distribution.
    bins : int or array-like, optional
        Keyword passed to `np.histogram`.  If an integer, the number of
        bins; if array-like, the bin edges.
    equibin : bool, optional
        If True and ``bins`` is an integer ``N``, the bin edges are chosen
        by splitting the sorted data into ``N`` equal-sized groups.
    smooth : int or float
        Smoothing parameter used by the interpolation function.
    order : int
        Spline order for the interpolation.  Default is linear.
    kwargs
        Keyword arguments passed to the `Distribution` constructor.
    """
    def __init__(self,samples,bins=10,equibin=True,smooth=0,order=1,**kwargs):
        self.samples = samples
        if type(bins)==type(10) and equibin:
            # equal-occupancy bins: take every Nth sorted sample as an edge
            N = len(samples)//bins
            sortsamples = np.sort(samples)
            bins = sortsamples[0::N]
            if bins[-1] != sortsamples[-1]:
                # make sure the last edge covers the largest sample
                bins = np.concatenate([bins,np.array([sortsamples[-1]])])
        hist,bins = np.histogram(samples,bins=bins,density=True)
        self.bins = bins  # bin *edges*
        bins = (bins[1:] + bins[:-1])/2.  # from here on `bins` holds bin centers
        pdf_initial = interpolate(bins,hist,s=smooth,k=order)
        def pdf(x):
            x = np.atleast_1d(x)
            y = pdf_initial(x)
            # zero the spline outside the outermost bin edges
            w = np.where((x < self.bins[0]) | (x > self.bins[-1]))
            y[w] = 0
            return y
        # interpolate the normalized cumulative histogram as the CDF
        cdf = interpolate(bins,hist.cumsum()/hist.cumsum().max(),s=smooth,
                          k=order)
        if 'maxval' not in kwargs:
            kwargs['maxval'] = samples.max()
        if 'minval' not in kwargs:
            kwargs['minval'] = samples.min()
        # NOTE(review): stores bin *centers* (post-reassignment), not the
        # edges in self.bins -- confirm whether edges were intended here.
        keywords = {'bins':bins,'smooth':smooth,'order':order}
        Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
    def __str__(self):
        return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
    def plothist(self,fig=None,**kwargs):
        """Plot a histogram of the stored samples using the stored bin edges.

        Parameters
        ----------
        fig : None or int
            Parameter passed to `setfig`.
        kwargs
            Keyword arguments passed to `plt.hist`.
        """
        setfig(fig)
        plt.hist(self.samples,bins=self.bins,**kwargs)
    def resample(self,N):
        """Return a bootstrap resampling (with replacement) of the samples.

        Parameters
        ----------
        N : int
            Number of samples.
        """
        inds = rand.randint(len(self.samples),size=N)
        return self.samples[inds]
    def save_hdf(self,filename,path='',**kwargs):
        # samples attribute is set, so Distribution.save_hdf also stores them
        Distribution.save_hdf(self,filename,path=path,**kwargs)
class Box_Distribution(Distribution):
    """Uniform distribution between provided lower and upper limits.

    Parameters
    ----------
    lo,hi : float
        Lower/upper limits of the distribution.
    kwargs
        Keyword arguments passed to the `Distribution` constructor.
    """
    def __init__(self, lo, hi, **kwargs):
        self.lo = lo
        self.hi = hi

        def pdf(x):
            # constant density; `+ 0*x` broadcasts to match array input
            return 1. / (hi - lo) + 0 * x

        def cdf(x):
            x = np.atleast_1d(x)
            frac = (x - lo) / (hi - lo)
            frac[x < lo] = 0
            frac[x > hi] = 1
            return frac

        Distribution.__init__(self, pdf, cdf, minval=lo, maxval=hi, **kwargs)

    def __str__(self):
        return '%.1f < %s < %.1f' % (self.lo, self.name, self.hi)

    def resample(self, N):
        """Draw N uniform samples between minval and maxval."""
        return self.minval + rand.random(size=N) * (self.maxval - self.minval)
############## Double LorGauss ###########
def double_lorgauss(x,p):
    """Evaluate a normalized mixture of a double-sided Gaussian and a
    double-sided Lorentzian.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate the distribution.
    p : array-like
        Input parameters: mu (mode of distribution),
        sig1 (LH Gaussian width),
        sig2 (RH Gaussian width),
        gam1 (LH Lorentzian width),
        gam2 (RH Lorentzian width),
        G1 (LH Gaussian "strength"),
        G2 (RH Gaussian "strength").
        The Lorentzian strengths L1, L2 are derived so that
        G1 + G2 + L1 + L2 = 4.

    Returns
    -------
    values : float or array-like
        Distribution evaluated at input(s).  If a single value is
        provided, a single value is returned.
    """
    mu,sig1,sig2,gam1,gam2,G1,G2 = p

    # force all widths and strengths positive
    G1 = abs(G1)
    G2 = abs(G2)
    sig1 = abs(sig1)
    sig2 = abs(sig2)
    gam1 = float(abs(gam1))
    # BUGFIX: this line previously assigned to a misspelled 'gab2', so a
    # negative gam2 leaked un-absed into the formulas below.
    gam2 = float(abs(gam2))

    L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) -
                                 (gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) +
                                 (gam2/gam1)*(4-G1-G2))
    L1 = 4 - G1 - G2 - L2

    # left-hand (y1) and right-hand (y2) Gaussian+Lorentzian halves
    y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\
         L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2)
    y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\
         L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2)
    lo = (x < mu)
    hi = (x >= mu)

    return y1*lo + y2*hi
def fit_double_lorgauss(bins,h,Ntry=5):
    """Uses lmfit to fit a "Double LorGauss" distribution to a histogram.

    Uses a grid of starting guesses to try to avoid local minima.

    Parameters
    ----------
    bins, h : array-like
        Bins and heights of a histogram, as returned by, e.g.,
        `np.histogram`.
    Ntry : int, optional
        Spacing of grid for starting guesses.  Will try `Ntry**2`
        different initial values of the "Gaussian strength" parameters
        `G1` and `G2`.

    Returns
    -------
    parameters : tuple
        (mu, sig1, sig2, gam1, gam2, G1, G2) of the best-fit
        "double LorGauss" distribution.

    Raises
    ------
    ImportError
        If the lmfit module is not available.
    """
    try:
        from lmfit import minimize, Parameters
    except ImportError:
        raise ImportError('you need lmfit to use this function.')

    # make sure histogram is normalized
    h /= np.trapz(h,bins)

    # zero-pad the ends of the distribution to keep fits positive
    N = len(bins)
    npad = N // 10  # BUGFIX: N/10 is a float under true division; sizes must be ints
    dbin = (bins[1:]-bins[:-1]).mean()
    newbins = np.concatenate((np.linspace(bins.min() - npad*dbin, bins.min(), npad),
                              bins,
                              np.linspace(bins.max(), bins.max() + npad*dbin, npad)))
    newh = np.concatenate((np.zeros(npad), h, np.zeros(npad)))

    # initial guesses: mode from the peak, width from the half-max point
    # BUGFIX: was bins[np.argmax(newh)], which is offset by the padding
    mu0 = newbins[np.argmax(newh)]
    sig0 = abs(mu0 - newbins[np.argmin(np.absolute(newh - 0.5*newh.max()))])

    def set_params(G1,G2):
        """Fresh lmfit Parameters with the shared starting guesses."""
        params = Parameters()
        params.add('mu',value=mu0)
        params.add('sig1',value=sig0)
        params.add('sig2',value=sig0)
        params.add('gam1',value=sig0/10)
        params.add('gam2',value=sig0/10)
        params.add('G1',value=G1)
        params.add('G2',value=G2)
        return params

    def residual(ps):
        # BUGFIX: use the parameters lmfit passes in rather than the
        # closed-over starting Parameters (modern lmfit does not mutate
        # the input Parameters in place).
        pars = (ps['mu'].value, ps['sig1'].value, ps['sig2'].value,
                ps['gam1'].value, ps['gam2'].value,
                ps['G1'].value, ps['G2'].value)
        return newh - double_lorgauss(newbins, pars)

    sum_devsq_best = np.inf
    outkeep = None
    for G1 in np.linspace(0.1,1.9,Ntry):
        for G2 in np.linspace(0.1,1.9,Ntry):
            out = minimize(residual, set_params(G1,G2))
            pars = (out.params['mu'].value, out.params['sig1'].value,
                    out.params['sig2'].value, out.params['gam1'].value,
                    out.params['gam2'].value, out.params['G1'].value,
                    out.params['G2'].value)
            sum_devsq = ((newh - double_lorgauss(newbins,pars))**2).sum()
            if sum_devsq < sum_devsq_best:
                # keep the best fit over the whole grid of starting guesses
                sum_devsq_best = sum_devsq
                outkeep = out
    return (outkeep.params['mu'].value, abs(outkeep.params['sig1'].value),
            abs(outkeep.params['sig2'].value), abs(outkeep.params['gam1'].value),
            abs(outkeep.params['gam2'].value), abs(outkeep.params['G1'].value),
            abs(outkeep.params['G2'].value))
class DoubleLorGauss_Distribution(Distribution):
    """A "double LorGauss" distribution with the provided parameters.

    Parameters
    ----------
    mu,sig1,sig2,gam1,gam2,G1,G2 : float
        Parameters of the `double_lorgauss` function.
    kwargs
        Keyword arguments passed to the `Distribution` constructor.
    """
    def __init__(self, mu, sig1, sig2, gam1, gam2, G1, G2, **kwargs):
        self.mu = mu
        self.sig1 = sig1
        self.sig2 = sig2
        self.gam1 = gam1
        self.gam2 = gam2
        self.G1 = G1
        self.G2 = G2

        def pdf(x):
            # read parameters off the instance so they stay in sync
            return double_lorgauss(x, (self.mu, self.sig1, self.sig2,
                                       self.gam1, self.gam2,
                                       self.G1, self.G2))

        keywords = dict(mu=mu, sig1=sig1, sig2=sig2,
                        gam1=gam1, gam2=gam2, G1=G1, G2=G2)
        Distribution.__init__(self, pdf, keywords=keywords, **kwargs)
######## DoubleGauss #########
def doublegauss(x, p):
    """Evaluate the normalized two-sided Gaussian distribution.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate the distribution.
    p : array-like
        Parameters of the distribution: (mu: mode,
        sig1: LH width, sig2: RH width).

    Returns
    -------
    value : float or array-like
        Distribution evaluated at the input value(s).  A single input
        value returns a single value.
    """
    mu, sig1, sig2 = p
    x = np.atleast_1d(x)
    # shared amplitude so the two half-Gaussians integrate to unity
    amp = 2. / (np.sqrt(2 * np.pi) * (sig1 + sig2))
    # pick the left width below the mode, the right width at/above it
    sig = np.where(x < mu, sig1, sig2)
    y = amp * np.exp(-(x - mu)**2 / (2. * sig**2))
    return y[0] if np.size(x) == 1 else y
def doublegauss_cdf(x, p):
    """Cumulative distribution function of the two-sided Gaussian.

    Parameters
    ----------
    x : float or array-like
        Input value(s) at which to calculate the CDF.
    p : array-like
        Parameters of the distribution: (mu: mode,
        sig1: LH width, sig2: RH width).

    Returns
    -------
    ndarray
        CDF evaluated at the input(s); always a 1-d array.
    """
    mu, sig1, sig2 = p
    sig1 = np.absolute(sig1)
    sig2 = np.absolute(sig2)
    x = np.atleast_1d(x)
    # probability mass carried by each half is proportional to its width
    w_lo = float(sig1) / (sig1 + sig2)
    w_hi = float(sig2) / (sig1 + sig2)
    below = w_lo * (1 + erf((x - mu) / np.sqrt(2 * sig1**2)))
    above = w_lo + w_hi * erf((x - mu) / np.sqrt(2 * sig2**2))
    return np.where(x < mu, below, above)
def fit_doublegauss_samples(samples, **kwargs):
    """Fit a two-sided Gaussian to a set of samples.

    Calculates the 0.16, 0.5, and 0.84 quantiles and passes these to
    `fit_doublegauss` for fitting.

    Parameters
    ----------
    samples : array-like
        Samples to which to fit the Gaussian.
    kwargs
        Keyword arguments passed to `fit_doublegauss`.
    """
    sorted_samples = np.sort(samples)
    N = len(samples)
    # BUGFIX: N/2 is a float under true division and is not a valid index
    med = sorted_samples[N // 2]
    siglo = med - sorted_samples[int(0.16 * N)]
    sighi = sorted_samples[int(0.84 * N)] - med
    return fit_doublegauss(med, siglo, sighi, median=True, **kwargs)
def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True):
    """Fit a two-sided Gaussian distribution to match a given confidence interval.

    The center of the distribution may be either the median or the mode.

    Parameters
    ----------
    med : float
        The center of the distribution to which to fit.  By default this
        will be the mode unless the `median` keyword is set to True.
    siglo : float
        Value at lower quantile (`q1 = 0.5 - interval/2`) to fit.  Often
        this is the "lower error bar."
    sighi : float
        Value at upper quantile (`q2 = 0.5 + interval/2`) to fit.  Often
        this is the "upper error bar."
    interval : float, optional
        The confidence interval enclosed by the provided error bars.
        Default is 0.683 (1-sigma).
    p0 : array-like, optional
        Initial guess `doublegauss` parameters for the fit
        (`mu, sig1, sig2`).
    median : bool, optional
        Whether to treat the `med` parameter as the median or mode
        (default will be mode).
    return_distribution : bool, optional
        If `True`, return a `DoubleGauss_Distribution` object; otherwise
        return just the fitted parameter tuple.
    """
    if median:
        # fit all three quantiles (lower, median, upper) with mu free
        q1 = 0.5 - (interval/2)
        q2 = 0.5 + (interval/2)
        targetvals = np.array([med-siglo,med,med+sighi])
        qvals = np.array([q1,0.5,q2])
        def objfn(pars):
            logging.debug('{}'.format(pars))
            logging.debug('{} {}'.format(doublegauss_cdf(targetvals,pars),qvals))
            return doublegauss_cdf(targetvals,pars) - qvals
        if p0 is None:
            p0 = [med,siglo,sighi]
        pfit,success = leastsq(objfn,p0)
    else:
        # mode is held fixed at `med`; fit only the two widths
        q1 = 0.5 - (interval/2)
        q2 = 0.5 + (interval/2)
        targetvals = np.array([med-siglo,med+sighi])
        qvals = np.array([q1,q2])
        def objfn(pars):
            params = (med,pars[0],pars[1])
            return doublegauss_cdf(targetvals,params) - qvals
        if p0 is None:
            p0 = [siglo,sighi]
        pfit,success = leastsq(objfn,p0)
        # re-attach the fixed mode to the fitted widths
        pfit = (med,pfit[0],pfit[1])
    if return_distribution:
        dist = DoubleGauss_Distribution(*pfit)
        return dist
    else:
        return pfit
class DoubleGauss_Distribution(Distribution):
    """A Distribution object representing a two-sided Gaussian.

    Consists of two half-Normal distributions patched together at the
    mode and normalized appropriately; useful for slightly asymmetric
    distributions.  The pdf and cdf follow the `doublegauss` and
    `doublegauss_cdf` functions, respectively.

    Parameters
    ----------
    mu : float
        The mode of the distribution.
    siglo : float
        Width of the lower half-Gaussian.
    sighi : float
        Width of the upper half-Gaussian.
    kwargs
        Keyword arguments are passed to the `Distribution` constructor.
    """
    def __init__(self, mu, siglo, sighi, **kwargs):
        self.mu = mu
        self.siglo = float(siglo)
        self.sighi = float(sighi)
        params = (mu, siglo, sighi)

        def pdf(x):
            return doublegauss(x, params)

        def cdf(x):
            return doublegauss_cdf(x, params)

        # default support: 5 widths on each side of the mode
        kwargs.setdefault('minval', mu - 5 * siglo)
        kwargs.setdefault('maxval', mu + 5 * sighi)
        Distribution.__init__(self, pdf, cdf,
                              keywords={'mu': mu, 'siglo': siglo, 'sighi': sighi},
                              **kwargs)

    def __str__(self):
        return '%s = %.2f +%.2f -%.2f' % (self.name, self.mu, self.sighi, self.siglo)

    def resample(self, N, **kwargs):
        """Random resampling: draw both half-Gaussians, then pick a side
        with probability proportional to its width."""
        below = self.mu - np.absolute(rand.normal(size=N) * self.siglo)
        above = self.mu + np.absolute(rand.normal(size=N) * self.sighi)
        u = rand.random(size=N)
        frac_hi = float(self.sighi) / (self.sighi + self.siglo)
        return np.where(u < frac_hi, above, below)
def powerlawfn(alpha, minval, maxval):
    """Return a function evaluating the normalized power-law pdf
    ``C * x**alpha`` on [minval, maxval] (zero outside)."""
    norm = powerlawnorm(alpha, minval, maxval)

    def fn(inpx):
        xx = np.atleast_1d(inpx)
        yy = norm * xx**alpha
        # zero outside the supported range
        yy[(xx < minval) | (xx > maxval)] = 0
        return yy

    return fn
def powerlawnorm(alpha, minval, maxval):
    """Normalization constant C such that C*x**alpha integrates to 1 on
    [minval, maxval].

    Parameters
    ----------
    alpha : float or array-like
        Power-law index (or indices); alpha == -1 uses the logarithmic
        special case.
    minval, maxval : float
        Integration bounds.

    Returns
    -------
    C : float or ndarray
        Normalization constant(s), matching the shape of `alpha`.
    """
    if np.size(alpha) == 1:
        if alpha == -1:
            C = 1 / np.log(maxval / minval)
        else:
            C = (1 + alpha) / (maxval**(1 + alpha) - minval**(1 + alpha))
    else:
        C = np.zeros(np.size(alpha))
        w = np.where(alpha == -1)
        # BUGFIX: original tested len(w[0]>0) -- misplaced paren that only
        # worked by accident because the boolean array has the same length
        if len(w[0]) > 0:
            C[w] = 1. / np.log(maxval / minval)
        nw = np.where(alpha != -1)
        C[nw] = (1 + alpha[nw]) / (maxval**(1 + alpha[nw]) - minval**(1 + alpha[nw]))
    return C
class PowerLaw_Distribution(Distribution):
    """Power-law distribution p(x) proportional to x**alpha on
    [minval, maxval].

    Parameters
    ----------
    alpha : float
        Power-law index.
    minval, maxval : float
        Bounds of the distribution's support.
    kwargs
        Keyword arguments passed to the `Distribution` constructor.
    """
    def __init__(self, alpha, minval, maxval, **kwargs):
        self.alpha = alpha
        pdf = powerlawfn(alpha, minval, maxval)
        # BUGFIX: **kwargs (e.g. name=...) were accepted but silently dropped
        Distribution.__init__(self, pdf, minval=minval, maxval=maxval, **kwargs)
######## KDE ###########
class KDE_Distribution(Distribution):
    """Distribution whose pdf is a kernel density estimate of the samples.

    Parameters
    ----------
    samples : array-like
        Samples from which to construct the KDE.
    adaptive : bool, optional
        Passed to the project-local `KDE` constructor (presumably enables
        adaptive bandwidth -- confirm against `kde.KDE`).
    draw_direct : bool, optional
        Passed to `KDE` (presumably controls how `KDE.resample` draws --
        confirm against `kde.KDE`).
    bandwidth : float or None, optional
        Kernel bandwidth passed to `KDE` (None lets `KDE` choose).
    kwargs
        Keyword arguments passed to the `Distribution` constructor.
    """
    def __init__(self,samples,adaptive=True,draw_direct=True,bandwidth=None,**kwargs):
        self.samples = samples
        self.bandwidth = bandwidth
        # the KDE object is callable and serves directly as the pdf below
        self.kde = KDE(samples,adaptive=adaptive,draw_direct=draw_direct,
                       bandwidth=bandwidth)
        if 'minval' not in kwargs:
            kwargs['minval'] = samples.min()
        if 'maxval' not in kwargs:
            kwargs['maxval'] = samples.max()
        keywords = {'adaptive':adaptive,'draw_direct':draw_direct,
                    'bandwidth':bandwidth}
        Distribution.__init__(self,self.kde,keywords=keywords,**kwargs)
    def save_hdf(self,filename,path='',**kwargs):
        # self.samples is set, so Distribution.save_hdf also stores samples
        Distribution.save_hdf(self,filename,path=path,**kwargs)
    def __str__(self):
        return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
    def resample(self,N,**kwargs):
        # delegate sampling to the KDE object rather than grid inversion
        return self.kde.resample(N,**kwargs)
class KDE_Distribution_Fromtxt(KDE_Distribution):
    """KDE distribution built from samples stored in a plain-text file
    (one value per line, as read by `np.loadtxt`)."""
    def __init__(self, filename, **kwargs):
        data = np.loadtxt(filename)
        KDE_Distribution.__init__(self, data, **kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
fit_double_lorgauss
|
python
|
def fit_double_lorgauss(bins,h,Ntry=5):
try:
from lmfit import minimize, Parameters, Parameter, report_fit
except ImportError:
raise ImportError('you need lmfit to use this function.')
#make sure histogram is normalized
h /= np.trapz(h,bins)
#zero-pad the ends of the distribution to keep fits positive
N = len(bins)
dbin = (bins[1:]-bins[:-1]).mean()
newbins = np.concatenate((np.linspace(bins.min() - N/10*dbin,bins.min(),N/10),
bins,
np.linspace(bins.max(),bins.max() + N/10*dbin,N/10)))
newh = np.concatenate((np.zeros(N/10),h,np.zeros(N/10)))
mu0 = bins[np.argmax(newh)]
sig0 = abs(mu0 - newbins[np.argmin(np.absolute(newh - 0.5*newh.max()))])
def set_params(G1,G2):
params = Parameters()
params.add('mu',value=mu0)
params.add('sig1',value=sig0)
params.add('sig2',value=sig0)
params.add('gam1',value=sig0/10)
params.add('gam2',value=sig0/10)
params.add('G1',value=G1)
params.add('G2',value=G2)
return params
sum_devsq_best = np.inf
outkeep = None
for G1 in np.linspace(0.1,1.9,Ntry):
for G2 in np.linspace(0.1,1.9,Ntry):
params = set_params(G1,G2)
def residual(ps):
pars = (params['mu'].value,
params['sig1'].value,
params['sig2'].value,
params['gam1'].value,
params['gam2'].value,
params['G1'].value,
params['G2'].value)
hmodel = double_lorgauss(newbins,pars)
return newh-hmodel
out = minimize(residual,params)
pars = (out.params['mu'].value,out.params['sig1'].value,
out.params['sig2'].value,out.params['gam1'].value,
out.params['gam2'].value,out.params['G1'].value,
out.params['G2'].value)
sum_devsq = ((newh - double_lorgauss(newbins,pars))**2).sum()
#print 'devs = %.1f; initial guesses for G1, G2; %.1f, %.1f' % (sum_devsq,G1, G2)
if sum_devsq < sum_devsq_best:
sum_devsq_best = sum_devsq
outkeep = out
return (outkeep.params['mu'].value,abs(outkeep.params['sig1'].value),
abs(outkeep.params['sig2'].value),abs(outkeep.params['gam1'].value),
abs(outkeep.params['gam2'].value),abs(outkeep.params['G1'].value),
abs(outkeep.params['G2'].value))
|
Uses lmfit to fit a "Double LorGauss" distribution to a provided histogram.
Uses a grid of starting guesses to try to avoid local minima.
Parameters
----------
bins, h : array-like
Bins and heights of a histogram, as returned by, e.g., `np.histogram`.
Ntry : int, optional
Spacing of grid for starting guesses. Will try `Ntry**2` different
initial values of the "Gaussian strength" parameters `G1` and `G2`.
Returns
-------
parameters : tuple
Parameters of best-fit "double LorGauss" distribution.
Raises
------
ImportError
If the lmfit module is not available.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L666-L754
|
[
"def double_lorgauss(x,p):\n \"\"\"Evaluates a normalized distribution that is a mixture of a double-sided Gaussian and Double-sided Lorentzian.\n\n Parameters\n ----------\n x : float or array-like\n Value(s) at which to evaluate distribution\n\n p : array-like\n Input parameters: mu (mode of distribution),\n sig1 (LH Gaussian width),\n sig2 (RH Gaussian width),\n gam1 (LH Lorentzian width),\n gam2 (RH Lorentzian width),\n G1 (LH Gaussian \"strength\"),\n G2 (RH Gaussian \"strength\").\n\n Returns\n -------\n values : float or array-like\n Double LorGauss distribution evaluated at input(s). If single value provided,\n single value returned. \n \"\"\"\n mu,sig1,sig2,gam1,gam2,G1,G2 = p\n gam1 = float(gam1)\n gam2 = float(gam2)\n\n G1 = abs(G1)\n G2 = abs(G2)\n sig1 = abs(sig1)\n sig2 = abs(sig2)\n gam1 = abs(gam1)\n gab2 = abs(gam2)\n\n L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) - \n (gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) +\n (gam2/gam1)*(4-G1-G2))\n L1 = 4 - G1 - G2 - L2\n\n\n #print G1,G2,L1,L2\n\n y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\\\n L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2)\n y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\\\n L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2)\n lo = (x < mu)\n hi = (x >= mu)\n\n return y1*lo + y2*hi\n",
"def set_params(G1,G2):\n params = Parameters()\n params.add('mu',value=mu0)\n params.add('sig1',value=sig0)\n params.add('sig2',value=sig0)\n params.add('gam1',value=sig0/10)\n params.add('gam2',value=sig0/10)\n params.add('G1',value=G1)\n params.add('G2',value=G2)\n return params\n"
] |
from __future__ import absolute_import, division, print_function
__author__ = 'Timothy D. Morton <tim.morton@gmail.com>'
"""
Defines objects useful for describing probability distributions.
"""
import numpy as np
import matplotlib.pyplot as plt
import logging
from scipy.interpolate import UnivariateSpline as interpolate
from scipy.integrate import quad
import numpy.random as rand
from scipy.special import erf
from scipy.optimize import leastsq
import pandas as pd
from plotutils import setfig
from .kde import KDE
#figure this generic loading thing out; draft stage currently
def load_distribution(filename,path=''):
    """Draft generic loader for a Distribution saved with `save_hdf`.

    NOTE(review): explicitly draft-stage (see comment above) and appears
    broken: `t.__init__()` calls the stored class's __init__ with no
    instance and no arguments, so it will raise rather than return a
    Distribution.  The tabulated pdf/cdf/samples read below are never
    used.  Prefer `Distribution_FromH5` until this is finished.
    """
    fns = pd.read_hdf(filename,path)
    # NOTE(review): store is only closed on the success path; an exception
    # above `store.close()` leaks the handle.
    store = pd.HDFStore(filename)
    if '{}/samples'.format(path) in store:
        samples = pd.read_hdf(filename,path+'/samples')
        samples = np.array(samples)
    minval = fns['vals'].iloc[0]
    maxval = fns['vals'].iloc[-1]
    pdf = interpolate(fns['vals'],fns['pdf'],s=0)
    cdf = interpolate(fns['vals'],fns['cdf'],s=0)
    attrs = store.get_storer('{}/fns'.format(path)).attrs
    keywords = attrs.keywords
    t = attrs.disttype  # class object stored by Distribution.save_hdf
    store.close()
    return t.__init__()
class Distribution(object):
"""Base class to describe probability distribution.
Has some minimal functional overlap with scipy.stats random variates
(e.g. `ppf`, `rvs`)
Parameters
----------
pdf : callable
The probability density function to be used. Does not have to be
normalized, but must be non-negative.
cdf : callable, optional
The cumulative distribution function. If not provided, this will
be tabulated from the pdf, as long as minval and maxval are also provided
name : string, optional
The name of the distribution (will be used, for example, to label a plot).
Default is empty string.
minval,maxval : float, optional
The minimum and maximum values of the distribution. The Distribution will
evaluate to zero outside these ranges, and this will also define the range
of the CDF. Defaults are -np.inf and +np.inf. If these are not explicity
provided, then a CDF function must be provided.
norm : float, optional
If not provided, this will be calculated by integrating the pdf from
minval to maxval so that the Distribution is a proper PDF that integrates
to unity. `norm` can be non-unity if desired, but beware, as this will
cause some things to act unexpectedly.
cdf_pts : int, optional
Number of points to tabulate in order to calculate CDF, if not provided.
Default is 500.
keywords : dict, optional
Optional dictionary of keywords; these will be saved with the distribution
when `save_hdf` is called.
Raises
------
ValueError
If `cdf` is not provided and minval or maxval are infinity.
"""
def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
cdf_pts=500,keywords=None):
self.name = name
self.pdf = pdf
self.cdf = cdf
self.minval = minval
self.maxval = maxval
if keywords is None:
self.keywords = {}
else:
self.keywords = keywords
self.keywords['name'] = name
self.keywords['minval'] = minval
self.keywords['maxval'] = maxval
if norm is None:
self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
else:
self.norm = norm
if cdf is None and (minval == -np.inf or maxval == np.inf):
raise ValueError('must provide either explicit cdf function or explicit min/max values')
else: #tabulate & interpolate CDF.
pts = np.linspace(minval,maxval,cdf_pts)
pdfgrid = self(pts)
cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
def cdf(x):
x = np.atleast_1d(x)
y = np.atleast_1d(cdf_fn(x))
y[np.where(x < self.minval)] = 0
y[np.where(x > self.maxval)] = 1
return y
self.cdf = cdf
#define minval_cdf, maxval_cdf
zero_mask = cdfgrid==0
one_mask = cdfgrid==1
if zero_mask.sum()>0:
self.minval_cdf = pts[zero_mask][-1] #last 0 value
if one_mask.sum()>0:
self.maxval_cdf = pts[one_mask][0] #first 1 value
def pctile(self,pct,res=1000):
"""Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float
"""
grid = np.linspace(self.minval,self.maxval,res)
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
ppf = pctile
def save_hdf(self,filename,path='',res=1000,logspace=False):
"""Saves distribution to an HDF5 file.
Saves a pandas `dataframe` object containing tabulated pdf and cdf
values at a specfied resolution. After saving to a particular path, a
distribution may be regenerated using the `Distribution_FromH5` subclass.
Parameters
----------
filename : string
File in which to save the distribution. Should end in .h5.
path : string, optional
Path in which to save the distribution within the .h5 file. By
default this is an empty string, which will lead to saving the
`fns` dataframe at the root level of the file.
res : int, optional
Resolution at which to grid the distribution for saving.
logspace : bool, optional
Sets whether the tabulated function should be gridded with log or
linear spacing. Default will be logspace=False, corresponding
to linear gridding.
"""
if logspace:
vals = np.logspace(np.log10(self.minval),
np.log10(self.maxval),
res)
else:
vals = np.linspace(self.minval,self.maxval,res)
d = {'vals':vals,
'pdf':self(vals),
'cdf':self.cdf(vals)}
df = pd.DataFrame(d)
df.to_hdf(filename,path+'/fns')
if hasattr(self,'samples'):
s = pd.Series(self.samples)
s.to_hdf(filename,path+'/samples')
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/fns'.format(path)).attrs
attrs.keywords = self.keywords
attrs.disttype = type(self)
store.close()
def __call__(self,x):
"""
Evaluates pdf. Forces zero outside of (self.minval,self.maxval). Will return
Parameters
----------
x : float, array-like
Value(s) at which to evaluate PDF.
Returns
-------
pdf : float, array-like
Probability density (or re-normalized density if self.norm was explicity
provided.
"""
y = self.pdf(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
y[(x < self.minval) | (x > self.maxval)] = 0
y /= self.norm
if np.size(x)==1:
return y[0]
else:
return y
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,
self.pctile(0.5),
self.pctile(0.84)-self.pctile(0.5),
self.pctile(0.5)-self.pctile(0.16))
def __repr__(self):
return '<%s object: %s>' % (type(self),str(self))
def plot(self,minval=None,maxval=None,fig=None,log=False,
npts=500,**kwargs):
"""
Plots distribution.
Parameters
----------
minval : float,optional
minimum value to plot. Required if minval of Distribution is
`-np.inf`.
maxval : float, optional
maximum value to plot. Required if maxval of Distribution is
`np.inf`.
fig : None or int, optional
Parameter to pass to `setfig`. If `None`, then a new figure is
created; if a non-zero integer, the plot will go to that figure
(clearing everything first), if zero, then will overplot on
current axes.
log : bool, optional
If `True`, the x-spacing of the points to plot will be logarithmic.
npoints : int, optional
Number of points to plot.
kwargs
Keyword arguments are passed to plt.plot
Raises
------
ValueError
If finite lower and upper bounds are not provided.
"""
if minval is None:
minval = self.minval
if maxval is None:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')
if log:
xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
else:
xs = np.linspace(minval,maxval,npts)
setfig(fig)
plt.plot(xs,self(xs),**kwargs)
plt.xlabel(self.name)
plt.ylim(ymin=0,ymax=self(xs).max()*1.2)
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
    """Returns random samples generated according to the distribution.

    Mirrors basic functionality of the `rvs` method of `scipy.stats`
    random variates. Implemented by mapping uniform numbers onto the
    inverse CDF using a closest-matching grid approach.

    Parameters
    ----------
    N : int
        Number of samples to return.
    minval,maxval : float, optional
        Minimum/maximum values to resample. Usually left as `None`, which
        defaults to `self.minval_cdf`/`self.maxval_cdf` when the tabulated
        CDF saturates, else `self.minval`/`self.maxval`.
    log : bool, optional
        Whether the grid should be log- or linear-spaced.
    res : int, optional
        Resolution of the CDF grid used.

    Returns
    -------
    values : ndarray
        N samples.

    Raises
    ------
    ValueError
        If maxval/minval are +/- infinity (the grid-based approach
        requires finite bounds).
    """
    N = int(N)
    res = int(res)  # grid size must be an integer for np.linspace/logspace
    if minval is None:
        if hasattr(self,'minval_cdf'):
            minval = self.minval_cdf
        else:
            minval = self.minval
    if maxval is None:
        if hasattr(self,'maxval_cdf'):
            maxval = self.maxval_cdf
        else:
            maxval = self.maxval

    if maxval==np.inf or minval==-np.inf:
        raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')

    u = rand.random(size=N)
    if log:
        # np.log10, not bare log10 (previously a NameError when log=True)
        vals = np.logspace(np.log10(minval),np.log10(maxval),res)
    else:
        vals = np.linspace(minval,maxval,res)

    # the tabulated CDF can be flat, so unique its values before inverting
    ys,yinds = np.unique(self.cdf(vals), return_index=True)
    vals = vals[yinds]
    inds = np.digitize(u,ys)
    return vals[inds]
def rvs(self,*args,**kwargs):
    """Alias for `resample`, mirroring the scipy.stats random-variate API."""
    return self.resample(*args,**kwargs)
class Distribution_FromH5(Distribution):
    """Creates a Distribution object from one saved to an HDF file.

    File must have a `DataFrame` saved under [path]/fns in the .h5 file,
    containing 'vals', 'pdf', and 'cdf' columns. If samples are saved in
    the HDF storer, they will be restored to this object, as will any
    saved keyword attributes. Appropriate .h5 files are created by the
    `save_hdf` method of the generic `Distribution` class.

    Parameters
    ----------
    filename : string
        .h5 file where the distribution is saved.
    path : string, optional
        Path within the .h5 file where the distribution is saved. By
        default this is the root level, but can be anywhere.
    kwargs
        Keyword arguments are passed to the `Distribution` constructor.
    """
    def __init__(self,filename,path='',**kwargs):
        # single read-only handle for everything (previously a second
        # handle was opened and the first one never closed)
        store = pd.HDFStore(filename,'r')
        try:
            fns = store[path+'/fns']
            if '{}/samples'.format(path) in store:
                samples = store[path+'/samples']
                self.samples = np.array(samples)
            minval = fns['vals'].iloc[0]
            maxval = fns['vals'].iloc[-1]

            pdf = interpolate(fns['vals'],fns['pdf'],s=0,k=1)

            # check whether the tabulated CDF is monotonically increasing.
            # np.diff on raw values: a pandas Series[1:] - Series[:-1]
            # aligns on the index and yields zeros, so the old check could
            # never fire.
            d_cdf = np.diff(np.asarray(fns['cdf']))
            if np.any(d_cdf < 0):
                logging.warning('tabulated CDF in {} is not strictly increasing. Recalculating CDF from PDF'.format(filename))
                cdf = None  # recalc cdf from pdf in Distribution.__init__
            else:
                cdf = interpolate(fns['vals'],fns['cdf'],s=0,k=1)

            Distribution.__init__(self,pdf,cdf,minval=minval,maxval=maxval,
                                  **kwargs)

            try:
                keywords = store.get_storer('{}/fns'.format(path)).attrs.keywords
                # .items(), not the Python-2-only .iteritems(): the latter
                # raised AttributeError under Python 3 and was silently
                # swallowed below, so keywords were never restored.
                for kw,val in keywords.items():
                    setattr(self,kw,val)
            except AttributeError:
                logging.warning('saved distribution {} does not have keywords or disttype saved; perhaps this distribution was written with an older version.'.format(filename))
        finally:
            store.close()
class Empirical_Distribution(Distribution):
    """Distribution built from a tabulated PDF.

    Parameters
    ----------
    xs : array-like
        Abscissae at which the PDF is tabulated.
    pdf : array-like
        PDF values at ``xs`` (normalized in place).
    smooth : int or float
        Smoothing parameter for the spline interpolation.
    kwargs
        Passed through to the `Distribution` constructor.
    """
    def __init__(self,xs,pdf,smooth=0,**kwargs):
        # normalize in place so the tabulated values integrate to one
        pdf /= np.trapz(pdf,xs)
        pdf_fn = interpolate(xs,pdf,s=smooth)
        Distribution.__init__(self,pdf_fn,minval=xs.min(),maxval=xs.max(),
                              keywords={'smooth':smooth},**kwargs)
class Gaussian_Distribution(Distribution):
    """Normal distribution with mean `mu` and width `sig`.

    (scipy.stats.norm is preferable if you only need numerical
    precision/speed and none of the plotting conveniences that the
    `Distribution` class provides.)

    Parameters
    ----------
    mu : float
        Mean of the normal distribution.
    sig : float
        Standard deviation of the normal distribution.
    kwargs
        Passed through to the `Distribution` constructor.
    """
    def __init__(self,mu,sig,**kwargs):
        self.mu = mu
        self.sig = sig
        def pdf(x):
            return 1./np.sqrt(2*np.pi*sig**2)*np.exp(-(x-mu)**2/(2*sig**2))
        def cdf(x):
            return 0.5*(1 + erf((x-mu)/np.sqrt(2*sig**2)))
        # default support: +/- 10 sigma around the mean
        kwargs.setdefault('minval', mu - 10*sig)
        kwargs.setdefault('maxval', mu + 10*sig)
        Distribution.__init__(self,pdf,cdf,
                              keywords={'mu':mu,'sig':sig},**kwargs)
    def __str__(self):
        return '%s = %.2f +/- %.2f' % (self.name,self.mu,self.sig)
    def resample(self,N,**kwargs):
        """Draw N samples directly from the normal distribution."""
        return rand.normal(size=int(N))*self.sig + self.mu
class Hist_Distribution(Distribution):
    """Generates a distribution from a histogram of provided samples.

    Uses `np.histogram` to create a histogram with the given bins, then
    interpolates it to create the pdf passed to the `Distribution`
    constructor.

    Parameters
    ----------
    samples : array-like
        The samples used to create the distribution.
    bins : int or array-like, optional
        Passed to `np.histogram`: number of bins if an integer,
        bin edges if array-like.
    equibin : bool, optional
        If true and ``bins`` is an integer ``N``, the bins are found by
        splitting the data into ``N`` equal-sized groups.
    smooth : int or float
        Smoothing parameter used by the interpolation function.
    order : int
        Order of the interpolating spline (default 1 = linear).
    kwargs
        Keyword arguments passed to the `Distribution` constructor.
    """
    def __init__(self,samples,bins=10,equibin=True,smooth=0,order=1,**kwargs):
        self.samples = samples

        # isinstance instead of the fragile `type(bins)==type(10)` check
        if isinstance(bins,int) and equibin:
            # equal-occupancy bins: each bin holds ~len(samples)//bins points
            N = len(samples)//bins
            sortsamples = np.sort(samples)
            bins = sortsamples[0::N]
            if bins[-1] != sortsamples[-1]:
                bins = np.concatenate([bins,np.array([sortsamples[-1]])])

        hist,bins = np.histogram(samples,bins=bins,density=True)
        self.bins = bins
        bins = (bins[1:] + bins[:-1])/2.  # bin centers for interpolation
        pdf_initial = interpolate(bins,hist,s=smooth,k=order)
        def pdf(x):
            x = np.atleast_1d(x)
            y = pdf_initial(x)
            # force zero outside the histogram's support
            w = np.where((x < self.bins[0]) | (x > self.bins[-1]))
            y[w] = 0
            return y
        cdf = interpolate(bins,hist.cumsum()/hist.cumsum().max(),s=smooth,
                          k=order)

        if 'maxval' not in kwargs:
            kwargs['maxval'] = samples.max()
        if 'minval' not in kwargs:
            kwargs['minval'] = samples.min()

        keywords = {'bins':bins,'smooth':smooth,'order':order}

        Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)

    def __str__(self):
        return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())

    def plothist(self,fig=None,**kwargs):
        """Plots a histogram of the stored samples using the stored bins.

        Parameters
        ----------
        fig : None or int
            Parameter passed to `setfig`.
        kwargs
            Keyword arguments passed to `plt.hist`.
        """
        setfig(fig)
        plt.hist(self.samples,bins=self.bins,**kwargs)

    def resample(self,N):
        """Returns a bootstrap resampling (with replacement) of the samples.

        Parameters
        ----------
        N : int
            Number of samples.
        """
        inds = rand.randint(len(self.samples),size=N)
        return self.samples[inds]

    def save_hdf(self,filename,path='',**kwargs):
        # base implementation already saves self.samples when present
        Distribution.save_hdf(self,filename,path=path,**kwargs)
class Box_Distribution(Distribution):
    """Distribution uniform between provided lower and upper limits.

    Parameters
    ----------
    lo,hi : float
        Lower/upper limits of the distribution.
    kwargs
        Passed through to the `Distribution` constructor.
    """
    def __init__(self,lo,hi,**kwargs):
        self.lo = lo
        self.hi = hi
        def pdf(x):
            # constant density; `+ 0*x` broadcasts to the input's shape
            return 1./(hi-lo) + 0*x
        def cdf(x):
            frac = (np.atleast_1d(x) - lo) / (hi - lo)
            return np.clip(frac, 0, 1)
        Distribution.__init__(self,pdf,cdf,minval=lo,maxval=hi,**kwargs)
    def __str__(self):
        return '%.1f < %s < %.1f' % (self.lo,self.name,self.hi)
    def resample(self,N):
        """Draw N uniform samples directly from [minval, maxval]."""
        return rand.random(size=N)*(self.maxval - self.minval) + self.minval
############## Double LorGauss ###########
def double_lorgauss(x,p):
    """Evaluates a normalized mixture of a double-sided Gaussian and a double-sided Lorentzian.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate the distribution.
    p : array-like
        Input parameters: mu (mode of distribution),
        sig1 (LH Gaussian width), sig2 (RH Gaussian width),
        gam1 (LH Lorentzian width), gam2 (RH Lorentzian width),
        G1 (LH Gaussian "strength"), G2 (RH Gaussian "strength").

    Returns
    -------
    values : float or array-like
        Distribution evaluated at input(s). If a single value is provided,
        a single value is returned.
    """
    mu,sig1,sig2,gam1,gam2,G1,G2 = p
    # all widths/strengths enter as absolute values
    G1 = abs(G1)
    G2 = abs(G2)
    sig1 = abs(sig1)
    sig2 = abs(sig2)
    gam1 = float(abs(gam1))
    # was `gab2 = abs(gam2)`: the typo discarded the absolute value and
    # left gam2 able to stay negative
    gam2 = float(abs(gam2))

    # Lorentzian strengths chosen so the total mixture stays normalized
    L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) -
                                 (gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) +
                                 (gam2/gam1)*(4-G1-G2))
    L1 = 4 - G1 - G2 - L2

    y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\
        L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2)
    y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\
        L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2)

    # left half uses the LH parameters, right half the RH parameters
    lo = (x < mu)
    hi = (x >= mu)

    return y1*lo + y2*hi
def fit_double_lorgauss(bins,h,Ntry=5):
    """Uses lmfit to fit a "double LorGauss" distribution to a histogram.

    Uses a grid of starting guesses to try to avoid local minima.

    Parameters
    ----------
    bins, h : array-like
        Bins and heights of a histogram, as returned by, e.g., `np.histogram`.
    Ntry : int, optional
        Spacing of the grid of starting guesses. `Ntry**2` different
        initial values of the "Gaussian strength" parameters G1, G2 are tried.

    Returns
    -------
    parameters : tuple
        Parameters of the best-fit "double LorGauss" distribution
        (mu, sig1, sig2, gam1, gam2, G1, G2).

    Raises
    ------
    ImportError
        If the lmfit module is not available.
    """
    try:
        from lmfit import minimize, Parameters, Parameter, report_fit
    except ImportError:
        raise ImportError('you need lmfit to use this function.')

    # make sure histogram is normalized
    h /= np.trapz(h,bins)

    # zero-pad the ends of the distribution to keep fits positive;
    # npad must be an integer (N/10 is a float under Python 3 and breaks
    # np.zeros / np.linspace counts)
    N = len(bins)
    npad = N//10
    dbin = (bins[1:]-bins[:-1]).mean()
    newbins = np.concatenate((np.linspace(bins.min() - npad*dbin,bins.min(),npad),
                              bins,
                              np.linspace(bins.max(),bins.max() + npad*dbin,npad)))
    newh = np.concatenate((np.zeros(npad),h,np.zeros(npad)))

    # initial guesses from the (padded) histogram; index newbins, not bins:
    # argmax of the padded array is offset by npad relative to `bins`
    mu0 = newbins[np.argmax(newh)]
    sig0 = abs(mu0 - newbins[np.argmin(np.absolute(newh - 0.5*newh.max()))])

    def set_params(G1,G2):
        params = Parameters()
        params.add('mu',value=mu0)
        params.add('sig1',value=sig0)
        params.add('sig2',value=sig0)
        params.add('gam1',value=sig0/10)
        params.add('gam2',value=sig0/10)
        params.add('G1',value=G1)
        params.add('G2',value=G2)
        return params

    sum_devsq_best = np.inf
    outkeep = None
    # grid of starting guesses over the two Gaussian strengths
    for G1 in np.linspace(0.1,1.9,Ntry):
        for G2 in np.linspace(0.1,1.9,Ntry):
            params = set_params(G1,G2)
            def residual(ps):
                # lmfit mutates `params` in place, so read values from it
                pars = (params['mu'].value,
                        params['sig1'].value,
                        params['sig2'].value,
                        params['gam1'].value,
                        params['gam2'].value,
                        params['G1'].value,
                        params['G2'].value)
                hmodel = double_lorgauss(newbins,pars)
                return newh-hmodel
            out = minimize(residual,params)
            pars = (out.params['mu'].value,out.params['sig1'].value,
                    out.params['sig2'].value,out.params['gam1'].value,
                    out.params['gam2'].value,out.params['G1'].value,
                    out.params['G2'].value)
            sum_devsq = ((newh - double_lorgauss(newbins,pars))**2).sum()
            # keep the fit with the smallest summed squared deviation
            if sum_devsq < sum_devsq_best:
                sum_devsq_best = sum_devsq
                outkeep = out

    return (outkeep.params['mu'].value,abs(outkeep.params['sig1'].value),
            abs(outkeep.params['sig2'].value),abs(outkeep.params['gam1'].value),
            abs(outkeep.params['gam2'].value),abs(outkeep.params['G1'].value),
            abs(outkeep.params['G2'].value))
class DoubleLorGauss_Distribution(Distribution):
    """Distribution defined by the "double LorGauss" parameterization.

    Parameters
    ----------
    mu,sig1,sig2,gam1,gam2,G1,G2 : float
        Parameters of the `double_lorgauss` function.
    kwargs
        Passed through to the `Distribution` constructor.
    """
    def __init__(self,mu,sig1,sig2,gam1,gam2,G1,G2,**kwargs):
        self.mu = mu
        self.sig1 = sig1
        self.sig2 = sig2
        self.gam1 = gam1
        self.gam2 = gam2
        self.G1 = G1
        self.G2 = G2
        def pdf(x):
            pars = (self.mu,self.sig1,self.sig2,
                    self.gam1,self.gam2,self.G1,self.G2)
            return double_lorgauss(x,pars)
        keywords = dict(mu=mu,sig1=sig1,sig2=sig2,
                        gam1=gam1,gam2=gam2,G1=G1,G2=G2)
        Distribution.__init__(self,pdf,keywords=keywords,**kwargs)
######## DoubleGauss #########
def doublegauss(x,p):
    """Evaluates a normalized two-sided Gaussian distribution.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate the distribution.
    p : array-like
        Parameters of the distribution: (mu: mode, sig1: LH width,
        sig2: RH width).

    Returns
    -------
    value : float or array-like
        Distribution evaluated at input value(s). If a single value is
        provided, a single value is returned.
    """
    mu,sig1,sig2 = p
    x = np.atleast_1d(x)
    # normalization for the patched-together half-Gaussians
    A = 1./(np.sqrt(2*np.pi)*(sig1+sig2)/2.)
    ylo = A*np.exp(-(x-mu)**2/(2*sig1**2))
    yhi = A*np.exp(-(x-mu)**2/(2*sig2**2))
    # LH Gaussian below the mode, RH Gaussian at/above it
    y = np.where(x < mu, ylo, yhi)
    return y[0] if np.size(x)==1 else y
def doublegauss_cdf(x,p):
    """Cumulative distribution function of the two-sided Gaussian.

    Parameters
    ----------
    x : float or array-like
        Input value(s) at which to evaluate the CDF.
    p : array-like
        Parameters of the distribution: (mu: mode, sig1: LH width,
        sig2: RH width). Widths are taken in absolute value.

    Returns
    -------
    cdf : ndarray
        CDF evaluated at the input value(s) (always at least 1-d).
    """
    x = np.atleast_1d(x)
    mu,sig1,sig2 = p
    sig1 = np.absolute(sig1)
    sig2 = np.absolute(sig2)
    # probability mass carried by each half
    frac_lo = float(sig1)/(sig1 + sig2)
    frac_hi = float(sig2)/(sig1 + sig2)
    ylo = frac_lo*(1 + erf((x-mu)/np.sqrt(2*sig1**2)))
    yhi = frac_lo + frac_hi*(erf((x-mu)/np.sqrt(2*sig2**2)))
    return np.where(x < mu, ylo, yhi)
def fit_doublegauss_samples(samples,**kwargs):
    """Fits a two-sided Gaussian to a set of samples.

    Calculates the 0.16, 0.5, and 0.84 quantiles and passes them to
    `fit_doublegauss` for fitting.

    Parameters
    ----------
    samples : array-like
        Samples to which to fit the two-sided Gaussian.
    kwargs
        Keyword arguments passed to `fit_doublegauss`.
    """
    sorted_samples = np.sort(samples)
    N = len(samples)
    # integer floor-division: N/2 is a float under Python 3 and is not a
    # valid array index
    med = sorted_samples[N//2]
    siglo = med - sorted_samples[int(0.16*N)]
    sighi = sorted_samples[int(0.84*N)] - med
    return fit_doublegauss(med,siglo,sighi,median=True,**kwargs)
def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True):
    """Fits a two-sided Gaussian distribution to match a given confidence interval.

    The center of the distribution may be either the median or the mode.

    Parameters
    ----------
    med : float
        The center of the distribution to fit. This is treated as the mode
        unless the `median` keyword is set to True.
    siglo : float
        Value at the lower quantile (`q1 = 0.5 - interval/2`) to fit;
        often the "lower error bar."
    sighi : float
        Value at the upper quantile (`q2 = 0.5 + interval/2`) to fit;
        often the "upper error bar."
    interval : float, optional
        Confidence interval enclosed by the provided error bars.
        Default is 0.683 (1-sigma).
    p0 : array-like, optional
        Initial guess of `doublegauss` parameters (`mu, sig1, sig2`).
    median : bool, optional
        Whether to treat `med` as the median (True) or the mode (False).
    return_distribution : bool, optional
        If True, return a `DoubleGauss_Distribution`; otherwise return
        the fitted parameter tuple (mu, sig1, sig2).
    """
    if median:
        # fit three quantiles (lower, median, upper); mu is free
        q1 = 0.5 - (interval/2)
        q2 = 0.5 + (interval/2)
        targetvals = np.array([med-siglo,med,med+sighi])
        qvals = np.array([q1,0.5,q2])
        def objfn(pars):
            logging.debug('{}'.format(pars))
            logging.debug('{} {}'.format(doublegauss_cdf(targetvals,pars),qvals))
            return doublegauss_cdf(targetvals,pars) - qvals
        if p0 is None:
            p0 = [med,siglo,sighi]
        pfit,success = leastsq(objfn,p0)
    else:
        # mode is held fixed at `med`; only the two widths are fit
        q1 = 0.5 - (interval/2)
        q2 = 0.5 + (interval/2)
        targetvals = np.array([med-siglo,med+sighi])
        qvals = np.array([q1,q2])
        def objfn(pars):
            params = (med,pars[0],pars[1])
            return doublegauss_cdf(targetvals,params) - qvals
        if p0 is None:
            p0 = [siglo,sighi]
        pfit,success = leastsq(objfn,p0)
        # re-attach the fixed mode to the fitted widths
        pfit = (med,pfit[0],pfit[1])
    if return_distribution:
        dist = DoubleGauss_Distribution(*pfit)
        return dist
    else:
        return pfit
class DoubleGauss_Distribution(Distribution):
    """A Distribution object representing a two-sided Gaussian distribution.

    Represents a slightly asymmetric distribution: two half-Normal
    distributions patched together at the mode and normalized
    appropriately. The pdf and cdf follow the `doublegauss` and
    `doublegauss_cdf` functions, respectively.

    Parameters
    ----------
    mu : float
        The mode of the distribution.
    siglo : float
        Width of the lower half-Gaussian.
    sighi : float
        Width of the upper half-Gaussian.
    kwargs
        Keyword arguments are passed to the `Distribution` constructor.
    """
    def __init__(self,mu,siglo,sighi,**kwargs):
        self.mu = mu
        self.siglo = float(siglo)
        self.sighi = float(sighi)
        def pdf(x):
            return doublegauss(x,(mu,siglo,sighi))
        def cdf(x):
            return doublegauss_cdf(x,(mu,siglo,sighi))
        # default support: 5 half-widths on each side of the mode
        if 'minval' not in kwargs:
            kwargs['minval'] = mu - 5*siglo
        if 'maxval' not in kwargs:
            kwargs['maxval'] = mu + 5*sighi
        keywords = {'mu':mu,'siglo':siglo,'sighi':sighi}
        Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
    def __str__(self):
        return '%s = %.2f +%.2f -%.2f' % (self.name,self.mu,self.sighi,self.siglo)
    def resample(self,N,**kwargs):
        """Random resampling of the doublegauss distribution.

        Draws candidate values from both half-Gaussians, then picks the
        high side with probability sighi/(sighi+siglo) (the fraction of
        probability mass carried by the upper half).
        """
        lovals = self.mu - np.absolute(rand.normal(size=N)*self.siglo)
        hivals = self.mu + np.absolute(rand.normal(size=N)*self.sighi)
        u = rand.random(size=N)
        hi = (u < float(self.sighi)/(self.sighi + self.siglo))
        lo = (u >= float(self.sighi)/(self.sighi + self.siglo))
        vals = np.zeros(N)
        vals[hi] = hivals[hi]
        vals[lo] = lovals[lo]
        return vals
def powerlawfn(alpha,minval,maxval):
    """Return a normalized power-law pdf, C * x**alpha on [minval, maxval].

    The returned function evaluates to zero outside the bounds.
    """
    C = powerlawnorm(alpha,minval,maxval)
    def fn(inpx):
        xx = np.atleast_1d(inpx)
        out = C*xx**(alpha)
        # zero outside the supported range
        out[(xx < minval) | (xx > maxval)] = 0
        return out
    return fn
def powerlawnorm(alpha,minval,maxval):
    """Normalization constant C so that C*x**alpha integrates to 1 on [minval, maxval].

    Works for scalar or array `alpha`; the alpha == -1 case is handled
    via the logarithmic integral.
    """
    if np.size(alpha)==1:
        if alpha == -1:
            C = 1/np.log(maxval/minval)
        else:
            C = (1+alpha)/(maxval**(1+alpha)-minval**(1+alpha))
    else:
        C = np.zeros(np.size(alpha))
        w = np.where(alpha==-1)
        # was `len(w[0]>0)`: length of a boolean array, which only worked
        # by accident; the intended test is whether any alpha == -1
        if len(w[0]) > 0:
            C[w] = 1./np.log(maxval/minval)*np.ones(len(w[0]))
            nw = np.where(alpha != -1)
            C[nw] = (1+alpha[nw])/(maxval**(1+alpha[nw])-minval**(1+alpha[nw]))
        else:
            C = (1+alpha)/(maxval**(1+alpha)-minval**(1+alpha))
    return C
class PowerLaw_Distribution(Distribution):
    """Power-law distribution, p(x) proportional to x**alpha on [minval, maxval].

    Parameters
    ----------
    alpha : float
        Power-law index.
    minval, maxval : float
        Bounds of the distribution.
    kwargs
        Passed through to the `Distribution` constructor (previously
        accepted but silently dropped).
    """
    def __init__(self,alpha,minval,maxval,**kwargs):
        self.alpha = alpha
        pdf = powerlawfn(alpha,minval,maxval)
        Distribution.__init__(self,pdf,minval=minval,maxval=maxval,**kwargs)
######## KDE ###########
class KDE_Distribution(Distribution):
    """Distribution based on a kernel density estimate of provided samples.

    NOTE(review): relies on the project-local `KDE` class; the kernel and
    bandwidth semantics are defined there -- confirm against kde.py.

    Parameters
    ----------
    samples : array-like
        Samples from which to build the KDE.
    adaptive, draw_direct : bool, optional
        Passed through to the `KDE` constructor.
    bandwidth : optional
        Passed through to the `KDE` constructor.
    kwargs
        Keyword arguments passed to the `Distribution` constructor.
    """
    def __init__(self,samples,adaptive=True,draw_direct=True,bandwidth=None,**kwargs):
        self.samples = samples
        self.bandwidth = bandwidth
        self.kde = KDE(samples,adaptive=adaptive,draw_direct=draw_direct,
                       bandwidth=bandwidth)

        # default support is the sample range
        if 'minval' not in kwargs:
            kwargs['minval'] = samples.min()
        if 'maxval' not in kwargs:
            kwargs['maxval'] = samples.max()

        keywords = {'adaptive':adaptive,'draw_direct':draw_direct,
                    'bandwidth':bandwidth}

        # the KDE object is callable and serves directly as the pdf
        Distribution.__init__(self,self.kde,keywords=keywords,**kwargs)

    def save_hdf(self,filename,path='',**kwargs):
        Distribution.save_hdf(self,filename,path=path,**kwargs)

    def __str__(self):
        return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())

    def resample(self,N,**kwargs):
        # delegate to the KDE's own sampler rather than the grid-based default
        return self.kde.resample(N,**kwargs)
class KDE_Distribution_Fromtxt(KDE_Distribution):
    """KDE distribution built from samples stored in a plain-text file.

    Parameters
    ----------
    filename : string
        Text file readable by `np.loadtxt`, containing the samples.
    kwargs
        Passed through to the `KDE_Distribution` constructor.
    """
    def __init__(self,filename,**kwargs):
        KDE_Distribution.__init__(self,np.loadtxt(filename),**kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
doublegauss
|
python
|
def doublegauss(x,p):
mu,sig1,sig2 = p
x = np.atleast_1d(x)
A = 1./(np.sqrt(2*np.pi)*(sig1+sig2)/2.)
ylo = A*np.exp(-(x-mu)**2/(2*sig1**2))
yhi = A*np.exp(-(x-mu)**2/(2*sig2**2))
y = x*0
wlo = np.where(x < mu)
whi = np.where(x >= mu)
y[wlo] = ylo[wlo]
y[whi] = yhi[whi]
if np.size(x)==1:
return y[0]
else:
return y
|
Evaluates normalized two-sided Gaussian distribution
Parameters
----------
x : float or array-like
Value(s) at which to evaluate distribution
p : array-like
Parameters of distribution: (mu: mode of distribution,
sig1: LH width,
sig2: RH width)
Returns
-------
value : float or array-like
Distribution evaluated at input value(s). If single value provided,
single value returned.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L792-L824
| null |
from __future__ import absolute_import, division, print_function
__author__ = 'Timothy D. Morton <tim.morton@gmail.com>'
"""
Defines objects useful for describing probability distributions.
"""
import numpy as np
import matplotlib.pyplot as plt
import logging
from scipy.interpolate import UnivariateSpline as interpolate
from scipy.integrate import quad
import numpy.random as rand
from scipy.special import erf
from scipy.optimize import leastsq
import pandas as pd
from plotutils import setfig
from .kde import KDE
#figure this generic loading thing out; draft stage currently
def load_distribution(filename,path=''):
    """Load a saved Distribution from an HDF5 file (DRAFT -- not functional).

    NOTE(review): explicitly draft-stage code (see comment above the
    function). `samples` is read but never used, and the final
    ``t.__init__()`` calls the stored class's ``__init__`` with no
    arguments, which cannot reconstruct an instance. Use
    `Distribution_FromH5` instead.
    """
    fns = pd.read_hdf(filename,path)
    store = pd.HDFStore(filename)
    if '{}/samples'.format(path) in store:
        samples = pd.read_hdf(filename,path+'/samples')
        samples = np.array(samples)
    minval = fns['vals'].iloc[0]
    maxval = fns['vals'].iloc[-1]
    pdf = interpolate(fns['vals'],fns['pdf'],s=0)
    cdf = interpolate(fns['vals'],fns['cdf'],s=0)
    attrs = store.get_storer('{}/fns'.format(path)).attrs
    keywords = attrs.keywords
    t = attrs.disttype
    store.close()
    return t.__init__()
class Distribution(object):
    """Base class to describe a probability distribution.

    Has some minimal functional overlap with scipy.stats random variates
    (e.g. `ppf`, `rvs`).

    Parameters
    ----------
    pdf : callable
        The probability density function to be used. Does not have to be
        normalized, but must be non-negative.
    cdf : callable, optional
        The cumulative distribution function. If not provided, this will
        be tabulated from the pdf, as long as minval and maxval are also
        provided.
    name : string, optional
        The name of the distribution (used, for example, to label a plot).
        Default is an empty string.
    minval,maxval : float, optional
        The minimum and maximum values of the distribution. The Distribution
        evaluates to zero outside these ranges, and they also define the
        range of the CDF. Defaults are -np.inf and +np.inf. If not
        explicitly provided, a CDF function must be provided.
    norm : float, optional
        If not provided, this is calculated by integrating the pdf from
        minval to maxval so the Distribution is a proper PDF integrating
        to unity. `norm` can be non-unity if desired, but beware, as this
        will cause some things to act unexpectedly.
    cdf_pts : int, optional
        Number of points to tabulate in order to calculate the CDF, if not
        provided. Default is 500.
    keywords : dict, optional
        Optional dictionary of keywords; these are saved with the
        distribution when `save_hdf` is called.

    Raises
    ------
    ValueError
        If `cdf` is not provided and minval or maxval are infinite.
    """
    def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
                 cdf_pts=500,keywords=None):
        self.name = name
        self.pdf = pdf
        self.cdf = cdf
        self.minval = minval
        self.maxval = maxval

        if keywords is None:
            self.keywords = {}
        else:
            self.keywords = keywords
        self.keywords['name'] = name
        self.keywords['minval'] = minval
        self.keywords['maxval'] = maxval

        if norm is None:
            self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
        else:
            self.norm = norm

        # Tabulate a CDF only when one was not provided. (Previously a
        # user-supplied cdf was unconditionally overwritten by the
        # tabulated one, contrary to the documented contract, and a
        # provided cdf with infinite bounds tried to tabulate over an
        # infinite grid.)
        if cdf is None:
            if minval == -np.inf or maxval == np.inf:
                raise ValueError('must provide either explicit cdf function or explicit min/max values')
            pts = np.linspace(minval,maxval,cdf_pts)
            pdfgrid = self(pts)
            cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
            cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
            def cdf(x):
                x = np.atleast_1d(x)
                y = np.atleast_1d(cdf_fn(x))
                y[np.where(x < self.minval)] = 0
                y[np.where(x > self.maxval)] = 1
                return y
            self.cdf = cdf

            # record where the tabulated CDF saturates, so resampling can
            # restrict itself to the numerically meaningful range
            zero_mask = cdfgrid==0
            one_mask = cdfgrid==1
            if zero_mask.sum()>0:
                self.minval_cdf = pts[zero_mask][-1]  # last 0 value
            if one_mask.sum()>0:
                self.maxval_cdf = pts[one_mask][0]    # first 1 value

    def pctile(self,pct,res=1000):
        """Returns the desired percentile of the distribution.

        Only works if the distribution is properly normalized. Designed to
        mimic the `ppf` method of `scipy.stats` random variate objects.
        Works by gridding the CDF at a given resolution and matching the
        nearest point -- not as precise as an analytic ppf.

        Parameters
        ----------
        pct : float
            Percentile between 0 and 1.
        res : int, optional
            Resolution at which to grid the CDF.

        Returns
        -------
        percentile : float
        """
        grid = np.linspace(self.minval,self.maxval,res)
        return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]

    ppf = pctile

    def save_hdf(self,filename,path='',res=1000,logspace=False):
        """Saves distribution to an HDF5 file.

        Saves a pandas `DataFrame` containing tabulated pdf and cdf values
        at the specified resolution. After saving to a particular path, a
        distribution may be regenerated with the `Distribution_FromH5`
        subclass.

        Parameters
        ----------
        filename : string
            File in which to save the distribution. Should end in .h5.
        path : string, optional
            Path within the .h5 file at which to save. Default is the
            root level.
        res : int, optional
            Resolution at which to grid the distribution for saving.
        logspace : bool, optional
            If True, the tabulated function is gridded with log spacing;
            default is linear spacing.
        """
        if logspace:
            vals = np.logspace(np.log10(self.minval),
                               np.log10(self.maxval),
                               res)
        else:
            vals = np.linspace(self.minval,self.maxval,res)
        d = {'vals':vals,
             'pdf':self(vals),
             'cdf':self.cdf(vals)}
        df = pd.DataFrame(d)
        df.to_hdf(filename,path+'/fns')
        if hasattr(self,'samples'):
            s = pd.Series(self.samples)
            s.to_hdf(filename,path+'/samples')
        store = pd.HDFStore(filename)
        attrs = store.get_storer('{}/fns'.format(path)).attrs
        attrs.keywords = self.keywords
        attrs.disttype = type(self)
        store.close()

    def __call__(self,x):
        """
        Evaluates pdf. Forces zero outside of (self.minval, self.maxval).

        Returns a scalar if a scalar is passed, otherwise an array.

        Parameters
        ----------
        x : float, array-like
            Value(s) at which to evaluate PDF.

        Returns
        -------
        pdf : float, array-like
            Probability density (re-normalized if self.norm was
            explicitly provided).
        """
        y = self.pdf(x)
        x = np.atleast_1d(x)
        y = np.atleast_1d(y)
        # zero out density outside the support, then normalize
        y[(x < self.minval) | (x > self.maxval)] = 0
        y /= self.norm
        if np.size(x)==1:
            return y[0]
        else:
            return y

    def __str__(self):
        return '%s = %.2f +%.2f -%.2f' % (self.name,
                                          self.pctile(0.5),
                                          self.pctile(0.84)-self.pctile(0.5),
                                          self.pctile(0.5)-self.pctile(0.16))

    def __repr__(self):
        return '<%s object: %s>' % (type(self),str(self))

    def plot(self,minval=None,maxval=None,fig=None,log=False,
             npts=500,**kwargs):
        """
        Plots the distribution.

        Parameters
        ----------
        minval : float, optional
            Minimum value to plot. Required if minval of the Distribution
            is `-np.inf`.
        maxval : float, optional
            Maximum value to plot. Required if maxval of the Distribution
            is `np.inf`.
        fig : None or int, optional
            Parameter to pass to `setfig`. If `None`, a new figure is
            created; if a non-zero integer, the plot goes to that figure
            (cleared first); if zero, overplot on current axes.
        log : bool, optional
            If `True`, the x-spacing of the plotted points is logarithmic.
        npts : int, optional
            Number of points to plot.
        kwargs
            Keyword arguments are passed to plt.plot.

        Raises
        ------
        ValueError
            If finite lower and upper bounds are not provided.
        """
        if minval is None:
            minval = self.minval
        if maxval is None:
            maxval = self.maxval
        if maxval==np.inf or minval==-np.inf:
            raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')

        if log:
            xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
        else:
            xs = np.linspace(minval,maxval,npts)

        setfig(fig)
        plt.plot(xs,self(xs),**kwargs)
        plt.xlabel(self.name)
        # positional args instead of ymin=/ymax=: those keywords were
        # removed from plt.ylim in matplotlib 3.0
        plt.ylim(0, self(xs).max()*1.2)

    def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
        """Returns random samples generated according to the distribution.

        Mirrors basic functionality of the `rvs` method of `scipy.stats`
        random variates. Implemented by mapping uniform numbers onto the
        inverse CDF using a closest-matching grid approach.

        Parameters
        ----------
        N : int
            Number of samples to return.
        minval,maxval : float, optional
            Minimum/maximum values to resample. Usually left as `None`,
            defaulting to `self.minval_cdf`/`self.maxval_cdf` when the
            tabulated CDF saturates, else `self.minval`/`self.maxval`.
        log : bool, optional
            Whether the grid should be log- or linear-spaced.
        res : int, optional
            Resolution of the CDF grid used.

        Returns
        -------
        values : ndarray
            N samples.

        Raises
        ------
        ValueError
            If maxval/minval are +/- infinity (the grid-based approach
            requires finite bounds).
        """
        N = int(N)
        res = int(res)  # grid size must be an integer for np.linspace/logspace
        if minval is None:
            if hasattr(self,'minval_cdf'):
                minval = self.minval_cdf
            else:
                minval = self.minval
        if maxval is None:
            if hasattr(self,'maxval_cdf'):
                maxval = self.maxval_cdf
            else:
                maxval = self.maxval

        if maxval==np.inf or minval==-np.inf:
            raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')

        u = rand.random(size=N)
        if log:
            # np.log10, not bare log10 (previously a NameError when log=True)
            vals = np.logspace(np.log10(minval),np.log10(maxval),res)
        else:
            vals = np.linspace(minval,maxval,res)

        # the tabulated CDF can be flat, so unique its values before inverting
        ys,yinds = np.unique(self.cdf(vals), return_index=True)
        vals = vals[yinds]
        inds = np.digitize(u,ys)
        return vals[inds]

    def rvs(self,*args,**kwargs):
        """Alias for `resample`, mirroring the scipy.stats API."""
        return self.resample(*args,**kwargs)
class Distribution_FromH5(Distribution):
    """Creates a Distribution object from one saved to an HDF file.

    File must have a `DataFrame` saved under [path]/fns in the .h5 file,
    containing 'vals', 'pdf', and 'cdf' columns. If samples are saved in
    the HDF storer, they will be restored to this object, as will any
    saved keyword attributes. Appropriate .h5 files are created by the
    `save_hdf` method of the generic `Distribution` class.

    Parameters
    ----------
    filename : string
        .h5 file where the distribution is saved.
    path : string, optional
        Path within the .h5 file where the distribution is saved. By
        default this is the root level, but can be anywhere.
    kwargs
        Keyword arguments are passed to the `Distribution` constructor.
    """
    def __init__(self,filename,path='',**kwargs):
        # single read-only handle for everything (previously a second
        # handle was opened and the first one never closed)
        store = pd.HDFStore(filename,'r')
        try:
            fns = store[path+'/fns']
            if '{}/samples'.format(path) in store:
                samples = store[path+'/samples']
                self.samples = np.array(samples)
            minval = fns['vals'].iloc[0]
            maxval = fns['vals'].iloc[-1]

            pdf = interpolate(fns['vals'],fns['pdf'],s=0,k=1)

            # check whether the tabulated CDF is monotonically increasing.
            # np.diff on raw values: a pandas Series[1:] - Series[:-1]
            # aligns on the index and yields zeros, so the old check could
            # never fire.
            d_cdf = np.diff(np.asarray(fns['cdf']))
            if np.any(d_cdf < 0):
                logging.warning('tabulated CDF in {} is not strictly increasing. Recalculating CDF from PDF'.format(filename))
                cdf = None  # recalc cdf from pdf in Distribution.__init__
            else:
                cdf = interpolate(fns['vals'],fns['cdf'],s=0,k=1)

            Distribution.__init__(self,pdf,cdf,minval=minval,maxval=maxval,
                                  **kwargs)

            try:
                keywords = store.get_storer('{}/fns'.format(path)).attrs.keywords
                # .items(), not the Python-2-only .iteritems(): the latter
                # raised AttributeError under Python 3 and was silently
                # swallowed below, so keywords were never restored.
                for kw,val in keywords.items():
                    setattr(self,kw,val)
            except AttributeError:
                logging.warning('saved distribution {} does not have keywords or disttype saved; perhaps this distribution was written with an older version.'.format(filename))
        finally:
            store.close()
class Empirical_Distribution(Distribution):
    """A `Distribution` built from a tabulated PDF.

    Parameters
    ----------
    xs : array-like
        Grid of x-values at which the PDF is tabulated.
    pdf : array-like
        PDF values at `xs`; normalized internally via the trapezoid rule.
    smooth : int or float
        Smoothing parameter forwarded to the spline interpolation.
    kwargs
        Extra keyword arguments forwarded to the `Distribution` constructor.
    """
    def __init__(self, xs, pdf, smooth=0, **kwargs):
        # Normalize so the tabulated values integrate to one.
        pdf /= np.trapz(pdf, xs)
        interpolated = interpolate(xs, pdf, s=smooth)
        Distribution.__init__(self, interpolated,
                              minval=xs.min(), maxval=xs.max(),
                              keywords={'smooth': smooth}, **kwargs)
class Gaussian_Distribution(Distribution):
    """A normal distribution with given mean and width.

    ***It's probably better to use scipy.stats.norm rather than this
    if you care about numerical precision/speed and don't care about the
    plotting bells/whistles etc. the `Distribution` class provides.***

    Parameters
    ----------
    mu : float
        Mean of normal distribution.
    sig : float
        Width of normal distribution.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self, mu, sig, **kwargs):
        self.mu = mu
        self.sig = sig

        def pdf(x):
            norm_const = 1. / np.sqrt(2 * np.pi * sig ** 2)
            return norm_const * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))

        def cdf(x):
            return 0.5 * (1 + erf((x - mu) / np.sqrt(2 * sig ** 2)))

        # Default support: +/- 10 sigma around the mean.
        kwargs.setdefault('minval', mu - 10 * sig)
        kwargs.setdefault('maxval', mu + 10 * sig)
        Distribution.__init__(self, pdf, cdf,
                              keywords={'mu': self.mu, 'sig': self.sig},
                              **kwargs)

    def __str__(self):
        return '%s = %.2f +/- %.2f' % (self.name, self.mu, self.sig)

    def resample(self, N, **kwargs):
        # Sample directly from the normal distribution instead of inverting
        # the tabulated CDF.
        return rand.normal(size=int(N)) * self.sig + self.mu
class Hist_Distribution(Distribution):
    """Generates a distribution from a histogram of provided samples.

    Uses `np.histogram` to create a histogram using the bins keyword,
    then interpolates this histogram to create the pdf to pass to the
    `Distribution` constructor.

    Parameters
    ----------
    samples : array-like
        The samples used to create the distribution
    bins : int or array-like, optional
        Keyword passed to `np.histogram`. If integer, ths will be
        the number of bins, if array-like, then this defines bin edges.
    equibin : bool, optional
        If true and ``bins`` is an integer ``N``, then the bins will be
        found by splitting the data into ``N`` equal-sized groups.
    smooth : int or float
        Smoothing parameter used by the interpolation function.
    order : int
        Order of the spline to be used for interpolation. Default is
        for linear interpolation.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self,samples,bins=10,equibin=True,smooth=0,order=1,**kwargs):
        self.samples = samples
        # If `bins` is an int and equibin is set, replace it with edges that
        # put (roughly) the same number of samples in each bin.
        if type(bins)==type(10) and equibin:
            N = len(samples)//bins
            sortsamples = np.sort(samples)
            bins = sortsamples[0::N]
            if bins[-1] != sortsamples[-1]:
                # ensure the final edge covers the largest sample
                bins = np.concatenate([bins,np.array([sortsamples[-1]])])
        hist,bins = np.histogram(samples,bins=bins,density=True)
        self.bins = bins  # bin edges, kept for plothist and range clipping
        bins = (bins[1:] + bins[:-1])/2.  # rebind to bin centers for fitting
        pdf_initial = interpolate(bins,hist,s=smooth,k=order)
        def pdf(x):
            # Spline evaluation, forced to zero outside the histogram range.
            x = np.atleast_1d(x)
            y = pdf_initial(x)
            w = np.where((x < self.bins[0]) | (x > self.bins[-1]))
            y[w] = 0
            return y
        # CDF is a spline through the normalized cumulative histogram.
        cdf = interpolate(bins,hist.cumsum()/hist.cumsum().max(),s=smooth,
                          k=order)
        if 'maxval' not in kwargs:
            kwargs['maxval'] = samples.max()
        if 'minval' not in kwargs:
            kwargs['minval'] = samples.min()
        keywords = {'bins':bins,'smooth':smooth,'order':order}
        Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
    def __str__(self):
        return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
    def plothist(self,fig=None,**kwargs):
        """Plots a histogram of samples using provided bins.

        Parameters
        ----------
        fig : None or int
            Parameter passed to `setfig`.
        kwargs
            Keyword arguments passed to `plt.hist`.
        """
        setfig(fig)
        plt.hist(self.samples,bins=self.bins,**kwargs)
    def resample(self,N):
        """Returns a bootstrap resampling of provided samples.

        Parameters
        ----------
        N : int
            Number of samples.
        """
        inds = rand.randint(len(self.samples),size=N)
        return self.samples[inds]
    def save_hdf(self,filename,path='',**kwargs):
        # Samples are saved by the base implementation (self.samples is set).
        Distribution.save_hdf(self,filename,path=path,**kwargs)
class Box_Distribution(Distribution):
    """Uniform ("box") distribution between provided lower and upper limits.

    Parameters
    ----------
    lo,hi : float
        Lower/upper limits of the distribution.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self, lo, hi, **kwargs):
        self.lo = lo
        self.hi = hi

        def pdf(x):
            # Constant density; "+ 0*x" keeps array-shaped output for arrays.
            return 1. / (hi - lo) + 0 * x

        def cdf(x):
            x = np.atleast_1d(x)
            y = (x - lo) / (hi - lo)
            y[x < lo] = 0
            y[x > hi] = 1
            return y

        Distribution.__init__(self, pdf, cdf, minval=lo, maxval=hi, **kwargs)

    def __str__(self):
        return '%.1f < %s < %.1f' % (self.lo, self.name, self.hi)

    def resample(self, N):
        """Draw `N` uniform samples between `minval` and `maxval`."""
        return rand.random(size=N) * (self.maxval - self.minval) + self.minval
############## Double LorGauss ###########
def double_lorgauss(x,p):
    """Evaluates a normalized mixture of a double-sided Gaussian and a
    double-sided Lorentzian.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate the distribution.
    p : array-like
        Input parameters: mu (mode of distribution),
        sig1 (LH Gaussian width), sig2 (RH Gaussian width),
        gam1 (LH Lorentzian width), gam2 (RH Lorentzian width),
        G1 (LH Gaussian "strength"), G2 (RH Gaussian "strength").

    Returns
    -------
    values : float or array-like
        Double LorGauss distribution evaluated at input(s). If a single
        value is provided, a single value is returned.
    """
    mu,sig1,sig2,gam1,gam2,G1,G2 = p
    # All widths and strengths are used as magnitudes.
    sig1 = abs(float(sig1))
    sig2 = abs(float(sig2))
    gam1 = abs(float(gam1))
    # BUGFIX: original assigned abs(gam2) to a typo'd name ('gab2'), so a
    # negative gam2 leaked into the weight formulas below (division by
    # gam1+gam2 could even hit zero).
    gam2 = abs(float(gam2))
    G1 = abs(G1)
    G2 = abs(G2)
    # Lorentzian strengths chosen so the four components normalize together.
    L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) -
                                 (gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) +
                                 (gam2/gam1)*(4-G1-G2))
    L1 = 4 - G1 - G2 - L2
    # Left-hand (y1) and right-hand (y2) halves of the profile.
    y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\
        L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2)
    y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\
        L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2)
    lo = (x < mu)
    hi = (x >= mu)
    return y1*lo + y2*hi
def fit_double_lorgauss(bins,h,Ntry=5):
    """Uses lmfit to fit a "Double LorGauss" distribution to a provided histogram.

    Uses a grid of starting guesses to try to avoid local minima.

    Parameters
    ----------
    bins, h : array-like
        Bins and heights of a histogram, as returned by, e.g., `np.histogram`.
    Ntry : int, optional
        Spacing of grid for starting guesses. Will try `Ntry**2` different
        initial values of the "Gaussian strength" parameters `G1` and `G2`.

    Returns
    -------
    parameters : tuple
        Parameters (mu, sig1, sig2, gam1, gam2, G1, G2) of the best-fit
        "double LorGauss" distribution.

    Raises
    ------
    ImportError
        If the lmfit module is not available.
    """
    try:
        from lmfit import minimize, Parameters, Parameter, report_fit
    except ImportError:
        raise ImportError('you need lmfit to use this function.')
    #make sure histogram is normalized
    h /= np.trapz(h,bins)
    #zero-pad the ends of the distribution to keep fits positive
    N = len(bins)
    # BUGFIX: the pad length must be an int (N/10 is a float on Python 3
    # and np.linspace/np.zeros reject float counts).
    npad = N//10
    dbin = (bins[1:]-bins[:-1]).mean()
    newbins = np.concatenate((np.linspace(bins.min() - npad*dbin,bins.min(),npad),
                              bins,
                              np.linspace(bins.max(),bins.max() + npad*dbin,npad)))
    newh = np.concatenate((np.zeros(npad),h,np.zeros(npad)))
    # Initial guesses: mode at the histogram peak, width from half-maximum.
    mu0 = bins[np.argmax(newh)]
    sig0 = abs(mu0 - newbins[np.argmin(np.absolute(newh - 0.5*newh.max()))])
    def set_params(G1,G2):
        # Fresh Parameters object for each (G1, G2) starting guess.
        params = Parameters()
        params.add('mu',value=mu0)
        params.add('sig1',value=sig0)
        params.add('sig2',value=sig0)
        params.add('gam1',value=sig0/10)
        params.add('gam2',value=sig0/10)
        params.add('G1',value=G1)
        params.add('G2',value=G2)
        return params
    sum_devsq_best = np.inf
    outkeep = None
    for G1 in np.linspace(0.1,1.9,Ntry):
        for G2 in np.linspace(0.1,1.9,Ntry):
            params = set_params(G1,G2)
            def residual(ps):
                # BUGFIX: evaluate the parameters lmfit passes in (`ps`)
                # rather than relying on lmfit mutating the closed-over
                # `params` object in place (not true of modern lmfit).
                pars = (ps['mu'].value,
                        ps['sig1'].value,
                        ps['sig2'].value,
                        ps['gam1'].value,
                        ps['gam2'].value,
                        ps['G1'].value,
                        ps['G2'].value)
                hmodel = double_lorgauss(newbins,pars)
                return newh-hmodel
            out = minimize(residual,params)
            pars = (out.params['mu'].value,out.params['sig1'].value,
                    out.params['sig2'].value,out.params['gam1'].value,
                    out.params['gam2'].value,out.params['G1'].value,
                    out.params['G2'].value)
            # Keep the fit with the smallest sum of squared deviations.
            sum_devsq = ((newh - double_lorgauss(newbins,pars))**2).sum()
            if sum_devsq < sum_devsq_best:
                sum_devsq_best = sum_devsq
                outkeep = out
    return (outkeep.params['mu'].value,abs(outkeep.params['sig1'].value),
            abs(outkeep.params['sig2'].value),abs(outkeep.params['gam1'].value),
            abs(outkeep.params['gam2'].value),abs(outkeep.params['G1'].value),
            abs(outkeep.params['G2'].value))
class DoubleLorGauss_Distribution(Distribution):
    """A "double LorGauss" distribution with the given parameters.

    Parameters
    ----------
    mu,sig1,sig2,gam1,gam2,G1,G2 : float
        Parameters of the `double_lorgauss` function.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self, mu, sig1, sig2, gam1, gam2, G1, G2, **kwargs):
        self.mu = mu
        self.sig1 = sig1
        self.sig2 = sig2
        self.gam1 = gam1
        self.gam2 = gam2
        self.G1 = G1
        self.G2 = G2

        def pdf(x):
            params = (self.mu, self.sig1, self.sig2,
                      self.gam1, self.gam2, self.G1, self.G2)
            return double_lorgauss(x, params)

        keywords = dict(mu=mu, sig1=sig1, sig2=sig2,
                        gam1=gam1, gam2=gam2, G1=G1, G2=G2)
        Distribution.__init__(self, pdf, keywords=keywords, **kwargs)
######## DoubleGauss #########
def doublegauss_cdf(x,p):
    """Cumulative distribution function of a two-sided Gaussian.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate the CDF.
    p : array-like
        Parameters of distribution: (mu: mode of distribution,
        sig1: LH width, sig2: RH width).

    Returns
    -------
    cdf : ndarray
        CDF evaluated at `x` (always at least 1-d).
    """
    x = np.atleast_1d(x)
    mu, sig1, sig2 = p
    sig1 = np.absolute(sig1)
    sig2 = np.absolute(sig2)
    # Relative weights of the two half-Gaussians.
    w1 = float(sig1) / (sig1 + sig2)
    w2 = float(sig2) / (sig1 + sig2)
    # Left-hand branch: scaled half-Gaussian CDF reaching w1 at the mode.
    left = w1 * (1 + erf((x - mu) / np.sqrt(2 * sig1 ** 2)))
    # Right-hand branch: starts at w1 and rises to 1.
    right = w1 + w2 * erf((x - mu) / np.sqrt(2 * sig2 ** 2))
    below = x < mu
    return left * below + right * (~below)
def fit_doublegauss_samples(samples,**kwargs):
    """Fits a two-sided Gaussian to a set of samples.

    Calculates the 0.16, 0.5, and 0.84 quantiles of the samples and passes
    them to `fit_doublegauss` for fitting (with median=True).

    Parameters
    ----------
    samples : array-like
        Samples to which to fit the Gaussian.
    kwargs
        Keyword arguments passed to `fit_doublegauss`.
    """
    sorted_samples = np.sort(samples)
    N = len(samples)
    # BUGFIX: use integer (floor) division -- N/2 is a float on Python 3
    # and raises when used as an index.
    med = sorted_samples[N//2]
    siglo = med - sorted_samples[int(0.16*N)]
    sighi = sorted_samples[int(0.84*N)] - med
    return fit_doublegauss(med,siglo,sighi,median=True,**kwargs)
def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True):
    """Fits a two-sided Gaussian distribution to match a given confidence interval.

    The center of the distribution may be either the median or the mode.

    Parameters
    ----------
    med : float
        The center of the distribution to which to fit. Default this
        will be the mode unless the `median` keyword is set to True.
    siglo : float
        Value at lower quantile (`q1 = 0.5 - interval/2`) to fit. Often this is
        the "lower error bar."
    sighi : float
        Value at upper quantile (`q2 = 0.5 + interval/2`) to fit. Often this is
        the "upper error bar."
    interval : float, optional
        The confidence interval enclosed by the provided error bars. Default
        is 0.683 (1-sigma).
    p0 : array-like, optional
        Initial guess `doublegauss` parameters for the fit (`mu, sig1, sig2`).
    median : bool, optional
        Whether to treat the `med` parameter as the median or mode
        (default will be mode).
    return_distribution: bool, optional
        If `True`, then function will return a `DoubleGauss_Distribution` object.
        Otherwise, will return just the parameters.
    """
    if median:
        # `med` is the median: fit all three parameters (mu, sig1, sig2) so
        # the CDF passes through the three target quantiles (q1, 0.5, q2).
        q1 = 0.5 - (interval/2)
        q2 = 0.5 + (interval/2)
        targetvals = np.array([med-siglo,med,med+sighi])
        qvals = np.array([q1,0.5,q2])
        def objfn(pars):
            logging.debug('{}'.format(pars))
            logging.debug('{} {}'.format(doublegauss_cdf(targetvals,pars),qvals))
            return doublegauss_cdf(targetvals,pars) - qvals
        if p0 is None:
            p0 = [med,siglo,sighi]
        pfit,success = leastsq(objfn,p0)
    else:
        # `med` is the mode: hold mu fixed and fit only the two widths so
        # the CDF passes through the two target quantiles.
        q1 = 0.5 - (interval/2)
        q2 = 0.5 + (interval/2)
        targetvals = np.array([med-siglo,med+sighi])
        qvals = np.array([q1,q2])
        def objfn(pars):
            params = (med,pars[0],pars[1])
            return doublegauss_cdf(targetvals,params) - qvals
        if p0 is None:
            p0 = [siglo,sighi]
        pfit,success = leastsq(objfn,p0)
        # Re-attach the fixed mode so the result is always (mu, sig1, sig2).
        pfit = (med,pfit[0],pfit[1])
    if return_distribution:
        dist = DoubleGauss_Distribution(*pfit)
        return dist
    else:
        return pfit
class DoubleGauss_Distribution(Distribution):
    """A Distribution oject representing a two-sided Gaussian distribution

    This can be used to represent a slightly asymmetric distribution,
    and consists of two half-Normal distributions patched together at the
    mode, and normalized appropriately. The pdf and cdf are according to
    the `doubleguass` and `doubleguass_cdf` functions, respectively.

    Parameters
    ----------
    mu : float
        The mode of the distribution.
    siglo : float
        Width of lower half-Gaussian.
    sighi : float
        Width of upper half-Gaussian.
    kwargs
        Keyword arguments are passed to `Distribution` constructor.
    """
    def __init__(self,mu,siglo,sighi,**kwargs):
        self.mu = mu
        self.siglo = float(siglo)
        self.sighi = float(sighi)
        def pdf(x):
            # NOTE(review): `doublegauss` is defined elsewhere in this
            # module (not visible here) -- presumably the two-sided
            # Gaussian PDF companion of `doublegauss_cdf`; confirm.
            return doublegauss(x,(mu,siglo,sighi))
        def cdf(x):
            return doublegauss_cdf(x,(mu,siglo,sighi))
        # Default support: +/- 5 widths around the mode.
        if 'minval' not in kwargs:
            kwargs['minval'] = mu - 5*siglo
        if 'maxval' not in kwargs:
            kwargs['maxval'] = mu + 5*sighi
        keywords = {'mu':mu,'siglo':siglo,'sighi':sighi}
        Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
    def __str__(self):
        return '%s = %.2f +%.2f -%.2f' % (self.name,self.mu,self.sighi,self.siglo)
    def resample(self,N,**kwargs):
        """Random resampling of the doublegauss distribution.

        Draws from each half-Gaussian and picks a side per sample with
        probability proportional to that side's width.
        """
        lovals = self.mu - np.absolute(rand.normal(size=N)*self.siglo)
        hivals = self.mu + np.absolute(rand.normal(size=N)*self.sighi)
        u = rand.random(size=N)
        hi = (u < float(self.sighi)/(self.sighi + self.siglo))
        lo = (u >= float(self.sighi)/(self.sighi + self.siglo))
        vals = np.zeros(N)
        vals[hi] = hivals[hi]
        vals[lo] = lovals[lo]
        return vals
def powerlawfn(alpha,minval,maxval):
    """Return a normalized power-law PDF ~ x**alpha on [minval, maxval].

    The returned callable evaluates C * x**alpha inside the interval and
    zero outside it, where C = powerlawnorm(alpha, minval, maxval).
    """
    C = powerlawnorm(alpha,minval,maxval)
    def fn(inpx):
        xs = np.atleast_1d(inpx)
        ys = C * xs**alpha
        outside = (xs < minval) | (xs > maxval)
        ys[outside] = 0
        return ys
    return fn
def powerlawnorm(alpha,minval,maxval):
    """Normalization constant C such that C*x**alpha integrates to one
    over [minval, maxval].

    Parameters
    ----------
    alpha : float or array-like
        Power-law index (or array of indices).
    minval, maxval : float
        Bounds of the support.

    Returns
    -------
    C : float or ndarray
        Normalization constant(s). The alpha == -1 case is handled via
        the logarithmic integral.
    """
    if np.size(alpha)==1:
        if alpha == -1:
            C = 1/np.log(maxval/minval)
        else:
            C = (1+alpha)/(maxval**(1+alpha)-minval**(1+alpha))
    else:
        alpha = np.asarray(alpha)  # allow plain sequences as well
        C = np.zeros(np.size(alpha))
        w = np.where(alpha==-1)
        # BUGFIX: test the number of matches. The original wrote
        # `len(w[0]>0)`, the length of a boolean array, which only
        # worked by coincidence (it equals len(w[0])).
        if len(w[0]) > 0:
            C[w] = 1./np.log(maxval/minval)*np.ones(len(w[0]))
        nw = np.where(alpha != -1)
        C[nw] = (1+alpha[nw])/(maxval**(1+alpha[nw])-minval**(1+alpha[nw]))
    return C
class PowerLaw_Distribution(Distribution):
    """Power-law distribution p(x) ~ x**alpha on [minval, maxval].

    Parameters
    ----------
    alpha : float
        Power-law index.
    minval, maxval : float
        Bounds of the support.
    kwargs
        Extra keyword arguments forwarded to the `Distribution`
        constructor. (BUGFIX: these were previously accepted but
        silently dropped.)
    """
    def __init__(self,alpha,minval,maxval,**kwargs):
        self.alpha = alpha
        pdf = powerlawfn(alpha,minval,maxval)
        Distribution.__init__(self,pdf,minval=minval,maxval=maxval,**kwargs)
######## KDE ###########
class KDE_Distribution(Distribution):
    """Distribution based on a kernel density estimate of provided samples.

    Parameters
    ----------
    samples : array-like
        Samples from which to build the KDE.
    adaptive : bool, optional
        Passed through to the `KDE` constructor.
    draw_direct : bool, optional
        Passed through to the `KDE` constructor.
    bandwidth : float or None, optional
        KDE bandwidth; passed through to the `KDE` constructor.
    kwargs
        Keyword arguments passed to the `Distribution` constructor.
    """
    def __init__(self,samples,adaptive=True,draw_direct=True,bandwidth=None,**kwargs):
        self.samples = samples
        self.bandwidth = bandwidth
        self.kde = KDE(samples,adaptive=adaptive,draw_direct=draw_direct,
                       bandwidth=bandwidth)
        # Default support is the sample range.
        if 'minval' not in kwargs:
            kwargs['minval'] = samples.min()
        if 'maxval' not in kwargs:
            kwargs['maxval'] = samples.max()
        keywords = {'adaptive':adaptive,'draw_direct':draw_direct,
                    'bandwidth':bandwidth}
        # The KDE object is used directly as the pdf callable.
        Distribution.__init__(self,self.kde,keywords=keywords,**kwargs)
    def save_hdf(self,filename,path='',**kwargs):
        # Samples are saved by the base implementation (self.samples is set).
        Distribution.save_hdf(self,filename,path=path,**kwargs)
    def __str__(self):
        return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
    def resample(self,N,**kwargs):
        # Delegate sampling to the underlying KDE.
        return self.kde.resample(N,**kwargs)
class KDE_Distribution_Fromtxt(KDE_Distribution):
    """KDE_Distribution built from samples stored in a text file.

    Parameters
    ----------
    filename : string
        File readable by `np.loadtxt`; its contents are used as samples.
    kwargs
        Keyword arguments passed to `KDE_Distribution`.
    """
    def __init__(self,filename,**kwargs):
        samples = np.loadtxt(filename)
        KDE_Distribution.__init__(self,samples,**kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
doublegauss_cdf
|
python
|
def doublegauss_cdf(x,p):
x = np.atleast_1d(x)
mu,sig1,sig2 = p
sig1 = np.absolute(sig1)
sig2 = np.absolute(sig2)
ylo = float(sig1)/(sig1 + sig2)*(1 + erf((x-mu)/np.sqrt(2*sig1**2)))
yhi = float(sig1)/(sig1 + sig2) + float(sig2)/(sig1+sig2)*(erf((x-mu)/np.sqrt(2*sig2**2)))
lo = x < mu
hi = x >= mu
return ylo*lo + yhi*hi
|
Cumulative distribution function for two-sided Gaussian
Parameters
----------
x : float
Input values at which to calculate CDF.
p : array-like
Parameters of distribution: (mu: mode of distribution,
sig1: LH width,
sig2: RH width)
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L826-L847
| null |
from __future__ import absolute_import, division, print_function
__author__ = 'Timothy D. Morton <tim.morton@gmail.com>'
"""
Defines objects useful for describing probability distributions.
"""
import numpy as np
import matplotlib.pyplot as plt
import logging
from scipy.interpolate import UnivariateSpline as interpolate
from scipy.integrate import quad
import numpy.random as rand
from scipy.special import erf
from scipy.optimize import leastsq
import pandas as pd
from plotutils import setfig
from .kde import KDE
#figure this generic loading thing out; draft stage currently
def load_distribution(filename,path=''):
    # NOTE(review): marked draft in the module ("figure this generic loading
    # thing out"). It reads the tabulated fns (and samples, if present) from
    # the HDF file, but the final `t.__init__()` call is not a working
    # reconstruction of the saved distribution type -- confirm intent
    # before using; `Distribution_FromH5` is the working loader.
    fns = pd.read_hdf(filename,path)
    store = pd.HDFStore(filename)
    if '{}/samples'.format(path) in store:
        samples = pd.read_hdf(filename,path+'/samples')
        samples = np.array(samples)
    minval = fns['vals'].iloc[0]
    maxval = fns['vals'].iloc[-1]
    pdf = interpolate(fns['vals'],fns['pdf'],s=0)
    cdf = interpolate(fns['vals'],fns['cdf'],s=0)
    attrs = store.get_storer('{}/fns'.format(path)).attrs
    keywords = attrs.keywords
    t = attrs.disttype  # class object saved by Distribution.save_hdf
    store.close()
    return t.__init__()
class Distribution(object):
"""Base class to describe probability distribution.
Has some minimal functional overlap with scipy.stats random variates
(e.g. `ppf`, `rvs`)
Parameters
----------
pdf : callable
The probability density function to be used. Does not have to be
normalized, but must be non-negative.
cdf : callable, optional
The cumulative distribution function. If not provided, this will
be tabulated from the pdf, as long as minval and maxval are also provided
name : string, optional
The name of the distribution (will be used, for example, to label a plot).
Default is empty string.
minval,maxval : float, optional
The minimum and maximum values of the distribution. The Distribution will
evaluate to zero outside these ranges, and this will also define the range
of the CDF. Defaults are -np.inf and +np.inf. If these are not explicity
provided, then a CDF function must be provided.
norm : float, optional
If not provided, this will be calculated by integrating the pdf from
minval to maxval so that the Distribution is a proper PDF that integrates
to unity. `norm` can be non-unity if desired, but beware, as this will
cause some things to act unexpectedly.
cdf_pts : int, optional
Number of points to tabulate in order to calculate CDF, if not provided.
Default is 500.
keywords : dict, optional
Optional dictionary of keywords; these will be saved with the distribution
when `save_hdf` is called.
Raises
------
ValueError
If `cdf` is not provided and minval or maxval are infinity.
"""
def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
cdf_pts=500,keywords=None):
self.name = name
self.pdf = pdf
self.cdf = cdf
self.minval = minval
self.maxval = maxval
if keywords is None:
self.keywords = {}
else:
self.keywords = keywords
self.keywords['name'] = name
self.keywords['minval'] = minval
self.keywords['maxval'] = maxval
if norm is None:
self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
else:
self.norm = norm
if cdf is None and (minval == -np.inf or maxval == np.inf):
raise ValueError('must provide either explicit cdf function or explicit min/max values')
else: #tabulate & interpolate CDF.
pts = np.linspace(minval,maxval,cdf_pts)
pdfgrid = self(pts)
cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
def cdf(x):
x = np.atleast_1d(x)
y = np.atleast_1d(cdf_fn(x))
y[np.where(x < self.minval)] = 0
y[np.where(x > self.maxval)] = 1
return y
self.cdf = cdf
#define minval_cdf, maxval_cdf
zero_mask = cdfgrid==0
one_mask = cdfgrid==1
if zero_mask.sum()>0:
self.minval_cdf = pts[zero_mask][-1] #last 0 value
if one_mask.sum()>0:
self.maxval_cdf = pts[one_mask][0] #first 1 value
def pctile(self,pct,res=1000):
"""Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float
"""
grid = np.linspace(self.minval,self.maxval,res)
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
ppf = pctile
def save_hdf(self,filename,path='',res=1000,logspace=False):
"""Saves distribution to an HDF5 file.
Saves a pandas `dataframe` object containing tabulated pdf and cdf
values at a specfied resolution. After saving to a particular path, a
distribution may be regenerated using the `Distribution_FromH5` subclass.
Parameters
----------
filename : string
File in which to save the distribution. Should end in .h5.
path : string, optional
Path in which to save the distribution within the .h5 file. By
default this is an empty string, which will lead to saving the
`fns` dataframe at the root level of the file.
res : int, optional
Resolution at which to grid the distribution for saving.
logspace : bool, optional
Sets whether the tabulated function should be gridded with log or
linear spacing. Default will be logspace=False, corresponding
to linear gridding.
"""
if logspace:
vals = np.logspace(np.log10(self.minval),
np.log10(self.maxval),
res)
else:
vals = np.linspace(self.minval,self.maxval,res)
d = {'vals':vals,
'pdf':self(vals),
'cdf':self.cdf(vals)}
df = pd.DataFrame(d)
df.to_hdf(filename,path+'/fns')
if hasattr(self,'samples'):
s = pd.Series(self.samples)
s.to_hdf(filename,path+'/samples')
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/fns'.format(path)).attrs
attrs.keywords = self.keywords
attrs.disttype = type(self)
store.close()
def __call__(self,x):
"""
Evaluates pdf. Forces zero outside of (self.minval,self.maxval). Will return
Parameters
----------
x : float, array-like
Value(s) at which to evaluate PDF.
Returns
-------
pdf : float, array-like
Probability density (or re-normalized density if self.norm was explicity
provided.
"""
y = self.pdf(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
y[(x < self.minval) | (x > self.maxval)] = 0
y /= self.norm
if np.size(x)==1:
return y[0]
else:
return y
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,
self.pctile(0.5),
self.pctile(0.84)-self.pctile(0.5),
self.pctile(0.5)-self.pctile(0.16))
def __repr__(self):
return '<%s object: %s>' % (type(self),str(self))
def plot(self,minval=None,maxval=None,fig=None,log=False,
npts=500,**kwargs):
"""
Plots distribution.
Parameters
----------
minval : float,optional
minimum value to plot. Required if minval of Distribution is
`-np.inf`.
maxval : float, optional
maximum value to plot. Required if maxval of Distribution is
`np.inf`.
fig : None or int, optional
Parameter to pass to `setfig`. If `None`, then a new figure is
created; if a non-zero integer, the plot will go to that figure
(clearing everything first), if zero, then will overplot on
current axes.
log : bool, optional
If `True`, the x-spacing of the points to plot will be logarithmic.
npoints : int, optional
Number of points to plot.
kwargs
Keyword arguments are passed to plt.plot
Raises
------
ValueError
If finite lower and upper bounds are not provided.
"""
if minval is None:
minval = self.minval
if maxval is None:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')
if log:
xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
else:
xs = np.linspace(minval,maxval,npts)
setfig(fig)
plt.plot(xs,self(xs),**kwargs)
plt.xlabel(self.name)
plt.ylim(ymin=0,ymax=self(xs).max()*1.2)
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
"""Returns random samples generated according to the distribution
Mirrors basic functionality of `rvs` method for `scipy.stats`
random variates. Implemented by mapping uniform numbers onto the
inverse CDF using a closest-matching grid approach.
Parameters
----------
N : int
Number of samples to return
minval,maxval : float, optional
Minimum/maximum values to resample. Should both usually just be
`None`, which will default to `self.minval`/`self.maxval`.
log : bool, optional
Whether grid should be log- or linear-spaced.
res : int, optional
Resolution of CDF grid used.
Returns
-------
values : ndarray
N samples.
Raises
------
ValueError
If maxval/minval are +/- infinity, this doesn't work because of
the grid-based approach.
"""
N = int(N)
if minval is None:
if hasattr(self,'minval_cdf'):
minval = self.minval_cdf
else:
minval = self.minval
if maxval is None:
if hasattr(self,'maxval_cdf'):
maxval = self.maxval_cdf
else:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
u = rand.random(size=N)
if log:
vals = np.logspace(log10(minval),log10(maxval),res)
else:
vals = np.linspace(minval,maxval,res)
#sometimes cdf is flat. so ys will need to be uniqued
ys,yinds = np.unique(self.cdf(vals), return_index=True)
vals = vals[yinds]
inds = np.digitize(u,ys)
return vals[inds]
def rvs(self,*args,**kwargs):
return self.resample(*args,**kwargs)
class Distribution_FromH5(Distribution):
"""Creates a Distribution object from one saved to an HDF file.
File must have a `DataFrame` saved under [path]/fns in
the .h5 file, containing 'vals', 'pdf', and 'cdf' columns.
If samples are saved in the HDF storer, then they will be restored
to this object; so will any saved keyword attributes.
These appropriate .h5 files will be created by a call to the `save_hdf`
method of the generic `Distribution` class.
Parameters
----------
filename : string
.h5 file where the distribution is saved.
path : string, optional
Path within the .h5 file where the distribution is saved. By
default this will be the root level, but can be anywhere.
kwargs
Keyword arguments are passed to the `Distribution` constructor.
"""
def __init__(self,filename,path='',**kwargs):
store = pd.HDFStore(filename,'r')
fns = store[path+'/fns']
if '{}/samples'.format(path) in store:
samples = store[path+'/samples']
self.samples = np.array(samples)
minval = fns['vals'].iloc[0]
maxval = fns['vals'].iloc[-1]
pdf = interpolate(fns['vals'],fns['pdf'],s=0,k=1)
#check to see if tabulated CDF is monotonically increasing
d_cdf = fns['cdf'][1:] - fns['cdf'][:-1]
if np.any(d_cdf < 0):
logging.warning('tabulated CDF in {} is not strictly increasing. Recalculating CDF from PDF'.format(filename))
cdf = None #in this case, just recalc cdf from pdf
else:
cdf = interpolate(fns['vals'],fns['cdf'],s=0,k=1)
Distribution.__init__(self,pdf,cdf,minval=minval,maxval=maxval,
**kwargs)
store = pd.HDFStore(filename,'r')
try:
keywords = store.get_storer('{}/fns'.format(path)).attrs.keywords
for kw,val in keywords.iteritems():
setattr(self,kw,val)
except AttributeError:
logging.warning('saved distribution {} does not have keywords or disttype saved; perhaps this distribution was written with an older version.'.format(filename))
store.close()
class Empirical_Distribution(Distribution):
"""Generates a Distribution object given a tabulated PDF.
Parameters
----------
xs : array-like
x-values at which the PDF is evaluated
pdf : array-like
Values of pdf at provided x-values.
smooth : int or float
Smoothing parameter used by the interpolation.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,xs,pdf,smooth=0,**kwargs):
pdf /= np.trapz(pdf,xs)
fn = interpolate(xs,pdf,s=smooth)
keywords = {'smooth':smooth}
Distribution.__init__(self,fn,minval=xs.min(),maxval=xs.max(),
keywords=keywords,**kwargs)
class Gaussian_Distribution(Distribution):
"""Generates a normal distribution with given mu, sigma.
***It's probably better to use scipy.stats.norm rather than this
if you care about numerical precision/speed and don't care about the
plotting bells/whistles etc. the `Distribution` class provides.***
Parameters
----------
mu : float
Mean of normal distribution.
sig : float
Width of normal distribution.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,mu,sig,**kwargs):
self.mu = mu
self.sig = sig
def pdf(x):
return 1./np.sqrt(2*np.pi*sig**2)*np.exp(-(x-mu)**2/(2*sig**2))
def cdf(x):
return 0.5*(1 + erf((x-mu)/np.sqrt(2*sig**2)))
if 'minval' not in kwargs:
kwargs['minval'] = mu - 10*sig
if 'maxval' not in kwargs:
kwargs['maxval'] = mu + 10*sig
keywords = {'mu':self.mu,'sig':self.sig}
Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
def __str__(self):
return '%s = %.2f +/- %.2f' % (self.name,self.mu,self.sig)
def resample(self,N,**kwargs):
return rand.normal(size=int(N))*self.sig + self.mu
class Hist_Distribution(Distribution):
"""Generates a distribution from a histogram of provided samples.
Uses `np.histogram` to create a histogram using the bins keyword,
then interpolates this histogram to create the pdf to pass to the
`Distribution` constructor.
Parameters
----------
samples : array-like
The samples used to create the distribution
bins : int or array-like, optional
Keyword passed to `np.histogram`. If integer, ths will be
the number of bins, if array-like, then this defines bin edges.
equibin : bool, optional
If true and ``bins`` is an integer ``N``, then the bins will be
found by splitting the data into ``N`` equal-sized groups.
smooth : int or float
Smoothing parameter used by the interpolation function.
order : int
Order of the spline to be used for interpolation. Default is
for linear interpolation.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,samples,bins=10,equibin=True,smooth=0,order=1,**kwargs):
self.samples = samples
if type(bins)==type(10) and equibin:
N = len(samples)//bins
sortsamples = np.sort(samples)
bins = sortsamples[0::N]
if bins[-1] != sortsamples[-1]:
bins = np.concatenate([bins,np.array([sortsamples[-1]])])
hist,bins = np.histogram(samples,bins=bins,density=True)
self.bins = bins
bins = (bins[1:] + bins[:-1])/2.
pdf_initial = interpolate(bins,hist,s=smooth,k=order)
def pdf(x):
x = np.atleast_1d(x)
y = pdf_initial(x)
w = np.where((x < self.bins[0]) | (x > self.bins[-1]))
y[w] = 0
return y
cdf = interpolate(bins,hist.cumsum()/hist.cumsum().max(),s=smooth,
k=order)
if 'maxval' not in kwargs:
kwargs['maxval'] = samples.max()
if 'minval' not in kwargs:
kwargs['minval'] = samples.min()
keywords = {'bins':bins,'smooth':smooth,'order':order}
Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
def __str__(self):
return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
def plothist(self,fig=None,**kwargs):
"""Plots a histogram of samples using provided bins.
Parameters
----------
fig : None or int
Parameter passed to `setfig`.
kwargs
Keyword arguments passed to `plt.hist`.
"""
setfig(fig)
plt.hist(self.samples,bins=self.bins,**kwargs)
def resample(self,N):
"""Returns a bootstrap resampling of provided samples.
Parameters
----------
N : int
Number of samples.
"""
inds = rand.randint(len(self.samples),size=N)
return self.samples[inds]
def save_hdf(self,filename,path='',**kwargs):
Distribution.save_hdf(self,filename,path=path,**kwargs)
class Box_Distribution(Distribution):
    """Uniform (top-hat) distribution between two limits.

    Parameters
    ----------
    lo,hi : float
        Lower/upper limits of the distribution.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self, lo, hi, **kwargs):
        self.lo = lo
        self.hi = hi
        width = hi - lo
        def pdf(x):
            # constant density; the `0*x` term keeps array inputs array-shaped
            return 1. / width + 0 * x
        def cdf(x):
            x = np.atleast_1d(x)
            frac = (x - lo) / width
            frac[x < lo] = 0
            frac[x > hi] = 1
            return frac
        Distribution.__init__(self, pdf, cdf, minval=lo, maxval=hi, **kwargs)

    def __str__(self):
        return '%.1f < %s < %.1f' % (self.lo, self.name, self.hi)

    def resample(self, N):
        """Draw N uniform samples between the limits."""
        span = self.maxval - self.minval
        return rand.random(size=N) * span + self.minval
############## Double LorGauss ###########
def double_lorgauss(x,p):
    """Evaluates a normalized mixture of a double-sided Gaussian and a
    double-sided Lorentzian.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate distribution
    p : array-like
        Input parameters: mu (mode of distribution),
        sig1 (LH Gaussian width),
        sig2 (RH Gaussian width),
        gam1 (LH Lorentzian width),
        gam2 (RH Lorentzian width),
        G1 (LH Gaussian "strength"),
        G2 (RH Gaussian "strength").

    Returns
    -------
    values : float or array-like
        Double LorGauss distribution evaluated at input(s). If single value
        provided, single value returned.
    """
    mu,sig1,sig2,gam1,gam2,G1,G2 = p
    # Widths and strengths are magnitudes; take absolute values so a
    # fitter can roam over negative parameter values freely.
    gam1 = abs(float(gam1))
    gam2 = abs(float(gam2))  # BUGFIX: was assigned to misspelled 'gab2', leaving gam2 signed
    G1 = abs(G1)
    G2 = abs(G2)
    sig1 = abs(sig1)
    sig2 = abs(sig2)
    # Lorentzian strengths fixed by continuity at mu and overall
    # normalization (G1 + G2 + L1 + L2 = 4).
    L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) -
                                 (gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) +
                                 (gam2/gam1)*(4-G1-G2))
    L1 = 4 - G1 - G2 - L2
    y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\
        L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2)
    y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\
        L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2)
    lo = (x < mu)
    hi = (x >= mu)
    return y1*lo + y2*hi
def fit_double_lorgauss(bins,h,Ntry=5):
    """Uses lmfit to fit a "Double LorGauss" distribution to a provided histogram.

    Uses a grid of starting guesses to try to avoid local minima.

    Parameters
    ----------
    bins, h : array-like
        Bins and heights of a histogram, as returned by, e.g., `np.histogram`.
    Ntry : int, optional
        Spacing of grid for starting guesses. Will try `Ntry**2` different
        initial values of the "Gaussian strength" parameters `G1` and `G2`.

    Returns
    -------
    parameters : tuple
        Parameters of best-fit "double LorGauss" distribution.

    Raises
    ------
    ImportError
        If the lmfit module is not available.
    """
    try:
        from lmfit import minimize, Parameters
    except ImportError:
        raise ImportError('you need lmfit to use this function.')

    #make sure histogram is normalized
    h /= np.trapz(h,bins)

    #zero-pad the ends of the distribution to keep fits positive
    N = len(bins)
    # BUGFIX: N/10 is a float on Python 3 and breaks np.zeros/np.linspace;
    # use integer division for the pad size.
    npad = N//10
    dbin = (bins[1:]-bins[:-1]).mean()
    newbins = np.concatenate((np.linspace(bins.min() - npad*dbin,bins.min(),npad),
                              bins,
                              np.linspace(bins.max(),bins.max() + npad*dbin,npad)))
    newh = np.concatenate((np.zeros(npad),h,np.zeros(npad)))

    # initial guesses: peak location and half-max half-width of the histogram
    # BUGFIX: was `bins[np.argmax(newh)]` -- argmax of the *padded* array was
    # used to index the unpadded one, shifting the guess by the pad length.
    mu0 = newbins[np.argmax(newh)]
    sig0 = abs(mu0 - newbins[np.argmin(np.absolute(newh - 0.5*newh.max()))])

    def set_params(G1,G2):
        params = Parameters()
        params.add('mu',value=mu0)
        params.add('sig1',value=sig0)
        params.add('sig2',value=sig0)
        params.add('gam1',value=sig0/10)
        params.add('gam2',value=sig0/10)
        params.add('G1',value=G1)
        params.add('G2',value=G2)
        return params

    sum_devsq_best = np.inf
    outkeep = None
    # grid of starting guesses for the two Gaussian strengths
    for G1 in np.linspace(0.1,1.9,Ntry):
        for G2 in np.linspace(0.1,1.9,Ntry):
            params = set_params(G1,G2)
            def residual(ps):
                # lmfit varies `params` in place; read the current values
                pars = (params['mu'].value,
                        params['sig1'].value,
                        params['sig2'].value,
                        params['gam1'].value,
                        params['gam2'].value,
                        params['G1'].value,
                        params['G2'].value)
                hmodel = double_lorgauss(newbins,pars)
                return newh-hmodel
            out = minimize(residual,params)
            pars = (out.params['mu'].value,out.params['sig1'].value,
                    out.params['sig2'].value,out.params['gam1'].value,
                    out.params['gam2'].value,out.params['G1'].value,
                    out.params['G2'].value)
            sum_devsq = ((newh - double_lorgauss(newbins,pars))**2).sum()
            # keep the fit with the smallest sum of squared deviations
            if sum_devsq < sum_devsq_best:
                sum_devsq_best = sum_devsq
                outkeep = out
    return (outkeep.params['mu'].value,abs(outkeep.params['sig1'].value),
            abs(outkeep.params['sig2'].value),abs(outkeep.params['gam1'].value),
            abs(outkeep.params['gam2'].value),abs(outkeep.params['G1'].value),
            abs(outkeep.params['G2'].value))
class DoubleLorGauss_Distribution(Distribution):
    """A Distribution wrapping the `double_lorgauss` pdf.

    Parameters
    ----------
    mu,sig1,sig2,gam1,gam2,G1,G2 : float
        Parameters of `double_lorgauss` function.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self, mu, sig1, sig2, gam1, gam2, G1, G2, **kwargs):
        self.mu = mu
        self.sig1 = sig1
        self.sig2 = sig2
        self.gam1 = gam1
        self.gam2 = gam2
        self.G1 = G1
        self.G2 = G2
        def pdf(x):
            pars = (self.mu, self.sig1, self.sig2,
                    self.gam1, self.gam2, self.G1, self.G2)
            return double_lorgauss(x, pars)
        keywords = {'mu': mu, 'sig1': sig1,
                    'sig2': sig2, 'gam1': gam1, 'gam2': gam2,
                    'G1': G1, 'G2': G2}
        Distribution.__init__(self, pdf, keywords=keywords, **kwargs)
######## DoubleGauss #########
def doublegauss(x,p):
    """Evaluate the normalized two-sided Gaussian distribution.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate distribution
    p : array-like
        Parameters of distribution: (mu: mode of distribution,
        sig1: LH width, sig2: RH width)

    Returns
    -------
    value : float or array-like
        Distribution evaluated at input value(s). If single value provided,
        single value returned.
    """
    mu, sig1, sig2 = p
    x = np.atleast_1d(x)
    # single amplitude normalizes the two half-Gaussians jointly
    A = 1. / (np.sqrt(2 * np.pi) * (sig1 + sig2) / 2.)
    left = A * np.exp(-(x - mu)**2 / (2 * sig1**2))
    right = A * np.exp(-(x - mu)**2 / (2 * sig2**2))
    y = x * 0
    below = x < mu
    y[below] = left[below]
    y[~below] = right[~below]
    if np.size(x) == 1:
        return y[0]
    return y
def fit_doublegauss_samples(samples,**kwargs):
    """Fits a two-sided Gaussian to a set of samples.

    Calculates the 0.16, 0.5, and 0.84 quantiles and passes these to
    `fit_doublegauss` for fitting.

    Parameters
    ----------
    samples : array-like
        Samples to which to fit the Gaussian.
    kwargs
        Keyword arguments passed to `fit_doublegauss`.
    """
    sorted_samples = np.sort(samples)
    N = len(samples)
    # BUGFIX: N/2 is a float on Python 3 and cannot be used as an index
    med = sorted_samples[N//2]
    siglo = med - sorted_samples[int(0.16*N)]
    sighi = sorted_samples[int(0.84*N)] - med
    return fit_doublegauss(med,siglo,sighi,median=True,**kwargs)
def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True):
    """Fits a two-sided Gaussian distribution to match a given confidence interval.

    The center of the distribution may be either the median or the mode.

    Parameters
    ----------
    med : float
        The center of the distribution to which to fit; treated as the mode
        unless the `median` keyword is set to True.
    siglo : float
        Value at lower quantile (`q1 = 0.5 - interval/2`) to fit. Often this
        is the "lower error bar."
    sighi : float
        Value at upper quantile (`q2 = 0.5 + interval/2`) to fit. Often this
        is the "upper error bar."
    interval : float, optional
        The confidence interval enclosed by the provided error bars. Default
        is 0.683 (1-sigma).
    p0 : array-like, optional
        Initial guess `doublegauss` parameters for the fit (`mu, sig1, sig2`).
    median : bool, optional
        Whether to treat the `med` parameter as the median or mode
        (default will be mode).
    return_distribution : bool, optional
        If `True`, then function will return a `DoubleGauss_Distribution`
        object. Otherwise, will return just the parameters.
    """
    half = interval / 2
    q1, q2 = 0.5 - half, 0.5 + half
    if median:
        # fit all three quantiles, letting the center float
        targetvals = np.array([med - siglo, med, med + sighi])
        qvals = np.array([q1, 0.5, q2])
        def objfn(pars):
            logging.debug('{}'.format(pars))
            logging.debug('{} {}'.format(doublegauss_cdf(targetvals, pars), qvals))
            return doublegauss_cdf(targetvals, pars) - qvals
        if p0 is None:
            p0 = [med, siglo, sighi]
        pfit, success = leastsq(objfn, p0)
    else:
        # center pinned at the mode; fit only the two widths
        targetvals = np.array([med - siglo, med + sighi])
        qvals = np.array([q1, q2])
        def objfn(pars):
            return doublegauss_cdf(targetvals, (med, pars[0], pars[1])) - qvals
        if p0 is None:
            p0 = [siglo, sighi]
        pfit, success = leastsq(objfn, p0)
        pfit = (med, pfit[0], pfit[1])
    if return_distribution:
        return DoubleGauss_Distribution(*pfit)
    return pfit
class DoubleGauss_Distribution(Distribution):
    """A two-sided (slightly asymmetric) Gaussian distribution.

    Two half-Normal distributions are patched together at the mode and
    normalized jointly; the pdf and cdf follow the `doublegauss` and
    `doublegauss_cdf` functions, respectively.

    Parameters
    ----------
    mu : float
        The mode of the distribution.
    siglo : float
        Width of lower half-Gaussian.
    sighi : float
        Width of upper half-Gaussian.
    kwargs
        Keyword arguments are passed to `Distribution` constructor.
    """
    def __init__(self, mu, siglo, sighi, **kwargs):
        self.mu = mu
        self.siglo = float(siglo)
        self.sighi = float(sighi)
        pars = (mu, siglo, sighi)
        def pdf(x):
            return doublegauss(x, pars)
        def cdf(x):
            return doublegauss_cdf(x, pars)
        if 'minval' not in kwargs:
            kwargs['minval'] = mu - 5 * siglo
        if 'maxval' not in kwargs:
            kwargs['maxval'] = mu + 5 * sighi
        keywords = {'mu': mu, 'siglo': siglo, 'sighi': sighi}
        Distribution.__init__(self, pdf, cdf, keywords=keywords, **kwargs)

    def __str__(self):
        return '%s = %.2f +%.2f -%.2f' % (self.name, self.mu, self.sighi, self.siglo)

    def resample(self, N, **kwargs):
        """Draw N random samples from the two-sided Gaussian."""
        lovals = self.mu - np.absolute(rand.normal(size=N) * self.siglo)
        hivals = self.mu + np.absolute(rand.normal(size=N) * self.sighi)
        # the upper branch is chosen with probability sighi/(siglo+sighi)
        p_hi = self.sighi / (self.sighi + self.siglo)
        pick_hi = rand.random(size=N) < p_hi
        return np.where(pick_hi, hivals, lovals)
def powerlawfn(alpha, minval, maxval):
    """Return a normalized power-law pdf ~ x**alpha on [minval, maxval]."""
    C = powerlawnorm(alpha, minval, maxval)
    def fn(inpx):
        vals = np.atleast_1d(inpx)
        out = C * vals**alpha
        # zero outside the supported interval
        out[(vals < minval) | (vals > maxval)] = 0
        return out
    return fn
def powerlawnorm(alpha,minval,maxval):
    """Normalization constant for a power law x**alpha on [minval, maxval].

    Parameters
    ----------
    alpha : float or array-like
        Power-law index (or array of indices).
    minval, maxval : float
        Bounds of the distribution.

    Returns
    -------
    C : float or ndarray
        Constant(s) such that C * x**alpha integrates to 1 on the interval.
    """
    if np.size(alpha)==1:
        if alpha == -1:
            # alpha == -1 is the special logarithmic case
            C = 1/np.log(maxval/minval)
        else:
            C = (1+alpha)/(maxval**(1+alpha)-minval**(1+alpha))
    else:
        C = np.zeros(np.size(alpha))
        w = np.where(alpha==-1)
        # BUGFIX: was `if len(w[0]>0):` -- a precedence bug (len of a boolean
        # array) that only happened to give the intended truthiness.
        if len(w[0]) > 0:
            # handle the alpha == -1 entries separately from the rest
            C[w] = 1./np.log(maxval/minval)*np.ones(len(w[0]))
            nw = np.where(alpha != -1)
            C[nw] = (1+alpha[nw])/(maxval**(1+alpha[nw])-minval**(1+alpha[nw]))
        else:
            C = (1+alpha)/(maxval**(1+alpha)-minval**(1+alpha))
    return C
class PowerLaw_Distribution(Distribution):
    """Power-law distribution, pdf ~ x**alpha on [minval, maxval].

    Parameters
    ----------
    alpha : float
        Power-law index.
    minval, maxval : float
        Bounds of the distribution.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self,alpha,minval,maxval,**kwargs):
        self.alpha = alpha
        pdf = powerlawfn(alpha,minval,maxval)
        # BUGFIX: **kwargs were accepted but silently dropped before
        Distribution.__init__(self,pdf,minval=minval,maxval=maxval,**kwargs)
######## KDE ###########
class KDE_Distribution(Distribution):
    """Distribution defined by a kernel density estimate of samples.

    Parameters
    ----------
    samples : array-like
        Samples from which to build the KDE.
    adaptive, draw_direct, bandwidth
        Passed through to the `KDE` constructor.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self, samples, adaptive=True, draw_direct=True, bandwidth=None, **kwargs):
        self.samples = samples
        self.bandwidth = bandwidth
        self.kde = KDE(samples, adaptive=adaptive, draw_direct=draw_direct,
                       bandwidth=bandwidth)
        if 'minval' not in kwargs:
            kwargs['minval'] = samples.min()
        if 'maxval' not in kwargs:
            kwargs['maxval'] = samples.max()
        keywords = {'adaptive': adaptive, 'draw_direct': draw_direct,
                    'bandwidth': bandwidth}
        # the KDE object itself is callable and serves as the pdf
        Distribution.__init__(self, self.kde, keywords=keywords, **kwargs)

    def save_hdf(self, filename, path='', **kwargs):
        # delegate; the base class also persists self.samples
        Distribution.save_hdf(self, filename, path=path, **kwargs)

    def __str__(self):
        return '%s = %.1f +/- %.1f' % (self.name, self.samples.mean(), self.samples.std())

    def resample(self, N, **kwargs):
        """Draw samples from the underlying KDE."""
        return self.kde.resample(N, **kwargs)
class KDE_Distribution_Fromtxt(KDE_Distribution):
    """KDE distribution built from samples stored in a whitespace text file."""
    def __init__(self, filename, **kwargs):
        data = np.loadtxt(filename)
        KDE_Distribution.__init__(self, data, **kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
fit_doublegauss_samples
|
python
|
def fit_doublegauss_samples(samples,**kwargs):
sorted_samples = np.sort(samples)
N = len(samples)
med = sorted_samples[N/2]
siglo = med - sorted_samples[int(0.16*N)]
sighi = sorted_samples[int(0.84*N)] - med
return fit_doublegauss(med,siglo,sighi,median=True,**kwargs)
|
Fits a two-sided Gaussian to a set of samples.
Calculates 0.16, 0.5, and 0.84 quantiles and passes these to
`fit_doublegauss` for fitting.
Parameters
----------
samples : array-like
Samples to which to fit the Gaussian.
kwargs
Keyword arguments passed to `fit_doublegauss`.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L849-L868
|
[
"def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True):\n \"\"\"Fits a two-sided Gaussian distribution to match a given confidence interval.\n\n The center of the distribution may be either the median or the mode.\n\n Parameters\n ----------\n med : float\n The center of the distribution to which to fit. Default this\n will be the mode unless the `median` keyword is set to True.\n\n siglo : float\n Value at lower quantile (`q1 = 0.5 - interval/2`) to fit. Often this is\n the \"lower error bar.\"\n\n sighi : float\n Value at upper quantile (`q2 = 0.5 + interval/2`) to fit. Often this is\n the \"upper error bar.\"\n\n interval : float, optional\n The confidence interval enclosed by the provided error bars. Default\n is 0.683 (1-sigma).\n\n p0 : array-like, optional\n Initial guess `doublegauss` parameters for the fit (`mu, sig1, sig2`).\n\n median : bool, optional\n Whether to treat the `med` parameter as the median or mode\n (default will be mode).\n\n return_distribution: bool, optional\n If `True`, then function will return a `DoubleGauss_Distribution` object.\n Otherwise, will return just the parameters.\n \"\"\"\n if median:\n q1 = 0.5 - (interval/2)\n q2 = 0.5 + (interval/2)\n targetvals = np.array([med-siglo,med,med+sighi])\n qvals = np.array([q1,0.5,q2])\n def objfn(pars):\n logging.debug('{}'.format(pars))\n logging.debug('{} {}'.format(doublegauss_cdf(targetvals,pars),qvals))\n return doublegauss_cdf(targetvals,pars) - qvals\n\n if p0 is None:\n p0 = [med,siglo,sighi]\n pfit,success = leastsq(objfn,p0)\n\n else:\n q1 = 0.5 - (interval/2)\n q2 = 0.5 + (interval/2)\n targetvals = np.array([med-siglo,med+sighi])\n qvals = np.array([q1,q2])\n def objfn(pars):\n params = (med,pars[0],pars[1])\n return doublegauss_cdf(targetvals,params) - qvals\n\n if p0 is None:\n p0 = [siglo,sighi]\n pfit,success = leastsq(objfn,p0)\n pfit = (med,pfit[0],pfit[1])\n\n if return_distribution:\n dist = DoubleGauss_Distribution(*pfit)\n 
return dist\n else:\n return pfit\n"
] |
from __future__ import absolute_import, division, print_function
__author__ = 'Timothy D. Morton <tim.morton@gmail.com>'
"""
Defines objects useful for describing probability distributions.
"""
import numpy as np
import matplotlib.pyplot as plt
import logging
from scipy.interpolate import UnivariateSpline as interpolate
from scipy.integrate import quad
import numpy.random as rand
from scipy.special import erf
from scipy.optimize import leastsq
import pandas as pd
from plotutils import setfig
from .kde import KDE
#figure this generic loading thing out; draft stage currently
def load_distribution(filename,path=''):
    """Load a distribution previously written by `Distribution.save_hdf`.

    NOTE(review): draft/incomplete. The tabulated pdf/cdf splines, sample
    array, and saved keywords are read back, but the final
    ``t.__init__()`` call invokes an unbound constructor with no arguments
    and cannot produce a working instance -- use `Distribution_FromH5`
    instead until this is finished.
    """
    fns = pd.read_hdf(filename,path)
    store = pd.HDFStore(filename)
    if '{}/samples'.format(path) in store:
        samples = pd.read_hdf(filename,path+'/samples')
        samples = np.array(samples)
    minval = fns['vals'].iloc[0]
    maxval = fns['vals'].iloc[-1]
    # exact (s=0) spline interpolants of the saved pdf/cdf tables
    pdf = interpolate(fns['vals'],fns['pdf'],s=0)
    cdf = interpolate(fns['vals'],fns['cdf'],s=0)
    attrs = store.get_storer('{}/fns'.format(path)).attrs
    keywords = attrs.keywords
    t = attrs.disttype
    store.close()
    return t.__init__()
class Distribution(object):
    """Base class describing a probability distribution.

    Has some minimal functional overlap with scipy.stats random variates
    (e.g. `ppf`, `rvs`)

    Parameters
    ----------
    pdf : callable
        The probability density function to be used. Does not have to be
        normalized, but must be non-negative.
    cdf : callable, optional
        The cumulative distribution function.
        NOTE(review): as implemented, whenever minval/maxval are finite a
        tabulated CDF built from the pdf silently replaces any cdf passed
        here (subclasses' analytic cdfs included) -- confirm whether that
        override is intended before changing it.
    name : string, optional
        The name of the distribution (will be used, for example, to label a
        plot). Default is empty string.
    minval,maxval : float, optional
        The minimum and maximum values of the distribution. The Distribution
        will evaluate to zero outside these ranges, and this will also define
        the range of the CDF. Defaults are -np.inf and +np.inf. If these are
        not explicitly provided, then a CDF function must be provided.
    norm : float, optional
        If not provided, this will be calculated by integrating the pdf from
        minval to maxval so that the Distribution is a proper PDF that
        integrates to unity. `norm` can be non-unity if desired, but beware,
        as this will cause some things to act unexpectedly.
    cdf_pts : int, optional
        Number of points to tabulate in order to calculate CDF. Default 500.
    keywords : dict, optional
        Optional dictionary of keywords; these will be saved with the
        distribution when `save_hdf` is called.

    Raises
    ------
    ValueError
        If `cdf` is not provided and minval or maxval are infinity.
    """
    def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
                 cdf_pts=500,keywords=None):
        self.name = name
        self.pdf = pdf
        self.cdf = cdf
        self.minval = minval
        self.maxval = maxval

        if keywords is None:
            self.keywords = {}
        else:
            self.keywords = keywords
        # these basics always accompany any user-supplied keywords
        self.keywords['name'] = name
        self.keywords['minval'] = minval
        self.keywords['maxval'] = maxval

        if norm is None:
            # integrate the (possibly unnormalized) pdf to obtain the norm
            self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
        else:
            self.norm = norm

        if cdf is None and (minval == -np.inf or maxval == np.inf):
            raise ValueError('must provide either explicit cdf function or explicit min/max values')
        else: #tabulate & interpolate CDF (see NOTE in class docstring: this
              # branch also runs, and overrides, when a cdf was provided).
            pts = np.linspace(minval,maxval,cdf_pts)
            pdfgrid = self(pts)
            cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
            cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
            def cdf(x):
                # clamp the interpolant to [0,1] outside the support
                x = np.atleast_1d(x)
                y = np.atleast_1d(cdf_fn(x))
                y[np.where(x < self.minval)] = 0
                y[np.where(x > self.maxval)] = 1
                return y
            self.cdf = cdf

            # record where the tabulated CDF saturates, to tighten the
            # default resampling range
            zero_mask = cdfgrid==0
            one_mask = cdfgrid==1
            if zero_mask.sum()>0:
                self.minval_cdf = pts[zero_mask][-1] #last 0 value
            if one_mask.sum()>0:
                self.maxval_cdf = pts[one_mask][0] #first 1 value

    def pctile(self,pct,res=1000):
        """Returns the desired percentile of the distribution.

        Will only work if properly normalized. Designed to mimic the `ppf`
        method of the `scipy.stats` random variate objects. Works by
        gridding the CDF at a given resolution and matching the nearest
        point. NB, this is of course not as precise as an analytic ppf.

        Parameters
        ----------
        pct : float
            Percentile between 0 and 1.
        res : int, optional
            The resolution at which to grid the CDF to find the percentile.

        Returns
        -------
        percentile : float
        """
        grid = np.linspace(self.minval,self.maxval,res)
        return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]

    ppf = pctile

    def save_hdf(self,filename,path='',res=1000,logspace=False):
        """Saves distribution to an HDF5 file.

        Saves a pandas `DataFrame` containing tabulated pdf and cdf values
        at a specified resolution. After saving to a particular path, a
        distribution may be regenerated using the `Distribution_FromH5`
        subclass.

        Parameters
        ----------
        filename : string
            File in which to save the distribution. Should end in .h5.
        path : string, optional
            Path in which to save the distribution within the .h5 file. By
            default this is an empty string, which will lead to saving the
            `fns` dataframe at the root level of the file.
        res : int, optional
            Resolution at which to grid the distribution for saving.
        logspace : bool, optional
            Sets whether the tabulated function should be gridded with log
            or linear spacing. Default is linear (logspace=False).
        """
        if logspace:
            vals = np.logspace(np.log10(self.minval),
                               np.log10(self.maxval),
                               res)
        else:
            vals = np.linspace(self.minval,self.maxval,res)
        d = {'vals':vals,
             'pdf':self(vals),
             'cdf':self.cdf(vals)}
        df = pd.DataFrame(d)
        df.to_hdf(filename,path+'/fns')
        if hasattr(self,'samples'):
            # subclasses built from samples persist them alongside the fns
            s = pd.Series(self.samples)
            s.to_hdf(filename,path+'/samples')
        store = pd.HDFStore(filename)
        attrs = store.get_storer('{}/fns'.format(path)).attrs
        attrs.keywords = self.keywords
        attrs.disttype = type(self)
        store.close()

    def __call__(self,x):
        """Evaluates pdf, forcing zero outside (self.minval, self.maxval).

        Parameters
        ----------
        x : float, array-like
            Value(s) at which to evaluate PDF.

        Returns
        -------
        pdf : float, array-like
            Probability density (or re-normalized density if self.norm was
            explicitly provided).
        """
        y = self.pdf(x)
        x = np.atleast_1d(x)
        y = np.atleast_1d(y)
        y[(x < self.minval) | (x > self.maxval)] = 0
        y /= self.norm
        if np.size(x)==1:
            return y[0]
        else:
            return y

    def __str__(self):
        return '%s = %.2f +%.2f -%.2f' % (self.name,
                                          self.pctile(0.5),
                                          self.pctile(0.84)-self.pctile(0.5),
                                          self.pctile(0.5)-self.pctile(0.16))

    def __repr__(self):
        return '<%s object: %s>' % (type(self),str(self))

    def plot(self,minval=None,maxval=None,fig=None,log=False,
             npts=500,**kwargs):
        """Plots distribution.

        Parameters
        ----------
        minval : float, optional
            minimum value to plot. Required if minval of Distribution is
            `-np.inf`.
        maxval : float, optional
            maximum value to plot. Required if maxval of Distribution is
            `np.inf`.
        fig : None or int, optional
            Parameter to pass to `setfig`. If `None`, then a new figure is
            created; if a non-zero integer, the plot will go to that figure
            (clearing everything first); if zero, then will overplot on
            current axes.
        log : bool, optional
            If `True`, the x-spacing of the points to plot will be
            logarithmic.
        npts : int, optional
            Number of points to plot.
        kwargs
            Keyword arguments are passed to plt.plot

        Raises
        ------
        ValueError
            If finite lower and upper bounds are not provided.
        """
        if minval is None:
            minval = self.minval
        if maxval is None:
            maxval = self.maxval
        if maxval==np.inf or minval==-np.inf:
            raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')

        if log:
            xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
        else:
            xs = np.linspace(minval,maxval,npts)

        setfig(fig)
        plt.plot(xs,self(xs),**kwargs)
        plt.xlabel(self.name)
        # BUGFIX: the ymin/ymax keywords were removed in matplotlib 3.0;
        # positional limits work on all versions.
        plt.ylim(0, self(xs).max()*1.2)

    def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
        """Returns random samples generated according to the distribution.

        Mirrors basic functionality of the `rvs` method for `scipy.stats`
        random variates. Implemented by mapping uniform numbers onto the
        inverse CDF using a closest-matching grid approach.

        Parameters
        ----------
        N : int
            Number of samples to return
        minval,maxval : float, optional
            Minimum/maximum values to resample. Should both usually just be
            `None`, which will default to `self.minval`/`self.maxval`.
        log : bool, optional
            Whether grid should be log- or linear-spaced.
        res : int, optional
            Resolution of CDF grid used.

        Returns
        -------
        values : ndarray
            N samples.

        Raises
        ------
        ValueError
            If maxval/minval are +/- infinity, this doesn't work because of
            the grid-based approach.
        """
        N = int(N)
        if minval is None:
            if hasattr(self,'minval_cdf'):
                minval = self.minval_cdf
            else:
                minval = self.minval
        if maxval is None:
            if hasattr(self,'maxval_cdf'):
                maxval = self.maxval_cdf
            else:
                maxval = self.maxval

        if maxval==np.inf or minval==-np.inf:
            raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')

        u = rand.random(size=N)
        # BUGFIX: was bare `log10` (NameError) and a float point count
        # (default res=1e4 breaks np.linspace/logspace on Python 3).
        if log:
            vals = np.logspace(np.log10(minval),np.log10(maxval),int(res))
        else:
            vals = np.linspace(minval,maxval,int(res))

        #sometimes cdf is flat. so ys will need to be uniqued
        ys,yinds = np.unique(self.cdf(vals), return_index=True)
        vals = vals[yinds]
        inds = np.digitize(u,ys)
        return vals[inds]

    def rvs(self,*args,**kwargs):
        return self.resample(*args,**kwargs)
class Distribution_FromH5(Distribution):
    """Creates a Distribution object from one saved to an HDF file.

    File must have a `DataFrame` saved under [path]/fns in the .h5 file,
    containing 'vals', 'pdf', and 'cdf' columns. If samples are saved in
    the HDF storer, then they will be restored to this object; so will any
    saved keyword attributes.

    Appropriate .h5 files are created by a call to the `save_hdf` method
    of the generic `Distribution` class.

    Parameters
    ----------
    filename : string
        .h5 file where the distribution is saved.
    path : string, optional
        Path within the .h5 file where the distribution is saved. By
        default this will be the root level, but can be anywhere.
    kwargs
        Keyword arguments are passed to the `Distribution` constructor.
    """
    def __init__(self,filename,path='',**kwargs):
        # Read everything in one pass. BUGFIX: the original opened a
        # second HDFStore handle without ever closing the first.
        store = pd.HDFStore(filename,'r')
        try:
            fns = store[path+'/fns']
            if '{}/samples'.format(path) in store:
                samples = store[path+'/samples']
                self.samples = np.array(samples)
            try:
                keywords = store.get_storer('{}/fns'.format(path)).attrs.keywords
            except AttributeError:
                logging.warning('saved distribution {} does not have keywords or disttype saved; perhaps this distribution was written with an older version.'.format(filename))
                keywords = {}
        finally:
            store.close()

        minval = fns['vals'].iloc[0]
        maxval = fns['vals'].iloc[-1]
        pdf = interpolate(fns['vals'],fns['pdf'],s=0,k=1)

        #check to see if tabulated CDF is monotonically increasing
        # BUGFIX: subtracting pandas Series slices aligns on index, so the
        # original difference could never be negative; diff the raw values.
        d_cdf = np.diff(np.asarray(fns['cdf']))
        if np.any(d_cdf < 0):
            logging.warning('tabulated CDF in {} is not strictly increasing. Recalculating CDF from PDF'.format(filename))
            cdf = None #in this case, just recalc cdf from pdf
        else:
            cdf = interpolate(fns['vals'],fns['cdf'],s=0,k=1)

        Distribution.__init__(self,pdf,cdf,minval=minval,maxval=maxval,
                              **kwargs)
        # restore saved keyword attributes after init so they take
        # precedence over defaults (e.g. `name`)
        for kw,val in keywords.items():  # BUGFIX: iteritems() is Python 2 only
            setattr(self,kw,val)
class Empirical_Distribution(Distribution):
    """Distribution built from a tabulated PDF.

    Parameters
    ----------
    xs : array-like
        x-values at which the PDF is evaluated
    pdf : array-like
        Values of pdf at provided x-values.
    smooth : int or float
        Smoothing parameter used by the interpolation.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self, xs, pdf, smooth=0, **kwargs):
        # normalize the tabulated density before splining it
        pdf /= np.trapz(pdf, xs)
        fn = interpolate(xs, pdf, s=smooth)
        keywords = {'smooth': smooth}
        Distribution.__init__(self, fn, minval=xs.min(), maxval=xs.max(),
                              keywords=keywords, **kwargs)
class Gaussian_Distribution(Distribution):
    """A normal distribution with given mean and width.

    ***It's probably better to use scipy.stats.norm rather than this if
    you care about numerical precision/speed and don't care about the
    plotting bells/whistles etc. the `Distribution` class provides.***

    Parameters
    ----------
    mu : float
        Mean of normal distribution.
    sig : float
        Width of normal distribution.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self, mu, sig, **kwargs):
        self.mu = mu
        self.sig = sig
        amp = 1. / np.sqrt(2 * np.pi * sig**2)
        def pdf(x):
            return amp * np.exp(-(x - mu)**2 / (2 * sig**2))
        def cdf(x):
            return 0.5 * (1 + erf((x - mu) / np.sqrt(2 * sig**2)))
        if 'minval' not in kwargs:
            kwargs['minval'] = mu - 10 * sig
        if 'maxval' not in kwargs:
            kwargs['maxval'] = mu + 10 * sig
        keywords = {'mu': self.mu, 'sig': self.sig}
        Distribution.__init__(self, pdf, cdf, keywords=keywords, **kwargs)

    def __str__(self):
        return '%s = %.2f +/- %.2f' % (self.name, self.mu, self.sig)

    def resample(self, N, **kwargs):
        """Draw N samples directly from the normal distribution."""
        return self.mu + self.sig * rand.normal(size=int(N))
class Hist_Distribution(Distribution):
    """Generates a distribution from a histogram of provided samples.

    Uses `np.histogram` to create a histogram using the bins keyword, then
    interpolates this histogram to create the pdf to pass to the
    `Distribution` constructor.

    Parameters
    ----------
    samples : array-like
        The samples used to create the distribution
    bins : int or array-like, optional
        Keyword passed to `np.histogram`. If integer, this will be the
        number of bins, if array-like, then this defines bin edges.
    equibin : bool, optional
        If true and ``bins`` is an integer ``N``, then the bins will be
        found by splitting the data into ``N`` equal-sized groups.
    smooth : int or float
        Smoothing parameter used by the interpolation function.
    order : int
        Order of the spline to be used for interpolation. Default is 1,
        for linear interpolation.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self,samples,bins=10,equibin=True,smooth=0,order=1,**kwargs):
        self.samples = samples
        # idiom fix: was `type(bins)==type(10)`; the identity check keeps
        # the original behavior of excluding bools (unlike isinstance).
        if type(bins) is int and equibin:
            # equal-occupancy binning: every N-th sorted sample is an edge
            N = len(samples)//bins
            sortsamples = np.sort(samples)
            bins = sortsamples[0::N]
            if bins[-1] != sortsamples[-1]:
                # make sure the top edge reaches the largest sample
                bins = np.concatenate([bins,np.array([sortsamples[-1]])])
        hist,bins = np.histogram(samples,bins=bins,density=True)
        self.bins = bins
        bins = (bins[1:] + bins[:-1])/2.  # bin centers
        pdf_initial = interpolate(bins,hist,s=smooth,k=order)
        def pdf(x):
            # evaluate the spline, clamped to zero outside the bin range
            x = np.atleast_1d(x)
            y = pdf_initial(x)
            w = np.where((x < self.bins[0]) | (x > self.bins[-1]))
            y[w] = 0
            return y
        cdf = interpolate(bins,hist.cumsum()/hist.cumsum().max(),s=smooth,
                          k=order)
        if 'maxval' not in kwargs:
            kwargs['maxval'] = samples.max()
        if 'minval' not in kwargs:
            kwargs['minval'] = samples.min()
        keywords = {'bins':bins,'smooth':smooth,'order':order}
        Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)

    def __str__(self):
        return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())

    def plothist(self,fig=None,**kwargs):
        """Plots a histogram of samples using provided bins.

        Parameters
        ----------
        fig : None or int
            Parameter passed to `setfig`.
        kwargs
            Keyword arguments passed to `plt.hist`.
        """
        setfig(fig)
        plt.hist(self.samples,bins=self.bins,**kwargs)

    def resample(self,N):
        """Returns a bootstrap resampling of provided samples.

        Parameters
        ----------
        N : int
            Number of samples.
        """
        inds = rand.randint(len(self.samples),size=N)
        return self.samples[inds]

    def save_hdf(self,filename,path='',**kwargs):
        # delegate; the base class also persists self.samples
        Distribution.save_hdf(self,filename,path=path,**kwargs)
class Box_Distribution(Distribution):
    """Uniform (top-hat) distribution between two limits.

    Parameters
    ----------
    lo,hi : float
        Lower/upper limits of the distribution.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self, lo, hi, **kwargs):
        self.lo = lo
        self.hi = hi
        width = hi - lo
        def pdf(x):
            # constant density; the `0*x` term keeps array inputs array-shaped
            return 1. / width + 0 * x
        def cdf(x):
            x = np.atleast_1d(x)
            frac = (x - lo) / width
            frac[x < lo] = 0
            frac[x > hi] = 1
            return frac
        Distribution.__init__(self, pdf, cdf, minval=lo, maxval=hi, **kwargs)

    def __str__(self):
        return '%.1f < %s < %.1f' % (self.lo, self.name, self.hi)

    def resample(self, N):
        """Draw N uniform samples between the limits."""
        span = self.maxval - self.minval
        return rand.random(size=N) * span + self.minval
############## Double LorGauss ###########
def double_lorgauss(x,p):
    """Evaluates a normalized mixture of a double-sided Gaussian and a
    double-sided Lorentzian.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate distribution
    p : array-like
        Input parameters: mu (mode of distribution),
        sig1 (LH Gaussian width),
        sig2 (RH Gaussian width),
        gam1 (LH Lorentzian width),
        gam2 (RH Lorentzian width),
        G1 (LH Gaussian "strength"),
        G2 (RH Gaussian "strength").

    Returns
    -------
    values : float or array-like
        Double LorGauss distribution evaluated at input(s). If single value
        provided, single value returned.
    """
    mu,sig1,sig2,gam1,gam2,G1,G2 = p
    # Widths and strengths are magnitudes; take absolute values so a
    # fitter can roam over negative parameter values freely.
    gam1 = abs(float(gam1))
    gam2 = abs(float(gam2))  # BUGFIX: was assigned to misspelled 'gab2', leaving gam2 signed
    G1 = abs(G1)
    G2 = abs(G2)
    sig1 = abs(sig1)
    sig2 = abs(sig2)
    # Lorentzian strengths fixed by continuity at mu and overall
    # normalization (G1 + G2 + L1 + L2 = 4).
    L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) -
                                 (gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) +
                                 (gam2/gam1)*(4-G1-G2))
    L1 = 4 - G1 - G2 - L2
    y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\
        L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2)
    y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\
        L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2)
    lo = (x < mu)
    hi = (x >= mu)
    return y1*lo + y2*hi
def fit_double_lorgauss(bins,h,Ntry=5):
    """Uses lmfit to fit a "Double LorGauss" distribution to a provided histogram.

    Uses a grid of starting guesses to try to avoid local minima.

    Parameters
    ----------
    bins, h : array-like
        Bins and heights of a histogram, as returned by, e.g., `np.histogram`.
    Ntry : int, optional
        Spacing of grid for starting guesses. Will try `Ntry**2` different
        initial values of the "Gaussian strength" parameters `G1` and `G2`.

    Returns
    -------
    parameters : tuple
        Parameters of best-fit "double LorGauss" distribution.

    Raises
    ------
    ImportError
        If the lmfit module is not available.
    """
    try:
        from lmfit import minimize, Parameters, Parameter, report_fit
    except ImportError:
        raise ImportError('you need lmfit to use this function.')

    #make sure histogram is normalized (in place, mutates caller's array)
    h /= np.trapz(h,bins)

    #zero-pad the ends of the distribution to keep fits positive
    N = len(bins)
    # BUG FIX: N/10 is a float under Python 3 but is used below as an element
    # count for np.linspace/np.zeros; use integer division.
    npad = N//10
    dbin = (bins[1:]-bins[:-1]).mean()
    newbins = np.concatenate((np.linspace(bins.min() - npad*dbin,bins.min(),npad),
                              bins,
                              np.linspace(bins.max(),bins.max() + npad*dbin,npad)))
    newh = np.concatenate((np.zeros(npad),h,np.zeros(npad)))

    # Initial guesses: mode of the padded histogram and its half-max half-width.
    mu0 = bins[np.argmax(newh)]
    sig0 = abs(mu0 - newbins[np.argmin(np.absolute(newh - 0.5*newh.max()))])

    def set_params(G1,G2):
        # Fresh lmfit parameter set for one starting guess of (G1, G2).
        params = Parameters()
        params.add('mu',value=mu0)
        params.add('sig1',value=sig0)
        params.add('sig2',value=sig0)
        params.add('gam1',value=sig0/10)
        params.add('gam2',value=sig0/10)
        params.add('G1',value=G1)
        params.add('G2',value=G2)
        return params

    sum_devsq_best = np.inf
    outkeep = None
    # Grid of starting guesses for the two Gaussian strengths.
    for G1 in np.linspace(0.1,1.9,Ntry):
        for G2 in np.linspace(0.1,1.9,Ntry):
            params = set_params(G1,G2)
            def residual(ps):
                # assumes lmfit's minimize updates `params` in place
                # (older lmfit API) -- TODO confirm against installed version
                pars = (params['mu'].value,
                        params['sig1'].value,
                        params['sig2'].value,
                        params['gam1'].value,
                        params['gam2'].value,
                        params['G1'].value,
                        params['G2'].value)
                hmodel = double_lorgauss(newbins,pars)
                return newh-hmodel
            out = minimize(residual,params)
            pars = (out.params['mu'].value,out.params['sig1'].value,
                    out.params['sig2'].value,out.params['gam1'].value,
                    out.params['gam2'].value,out.params['G1'].value,
                    out.params['G2'].value)
            sum_devsq = ((newh - double_lorgauss(newbins,pars))**2).sum()
            # Keep the fit with the smallest summed squared deviation.
            if sum_devsq < sum_devsq_best:
                sum_devsq_best = sum_devsq
                outkeep = out
    return (outkeep.params['mu'].value,abs(outkeep.params['sig1'].value),
            abs(outkeep.params['sig2'].value),abs(outkeep.params['gam1'].value),
            abs(outkeep.params['gam2'].value),abs(outkeep.params['G1'].value),
            abs(outkeep.params['G2'].value))
class DoubleLorGauss_Distribution(Distribution):
    """Distribution wrapping the `double_lorgauss` pdf with fixed parameters.

    Parameters
    ----------
    mu,sig1,sig2,gam1,gam2,G1,G2 : float
        Parameters of `double_lorgauss` function.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self,mu,sig1,sig2,gam1,gam2,G1,G2,**kwargs):
        self.mu = mu
        self.sig1 = sig1
        self.sig2 = sig2
        self.gam1 = gam1
        self.gam2 = gam2
        self.G1 = G1
        self.G2 = G2
        def pdf(x):
            # Read parameters off the instance at call time.
            pars = (self.mu,self.sig1,self.sig2,
                    self.gam1,self.gam2,self.G1,self.G2)
            return double_lorgauss(x,pars)
        keywords = dict(mu=mu,sig1=sig1,sig2=sig2,
                        gam1=gam1,gam2=gam2,G1=G1,G2=G2)
        Distribution.__init__(self,pdf,keywords=keywords,**kwargs)
######## DoubleGauss #########
def doublegauss(x,p):
"""Evaluates normalized two-sided Gaussian distribution
Parameters
----------
x : float or array-like
Value(s) at which to evaluate distribution
p : array-like
Parameters of distribution: (mu: mode of distribution,
sig1: LH width,
sig2: RH width)
Returns
-------
value : float or array-like
Distribution evaluated at input value(s). If single value provided,
single value returned.
"""
mu,sig1,sig2 = p
x = np.atleast_1d(x)
A = 1./(np.sqrt(2*np.pi)*(sig1+sig2)/2.)
ylo = A*np.exp(-(x-mu)**2/(2*sig1**2))
yhi = A*np.exp(-(x-mu)**2/(2*sig2**2))
y = x*0
wlo = np.where(x < mu)
whi = np.where(x >= mu)
y[wlo] = ylo[wlo]
y[whi] = yhi[whi]
if np.size(x)==1:
return y[0]
else:
return y
def doublegauss_cdf(x,p):
    """Cumulative distribution function for two-sided Gaussian

    Parameters
    ----------
    x : float
        Input values at which to calculate CDF.
    p : array-like
        Parameters of distribution: (mu: mode of distribution,
        sig1: LH width,
        sig2: RH width)
    """
    x = np.atleast_1d(x)
    mu,sig1,sig2 = p
    sig1 = np.absolute(sig1)
    sig2 = np.absolute(sig2)
    # Probability mass on each side of the mode is proportional to its width.
    wlo = float(sig1)/(sig1 + sig2)
    whi = float(sig2)/(sig1 + sig2)
    below = wlo*(1 + erf((x-mu)/np.sqrt(2*sig1**2)))
    above = wlo + whi*(erf((x-mu)/np.sqrt(2*sig2**2)))
    return np.where(x < mu, below, above)
def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True):
    """Fits a two-sided Gaussian distribution to match a given confidence interval.

    The center of the distribution may be either the median or the mode.

    Parameters
    ----------
    med : float
        The center of the distribution to which to fit. Default this
        will be the mode unless the `median` keyword is set to True.
    siglo : float
        Value at lower quantile (`q1 = 0.5 - interval/2`) to fit. Often this is
        the "lower error bar."
    sighi : float
        Value at upper quantile (`q2 = 0.5 + interval/2`) to fit. Often this is
        the "upper error bar."
    interval : float, optional
        The confidence interval enclosed by the provided error bars. Default
        is 0.683 (1-sigma).
    p0 : array-like, optional
        Initial guess `doublegauss` parameters for the fit (`mu, sig1, sig2`).
    median : bool, optional
        Whether to treat the `med` parameter as the median or mode
        (default will be mode).
    return_distribution: bool, optional
        If `True`, then function will return a `DoubleGauss_Distribution` object.
        Otherwise, will return just the parameters.
    """
    qlo = 0.5 - (interval/2)
    qhi = 0.5 + (interval/2)
    if median:
        # All three parameters free, so the median lands on `med`.
        targetvals = np.array([med-siglo,med,med+sighi])
        qvals = np.array([qlo,0.5,qhi])
        if p0 is None:
            p0 = [med,siglo,sighi]
        def objfn(pars):
            logging.debug('{}'.format(pars))
            logging.debug('{} {}'.format(doublegauss_cdf(targetvals,pars),qvals))
            return doublegauss_cdf(targetvals,pars) - qvals
        pfit,success = leastsq(objfn,p0)
    else:
        # Mode fixed at `med`; only the two widths are fitted.
        targetvals = np.array([med-siglo,med+sighi])
        qvals = np.array([qlo,qhi])
        if p0 is None:
            p0 = [siglo,sighi]
        def objfn(widths):
            return doublegauss_cdf(targetvals,(med,widths[0],widths[1])) - qvals
        pfit,success = leastsq(objfn,p0)
        pfit = (med,pfit[0],pfit[1])
    if return_distribution:
        return DoubleGauss_Distribution(*pfit)
    else:
        return pfit
class DoubleGauss_Distribution(Distribution):
    """Asymmetric (two-sided) Gaussian distribution.

    Two half-Normal distributions are patched together at the mode and
    normalized appropriately; the pdf and cdf follow the `doublegauss`
    and `doublegauss_cdf` functions, respectively.

    Parameters
    ----------
    mu : float
        The mode of the distribution.
    siglo : float
        Width of lower half-Gaussian.
    sighi : float
        Width of upper half-Gaussian.
    kwargs
        Keyword arguments are passed to `Distribution` constructor.
    """
    def __init__(self,mu,siglo,sighi,**kwargs):
        self.mu = mu
        self.siglo = float(siglo)
        self.sighi = float(sighi)
        pars = (mu,siglo,sighi)
        def pdf(x):
            return doublegauss(x,pars)
        def cdf(x):
            return doublegauss_cdf(x,pars)
        # Default support: five widths on either side of the mode.
        kwargs.setdefault('minval', mu - 5*siglo)
        kwargs.setdefault('maxval', mu + 5*sighi)
        keywords = {'mu':mu,'siglo':siglo,'sighi':sighi}
        Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)

    def __str__(self):
        return '%s = %.2f +%.2f -%.2f' % (self.name,self.mu,self.sighi,self.siglo)

    def resample(self,N,**kwargs):
        """Random resampling of the doublegauss distribution
        """
        # Draw a half-normal deviate for each side, then pick a side with
        # probability proportional to that side's width.
        draws_lo = self.mu - np.absolute(rand.normal(size=N)*self.siglo)
        draws_hi = self.mu + np.absolute(rand.normal(size=N)*self.sighi)
        p_hi = self.sighi/(self.sighi + self.siglo)
        pick_hi = rand.random(size=N) < p_hi
        return np.where(pick_hi,draws_hi,draws_lo)
def powerlawfn(alpha,minval,maxval):
    """Return a normalized power-law pdf C*x**alpha truncated to [minval, maxval]."""
    norm = powerlawnorm(alpha,minval,maxval)
    def fn(inpx):
        vals = np.atleast_1d(inpx)
        out = norm*vals**(alpha)
        # Zero outside the supported range.
        out[(vals < minval) | (vals > maxval)] = 0
        return out
    return fn
def powerlawnorm(alpha,minval,maxval):
    """Normalization constant C such that C*x**alpha integrates to 1 on [minval, maxval].

    Parameters
    ----------
    alpha : float or array-like
        Power-law index; an array gives one constant per index.
    minval,maxval : float
        Integration limits.

    Returns
    -------
    C : float or ndarray
        Normalization constant(s); alpha == -1 uses the logarithmic
        special case 1/log(maxval/minval).
    """
    if np.size(alpha)==1:
        if alpha == -1:
            C = 1/np.log(maxval/minval)
        else:
            C = (1+alpha)/(maxval**(1+alpha)-minval**(1+alpha))
    else:
        # Vectorized over alpha. BUG FIX: the original tested
        # `len(w[0]>0)` (length of a comparison array), which only worked by
        # accident, and list inputs broke fancy indexing; boolean masks over an
        # ndarray make the intent explicit.
        alpha = np.asarray(alpha,dtype=float)
        C = np.empty(alpha.shape)
        is_log = (alpha == -1)
        if is_log.any():
            C[is_log] = 1./np.log(maxval/minval)
        a = alpha[~is_log]
        C[~is_log] = (1+a)/(maxval**(1+a)-minval**(1+a))
    return C
class PowerLaw_Distribution(Distribution):
    """Power-law distribution p(x) proportional to x**alpha on [minval, maxval].

    Parameters
    ----------
    alpha : float
        Power-law index.
    minval,maxval : float
        Truncation limits of the distribution.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self,alpha,minval,maxval,**kwargs):
        self.alpha = alpha
        pdf = powerlawfn(alpha,minval,maxval)
        # BUG FIX: **kwargs (e.g. name=...) were accepted but never forwarded.
        Distribution.__init__(self,pdf,minval=minval,maxval=maxval,**kwargs)
######## KDE ###########
class KDE_Distribution(Distribution):
def __init__(self,samples,adaptive=True,draw_direct=True,bandwidth=None,**kwargs):
self.samples = samples
self.bandwidth = bandwidth
self.kde = KDE(samples,adaptive=adaptive,draw_direct=draw_direct,
bandwidth=bandwidth)
if 'minval' not in kwargs:
kwargs['minval'] = samples.min()
if 'maxval' not in kwargs:
kwargs['maxval'] = samples.max()
keywords = {'adaptive':adaptive,'draw_direct':draw_direct,
'bandwidth':bandwidth}
Distribution.__init__(self,self.kde,keywords=keywords,**kwargs)
def save_hdf(self,filename,path='',**kwargs):
Distribution.save_hdf(self,filename,path=path,**kwargs)
def __str__(self):
return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
def resample(self,N,**kwargs):
return self.kde.resample(N,**kwargs)
class KDE_Distribution_Fromtxt(KDE_Distribution):
    """KDE distribution built from samples stored in a whitespace-delimited text file."""
    def __init__(self,filename,**kwargs):
        KDE_Distribution.__init__(self,np.loadtxt(filename),**kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
fit_doublegauss
|
python
|
def fit_doublegauss(med,siglo,sighi,interval=0.683,p0=None,median=False,return_distribution=True):
if median:
q1 = 0.5 - (interval/2)
q2 = 0.5 + (interval/2)
targetvals = np.array([med-siglo,med,med+sighi])
qvals = np.array([q1,0.5,q2])
def objfn(pars):
logging.debug('{}'.format(pars))
logging.debug('{} {}'.format(doublegauss_cdf(targetvals,pars),qvals))
return doublegauss_cdf(targetvals,pars) - qvals
if p0 is None:
p0 = [med,siglo,sighi]
pfit,success = leastsq(objfn,p0)
else:
q1 = 0.5 - (interval/2)
q2 = 0.5 + (interval/2)
targetvals = np.array([med-siglo,med+sighi])
qvals = np.array([q1,q2])
def objfn(pars):
params = (med,pars[0],pars[1])
return doublegauss_cdf(targetvals,params) - qvals
if p0 is None:
p0 = [siglo,sighi]
pfit,success = leastsq(objfn,p0)
pfit = (med,pfit[0],pfit[1])
if return_distribution:
dist = DoubleGauss_Distribution(*pfit)
return dist
else:
return pfit
|
Fits a two-sided Gaussian distribution to match a given confidence interval.
The center of the distribution may be either the median or the mode.
Parameters
----------
med : float
The center of the distribution to which to fit. Default this
will be the mode unless the `median` keyword is set to True.
siglo : float
Value at lower quantile (`q1 = 0.5 - interval/2`) to fit. Often this is
the "lower error bar."
sighi : float
Value at upper quantile (`q2 = 0.5 + interval/2`) to fit. Often this is
the "upper error bar."
interval : float, optional
The confidence interval enclosed by the provided error bars. Default
is 0.683 (1-sigma).
p0 : array-like, optional
Initial guess `doublegauss` parameters for the fit (`mu, sig1, sig2`).
median : bool, optional
Whether to treat the `med` parameter as the median or mode
(default will be mode).
return_distribution: bool, optional
If `True`, then function will return a `DoubleGauss_Distribution` object.
Otherwise, will return just the parameters.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L871-L937
| null |
from __future__ import absolute_import, division, print_function
__author__ = 'Timothy D. Morton <tim.morton@gmail.com>'
"""
Defines objects useful for describing probability distributions.
"""
import numpy as np
import matplotlib.pyplot as plt
import logging
from scipy.interpolate import UnivariateSpline as interpolate
from scipy.integrate import quad
import numpy.random as rand
from scipy.special import erf
from scipy.optimize import leastsq
import pandas as pd
from plotutils import setfig
from .kde import KDE
#figure this generic loading thing out; draft stage currently
def load_distribution(filename,path=''):
    """Load a saved Distribution from an HDF5 file (draft; not yet functional).

    NOTE(review): draft-stage code -- `pdf`/`cdf`/`minval`/`maxval` are built
    but never used, and the final `t.__init__()` call invokes the stored
    class's __init__ unbound and with no arguments, which cannot reconstruct
    the object. `Distribution_FromH5` is the working loader.
    """
    fns = pd.read_hdf(filename,path)
    store = pd.HDFStore(filename)
    # Samples, if any, are stored under a separate sub-path.
    if '{}/samples'.format(path) in store:
        samples = pd.read_hdf(filename,path+'/samples')
        samples = np.array(samples)
    minval = fns['vals'].iloc[0]
    maxval = fns['vals'].iloc[-1]
    # Re-interpolate the tabulated pdf/cdf curves.
    pdf = interpolate(fns['vals'],fns['pdf'],s=0)
    cdf = interpolate(fns['vals'],fns['cdf'],s=0)
    # Saved keywords and concrete Distribution subclass.
    attrs = store.get_storer('{}/fns'.format(path)).attrs
    keywords = attrs.keywords
    t = attrs.disttype
    store.close()
    return t.__init__()
class Distribution(object):
    """Base class to describe probability distribution.

    Has some minimal functional overlap with scipy.stats random variates
    (e.g. `ppf`, `rvs`)

    Parameters
    ----------
    pdf : callable
        The probability density function to be used. Does not have to be
        normalized, but must be non-negative.
    cdf : callable, optional
        The cumulative distribution function. If not provided, this will
        be tabulated from the pdf, as long as minval and maxval are also provided
    name : string, optional
        The name of the distribution (will be used, for example, to label a plot).
        Default is empty string.
    minval,maxval : float, optional
        The minimum and maximum values of the distribution. The Distribution will
        evaluate to zero outside these ranges, and this will also define the range
        of the CDF. Defaults are -np.inf and +np.inf. If these are not explicity
        provided, then a CDF function must be provided.
    norm : float, optional
        If not provided, this will be calculated by integrating the pdf from
        minval to maxval so that the Distribution is a proper PDF that integrates
        to unity. `norm` can be non-unity if desired, but beware, as this will
        cause some things to act unexpectedly.
    cdf_pts : int, optional
        Number of points to tabulate in order to calculate CDF, if not provided.
        Default is 500.
    keywords : dict, optional
        Optional dictionary of keywords; these will be saved with the distribution
        when `save_hdf` is called.

    Raises
    ------
    ValueError
        If `cdf` is not provided and minval or maxval are infinity.
    """
    def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
                 cdf_pts=500,keywords=None):
        self.name = name
        self.pdf = pdf
        self.cdf = cdf
        self.minval = minval
        self.maxval = maxval

        if keywords is None:
            self.keywords = {}
        else:
            self.keywords = keywords
        # Record identifying keywords so save_hdf can round-trip them.
        self.keywords['name'] = name
        self.keywords['minval'] = minval
        self.keywords['maxval'] = maxval

        if norm is None:
            self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
        else:
            self.norm = norm

        if cdf is None and (minval == -np.inf or maxval == np.inf):
            raise ValueError('must provide either explicit cdf function or explicit min/max values')
        else: #tabulate & interpolate CDF.
            # NOTE(review): this branch runs whenever it doesn't raise, so a
            # provided analytic cdf is replaced here by the tabulated one --
            # looks intentional given subclasses, but confirm.
            pts = np.linspace(minval,maxval,cdf_pts)
            pdfgrid = self(pts)
            cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
            cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
            def cdf(x):
                x = np.atleast_1d(x)
                y = np.atleast_1d(cdf_fn(x))
                y[np.where(x < self.minval)] = 0
                y[np.where(x > self.maxval)] = 1
                return y
            self.cdf = cdf

            #define minval_cdf, maxval_cdf
            zero_mask = cdfgrid==0
            one_mask = cdfgrid==1
            if zero_mask.sum()>0:
                self.minval_cdf = pts[zero_mask][-1] #last 0 value
            if one_mask.sum()>0:
                self.maxval_cdf = pts[one_mask][0] #first 1 value

    def pctile(self,pct,res=1000):
        """Returns the desired percentile of the distribution.

        Will only work if properly normalized. Designed to mimic
        the `ppf` method of the `scipy.stats` random variate objects.
        Works by gridding the CDF at a given resolution and matching the nearest
        point. NB, this is of course not as precise as an analytic ppf.

        Parameters
        ----------
        pct : float
            Percentile between 0 and 1.
        res : int, optional
            The resolution at which to grid the CDF to find the percentile.

        Returns
        -------
        percentile : float
        """
        grid = np.linspace(self.minval,self.maxval,res)
        return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]

    ppf = pctile

    def save_hdf(self,filename,path='',res=1000,logspace=False):
        """Saves distribution to an HDF5 file.

        Saves a pandas `dataframe` object containing tabulated pdf and cdf
        values at a specfied resolution. After saving to a particular path, a
        distribution may be regenerated using the `Distribution_FromH5` subclass.

        Parameters
        ----------
        filename : string
            File in which to save the distribution. Should end in .h5.
        path : string, optional
            Path in which to save the distribution within the .h5 file. By
            default this is an empty string, which will lead to saving the
            `fns` dataframe at the root level of the file.
        res : int, optional
            Resolution at which to grid the distribution for saving.
        logspace : bool, optional
            Sets whether the tabulated function should be gridded with log or
            linear spacing. Default will be logspace=False, corresponding
            to linear gridding.
        """
        if logspace:
            vals = np.logspace(np.log10(self.minval),
                               np.log10(self.maxval),
                               res)
        else:
            vals = np.linspace(self.minval,self.maxval,res)
        d = {'vals':vals,
             'pdf':self(vals),
             'cdf':self.cdf(vals)}
        df = pd.DataFrame(d)
        df.to_hdf(filename,path+'/fns')
        if hasattr(self,'samples'):
            s = pd.Series(self.samples)
            s.to_hdf(filename,path+'/samples')
        # Stash keywords and the concrete type alongside the tabulated curves.
        store = pd.HDFStore(filename)
        attrs = store.get_storer('{}/fns'.format(path)).attrs
        attrs.keywords = self.keywords
        attrs.disttype = type(self)
        store.close()

    def __call__(self,x):
        """
        Evaluates pdf. Forces zero outside of (self.minval,self.maxval).

        Parameters
        ----------
        x : float, array-like
            Value(s) at which to evaluate PDF.

        Returns
        -------
        pdf : float, array-like
            Probability density (or re-normalized density if self.norm was
            explicity provided).
        """
        y = self.pdf(x)
        x = np.atleast_1d(x)
        y = np.atleast_1d(y)
        y[(x < self.minval) | (x > self.maxval)] = 0
        y /= self.norm
        if np.size(x)==1:
            return y[0]
        else:
            return y

    def __str__(self):
        # Median and 16-84 percentile half-widths.
        return '%s = %.2f +%.2f -%.2f' % (self.name,
                                          self.pctile(0.5),
                                          self.pctile(0.84)-self.pctile(0.5),
                                          self.pctile(0.5)-self.pctile(0.16))

    def __repr__(self):
        return '<%s object: %s>' % (type(self),str(self))

    def plot(self,minval=None,maxval=None,fig=None,log=False,
             npts=500,**kwargs):
        """
        Plots distribution.

        Parameters
        ----------
        minval : float,optional
            minimum value to plot. Required if minval of Distribution is
            `-np.inf`.
        maxval : float, optional
            maximum value to plot. Required if maxval of Distribution is
            `np.inf`.
        fig : None or int, optional
            Parameter to pass to `setfig`. If `None`, then a new figure is
            created; if a non-zero integer, the plot will go to that figure
            (clearing everything first), if zero, then will overplot on
            current axes.
        log : bool, optional
            If `True`, the x-spacing of the points to plot will be logarithmic.
        npts : int, optional
            Number of points to plot.
        kwargs
            Keyword arguments are passed to plt.plot

        Raises
        ------
        ValueError
            If finite lower and upper bounds are not provided.
        """
        if minval is None:
            minval = self.minval
        if maxval is None:
            maxval = self.maxval
        if maxval==np.inf or minval==-np.inf:
            raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')

        if log:
            xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
        else:
            xs = np.linspace(minval,maxval,npts)

        setfig(fig)
        plt.plot(xs,self(xs),**kwargs)
        plt.xlabel(self.name)
        # NOTE(review): ymin/ymax kwargs were removed in modern matplotlib
        # (now bottom/top) -- confirm against the pinned matplotlib version.
        plt.ylim(ymin=0,ymax=self(xs).max()*1.2)

    def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
        """Returns random samples generated according to the distribution

        Mirrors basic functionality of `rvs` method for `scipy.stats`
        random variates. Implemented by mapping uniform numbers onto the
        inverse CDF using a closest-matching grid approach.

        Parameters
        ----------
        N : int
            Number of samples to return
        minval,maxval : float, optional
            Minimum/maximum values to resample. Should both usually just be
            `None`, which will default to `self.minval`/`self.maxval`.
        log : bool, optional
            Whether grid should be log- or linear-spaced.
        res : int, optional
            Resolution of CDF grid used.

        Returns
        -------
        values : ndarray
            N samples.

        Raises
        ------
        ValueError
            If maxval/minval are +/- infinity, this doesn't work because of
            the grid-based approach.
        """
        N = int(N)
        # BUG FIX: res defaults to the float 1e4, but np.linspace/np.logspace
        # require an integer point count.
        res = int(res)
        if minval is None:
            if hasattr(self,'minval_cdf'):
                minval = self.minval_cdf
            else:
                minval = self.minval
        if maxval is None:
            if hasattr(self,'maxval_cdf'):
                maxval = self.maxval_cdf
            else:
                maxval = self.maxval

        if maxval==np.inf or minval==-np.inf:
            raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')

        u = rand.random(size=N)
        if log:
            # BUG FIX: bare `log10` was undefined (NameError); use np.log10.
            vals = np.logspace(np.log10(minval),np.log10(maxval),res)
        else:
            vals = np.linspace(minval,maxval,res)

        #sometimes cdf is flat. so ys will need to be uniqued
        ys,yinds = np.unique(self.cdf(vals), return_index=True)
        vals = vals[yinds]
        inds = np.digitize(u,ys)
        return vals[inds]

    def rvs(self,*args,**kwargs):
        # scipy.stats-compatible alias for resample.
        return self.resample(*args,**kwargs)
class Distribution_FromH5(Distribution):
    """Creates a Distribution object from one saved to an HDF file.

    File must have a `DataFrame` saved under [path]/fns in
    the .h5 file, containing 'vals', 'pdf', and 'cdf' columns.
    If samples are saved in the HDF storer, then they will be restored
    to this object; so will any saved keyword attributes.

    These appropriate .h5 files will be created by a call to the `save_hdf`
    method of the generic `Distribution` class.

    Parameters
    ----------
    filename : string
        .h5 file where the distribution is saved.
    path : string, optional
        Path within the .h5 file where the distribution is saved. By
        default this will be the root level, but can be anywhere.
    kwargs
        Keyword arguments are passed to the `Distribution` constructor.
    """
    def __init__(self,filename,path='',**kwargs):
        store = pd.HDFStore(filename,'r')
        fns = store[path+'/fns']
        if '{}/samples'.format(path) in store:
            samples = store[path+'/samples']
            self.samples = np.array(samples)
        minval = fns['vals'].iloc[0]
        maxval = fns['vals'].iloc[-1]
        pdf = interpolate(fns['vals'],fns['pdf'],s=0,k=1)

        #check to see if tabulated CDF is monotonically increasing
        # BUG FIX: Series subtraction is index-aligned, so
        # fns['cdf'][1:] - fns['cdf'][:-1] compared each value with itself
        # (mostly zeros + NaNs) and the check never fired; diff on raw values.
        d_cdf = fns['cdf'].values[1:] - fns['cdf'].values[:-1]
        if np.any(d_cdf < 0):
            logging.warning('tabulated CDF in {} is not strictly increasing. Recalculating CDF from PDF'.format(filename))
            cdf = None #in this case, just recalc cdf from pdf
        else:
            cdf = interpolate(fns['vals'],fns['cdf'],s=0,k=1)

        Distribution.__init__(self,pdf,cdf,minval=minval,maxval=maxval,
                              **kwargs)

        # BUG FIX: the store was re-opened here without closing the first
        # handle (leak); reuse the handle opened above.
        try:
            keywords = store.get_storer('{}/fns'.format(path)).attrs.keywords
            # BUG FIX: dict.iteritems() is Python 2 only; under Python 3 it
            # raised AttributeError and the saved keywords were silently
            # dropped by the except clause below.
            for kw,val in keywords.items():
                setattr(self,kw,val)
        except AttributeError:
            logging.warning('saved distribution {} does not have keywords or disttype saved; perhaps this distribution was written with an older version.'.format(filename))
        store.close()
class Empirical_Distribution(Distribution):
    """Distribution defined by a tabulated PDF.

    Parameters
    ----------
    xs : array-like
        x-values at which the PDF is evaluated
    pdf : array-like
        Values of pdf at provided x-values.
    smooth : int or float
        Smoothing parameter used by the interpolation.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self,xs,pdf,smooth=0,**kwargs):
        # Normalize in place so the spline integrates to ~1 over the x range.
        pdf /= np.trapz(pdf,xs)
        density = interpolate(xs,pdf,s=smooth)
        Distribution.__init__(self,density,minval=xs.min(),maxval=xs.max(),
                              keywords={'smooth':smooth},**kwargs)
class Gaussian_Distribution(Distribution):
    """Normal distribution with given mu, sigma.

    ***It's probably better to use scipy.stats.norm rather than this
    if you care about numerical precision/speed and don't care about the
    plotting bells/whistles etc. the `Distribution` class provides.***

    Parameters
    ----------
    mu : float
        Mean of normal distribution.
    sig : float
        Width of normal distribution.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self,mu,sig,**kwargs):
        self.mu = mu
        self.sig = sig
        two_sig_sq = 2*sig**2
        def pdf(x):
            # 1/sqrt(2*pi*sig^2) == 1/sqrt(pi * two_sig_sq)
            return np.exp(-(x-mu)**2/two_sig_sq)/np.sqrt(np.pi*two_sig_sq)
        def cdf(x):
            return 0.5*(1 + erf((x-mu)/np.sqrt(two_sig_sq)))
        # Default support: ten sigma either side of the mean.
        kwargs.setdefault('minval', mu - 10*sig)
        kwargs.setdefault('maxval', mu + 10*sig)
        keywords = {'mu':self.mu,'sig':self.sig}
        Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)

    def __str__(self):
        return '%s = %.2f +/- %.2f' % (self.name,self.mu,self.sig)

    def resample(self,N,**kwargs):
        """Draw N exact normal deviates (no grid-based inversion needed)."""
        return self.mu + self.sig*rand.normal(size=int(N))
class Hist_Distribution(Distribution):
    """Generates a distribution from a histogram of provided samples.

    Uses `np.histogram` to create a histogram using the bins keyword,
    then interpolates this histogram to create the pdf to pass to the
    `Distribution` constructor.

    Parameters
    ----------
    samples : array-like
        The samples used to create the distribution
    bins : int or array-like, optional
        Keyword passed to `np.histogram`. If integer, ths will be
        the number of bins, if array-like, then this defines bin edges.
    equibin : bool, optional
        If true and ``bins`` is an integer ``N``, then the bins will be
        found by splitting the data into ``N`` equal-sized groups.
    smooth : int or float
        Smoothing parameter used by the interpolation function.
    order : int
        Order of the spline to be used for interpolation. Default is
        for linear interpolation.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self,samples,bins=10,equibin=True,smooth=0,order=1,**kwargs):
        self.samples = samples
        if type(bins)==type(10) and equibin:
            # Equal-population bins: every N-th sorted sample is a bin edge.
            N = len(samples)//bins
            sortsamples = np.sort(samples)
            bins = sortsamples[0::N]
            if bins[-1] != sortsamples[-1]:
                # Make sure the last edge covers the largest sample.
                bins = np.concatenate([bins,np.array([sortsamples[-1]])])
        hist,bins = np.histogram(samples,bins=bins,density=True)
        self.bins = bins  # edges of the final histogram
        # Interpolate on bin centers rather than edges.
        bins = (bins[1:] + bins[:-1])/2.
        pdf_initial = interpolate(bins,hist,s=smooth,k=order)
        def pdf(x):
            # Spline evaluation, clamped to zero outside the histogram range.
            x = np.atleast_1d(x)
            y = pdf_initial(x)
            w = np.where((x < self.bins[0]) | (x > self.bins[-1]))
            y[w] = 0
            return y
        cdf = interpolate(bins,hist.cumsum()/hist.cumsum().max(),s=smooth,
                          k=order)

        if 'maxval' not in kwargs:
            kwargs['maxval'] = samples.max()
        if 'minval' not in kwargs:
            kwargs['minval'] = samples.min()

        keywords = {'bins':bins,'smooth':smooth,'order':order}

        Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)

    def __str__(self):
        return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())

    def plothist(self,fig=None,**kwargs):
        """Plots a histogram of samples using provided bins.

        Parameters
        ----------
        fig : None or int
            Parameter passed to `setfig`.
        kwargs
            Keyword arguments passed to `plt.hist`.
        """
        setfig(fig)
        plt.hist(self.samples,bins=self.bins,**kwargs)

    def resample(self,N):
        """Returns a bootstrap resampling of provided samples.

        Parameters
        ----------
        N : int
            Number of samples.
        """
        inds = rand.randint(len(self.samples),size=N)
        return self.samples[inds]

    def save_hdf(self,filename,path='',**kwargs):
        # Delegates to the base-class saver (self.samples is picked up there).
        Distribution.save_hdf(self,filename,path=path,**kwargs)
class Box_Distribution(Distribution):
    """Uniform (top-hat) distribution between provided lower and upper limits.

    Parameters
    ----------
    lo,hi : float
        Lower/upper limits of the distribution.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self,lo,hi,**kwargs):
        self.lo = lo
        self.hi = hi
        width = hi - lo
        def pdf(x):
            # Constant density; "+ 0*x" keeps array inputs array-shaped.
            return 1./width + 0*x
        def cdf(x):
            # Linear ramp from lo to hi, clamped to [0, 1] outside the box.
            x = np.atleast_1d(x)
            return np.clip((x - lo)/width, 0, 1)
        Distribution.__init__(self,pdf,cdf,minval=lo,maxval=hi,**kwargs)

    def __str__(self):
        return '%.1f < %s < %.1f' % (self.lo,self.name,self.hi)

    def resample(self,N):
        """Returns a random sampling.
        """
        span = self.maxval - self.minval
        return self.minval + span*rand.random(size=N)
############## Double LorGauss ###########
def double_lorgauss(x,p):
    """Evaluates a normalized distribution that is a mixture of a double-sided Gaussian and Double-sided Lorentzian.

    Parameters
    ----------
    x : float or array-like
        Value(s) at which to evaluate distribution
    p : array-like
        Input parameters: mu (mode of distribution),
        sig1 (LH Gaussian width),
        sig2 (RH Gaussian width),
        gam1 (LH Lorentzian width),
        gam2 (RH Lorentzian width),
        G1 (LH Gaussian "strength"),
        G2 (RH Gaussian "strength").

    Returns
    -------
    values : float or array-like
        Double LorGauss distribution evaluated at input(s). If single value provided,
        single value returned.
    """
    mu,sig1,sig2,gam1,gam2,G1,G2 = p
    # Only the magnitudes of the widths/strengths are meaningful.
    G1 = abs(G1)
    G2 = abs(G2)
    sig1 = abs(sig1)
    sig2 = abs(sig2)
    gam1 = abs(float(gam1))
    # BUG FIX: previously assigned to a typo'd name 'gab2', so a negative gam2
    # was never folded to its magnitude (and gam1 + gam2 below could be zero).
    gam2 = abs(float(gam2))
    # Split the total weight budget (G1 + G2 + L1 + L2 == 4) between the
    # Gaussian and Lorentzian components.
    L2 = (gam1/(gam1 + gam2)) * ((gam2*np.pi*G1)/(sig1*np.sqrt(2*np.pi)) -
                                 (gam2*np.pi*G2)/(sig2*np.sqrt(2*np.pi)) +
                                 (gam2/gam1)*(4-G1-G2))
    L1 = 4 - G1 - G2 - L2
    # Left-hand (y1) and right-hand (y2) Gaussian+Lorentzian mixtures.
    y1 = G1/(sig1*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig1**2) +\
         L1/(np.pi*gam1) * gam1**2/((x-mu)**2 + gam1**2)
    y2 = G2/(sig2*np.sqrt(2*np.pi)) * np.exp(-0.5*(x-mu)**2/sig2**2) +\
         L2/(np.pi*gam2) * gam2**2/((x-mu)**2 + gam2**2)
    lo = (x < mu)
    hi = (x >= mu)
    return y1*lo + y2*hi
def fit_double_lorgauss(bins,h,Ntry=5):
    """Uses lmfit to fit a "Double LorGauss" distribution to a provided histogram.

    Uses a grid of starting guesses to try to avoid local minima.

    Parameters
    ----------
    bins, h : array-like
        Bins and heights of a histogram, as returned by, e.g., `np.histogram`.
    Ntry : int, optional
        Spacing of grid for starting guesses. Will try `Ntry**2` different
        initial values of the "Gaussian strength" parameters `G1` and `G2`.

    Returns
    -------
    parameters : tuple
        Parameters of best-fit "double LorGauss" distribution.

    Raises
    ------
    ImportError
        If the lmfit module is not available.
    """
    try:
        from lmfit import minimize, Parameters, Parameter, report_fit
    except ImportError:
        raise ImportError('you need lmfit to use this function.')

    #make sure histogram is normalized (in place, mutates caller's array)
    h /= np.trapz(h,bins)

    #zero-pad the ends of the distribution to keep fits positive
    N = len(bins)
    # BUG FIX: N/10 is a float under Python 3 but is used below as an element
    # count for np.linspace/np.zeros; use integer division.
    npad = N//10
    dbin = (bins[1:]-bins[:-1]).mean()
    newbins = np.concatenate((np.linspace(bins.min() - npad*dbin,bins.min(),npad),
                              bins,
                              np.linspace(bins.max(),bins.max() + npad*dbin,npad)))
    newh = np.concatenate((np.zeros(npad),h,np.zeros(npad)))

    # Initial guesses: mode of the padded histogram and its half-max half-width.
    mu0 = bins[np.argmax(newh)]
    sig0 = abs(mu0 - newbins[np.argmin(np.absolute(newh - 0.5*newh.max()))])

    def set_params(G1,G2):
        # Fresh lmfit parameter set for one starting guess of (G1, G2).
        params = Parameters()
        params.add('mu',value=mu0)
        params.add('sig1',value=sig0)
        params.add('sig2',value=sig0)
        params.add('gam1',value=sig0/10)
        params.add('gam2',value=sig0/10)
        params.add('G1',value=G1)
        params.add('G2',value=G2)
        return params

    sum_devsq_best = np.inf
    outkeep = None
    # Grid of starting guesses for the two Gaussian strengths.
    for G1 in np.linspace(0.1,1.9,Ntry):
        for G2 in np.linspace(0.1,1.9,Ntry):
            params = set_params(G1,G2)
            def residual(ps):
                # assumes lmfit's minimize updates `params` in place
                # (older lmfit API) -- TODO confirm against installed version
                pars = (params['mu'].value,
                        params['sig1'].value,
                        params['sig2'].value,
                        params['gam1'].value,
                        params['gam2'].value,
                        params['G1'].value,
                        params['G2'].value)
                hmodel = double_lorgauss(newbins,pars)
                return newh-hmodel
            out = minimize(residual,params)
            pars = (out.params['mu'].value,out.params['sig1'].value,
                    out.params['sig2'].value,out.params['gam1'].value,
                    out.params['gam2'].value,out.params['G1'].value,
                    out.params['G2'].value)
            sum_devsq = ((newh - double_lorgauss(newbins,pars))**2).sum()
            # Keep the fit with the smallest summed squared deviation.
            if sum_devsq < sum_devsq_best:
                sum_devsq_best = sum_devsq
                outkeep = out
    return (outkeep.params['mu'].value,abs(outkeep.params['sig1'].value),
            abs(outkeep.params['sig2'].value),abs(outkeep.params['gam1'].value),
            abs(outkeep.params['gam2'].value),abs(outkeep.params['G1'].value),
            abs(outkeep.params['G2'].value))
class DoubleLorGauss_Distribution(Distribution):
    """Distribution wrapping the `double_lorgauss` pdf with fixed parameters.

    Parameters
    ----------
    mu,sig1,sig2,gam1,gam2,G1,G2 : float
        Parameters of `double_lorgauss` function.
    kwargs
        Keyword arguments passed to `Distribution` constructor.
    """
    def __init__(self,mu,sig1,sig2,gam1,gam2,G1,G2,**kwargs):
        self.mu = mu
        self.sig1 = sig1
        self.sig2 = sig2
        self.gam1 = gam1
        self.gam2 = gam2
        self.G1 = G1
        self.G2 = G2
        def pdf(x):
            # Read parameters off the instance at call time.
            pars = (self.mu,self.sig1,self.sig2,
                    self.gam1,self.gam2,self.G1,self.G2)
            return double_lorgauss(x,pars)
        keywords = dict(mu=mu,sig1=sig1,sig2=sig2,
                        gam1=gam1,gam2=gam2,G1=G1,G2=G2)
        Distribution.__init__(self,pdf,keywords=keywords,**kwargs)
######## DoubleGauss #########
def doublegauss(x,p):
"""Evaluates normalized two-sided Gaussian distribution
Parameters
----------
x : float or array-like
Value(s) at which to evaluate distribution
p : array-like
Parameters of distribution: (mu: mode of distribution,
sig1: LH width,
sig2: RH width)
Returns
-------
value : float or array-like
Distribution evaluated at input value(s). If single value provided,
single value returned.
"""
mu,sig1,sig2 = p
x = np.atleast_1d(x)
A = 1./(np.sqrt(2*np.pi)*(sig1+sig2)/2.)
ylo = A*np.exp(-(x-mu)**2/(2*sig1**2))
yhi = A*np.exp(-(x-mu)**2/(2*sig2**2))
y = x*0
wlo = np.where(x < mu)
whi = np.where(x >= mu)
y[wlo] = ylo[wlo]
y[whi] = yhi[whi]
if np.size(x)==1:
return y[0]
else:
return y
def doublegauss_cdf(x,p):
"""Cumulative distribution function for two-sided Gaussian
Parameters
----------
x : float
Input values at which to calculate CDF.
p : array-like
Parameters of distribution: (mu: mode of distribution,
sig1: LH width,
sig2: RH width)
"""
x = np.atleast_1d(x)
mu,sig1,sig2 = p
sig1 = np.absolute(sig1)
sig2 = np.absolute(sig2)
ylo = float(sig1)/(sig1 + sig2)*(1 + erf((x-mu)/np.sqrt(2*sig1**2)))
yhi = float(sig1)/(sig1 + sig2) + float(sig2)/(sig1+sig2)*(erf((x-mu)/np.sqrt(2*sig2**2)))
lo = x < mu
hi = x >= mu
return ylo*lo + yhi*hi
def fit_doublegauss_samples(samples,**kwargs):
"""Fits a two-sided Gaussian to a set of samples.
Calculates 0.16, 0.5, and 0.84 quantiles and passes these to
`fit_doublegauss` for fitting.
Parameters
----------
samples : array-like
Samples to which to fit the Gaussian.
kwargs
Keyword arguments passed to `fit_doublegauss`.
"""
sorted_samples = np.sort(samples)
N = len(samples)
med = sorted_samples[N/2]
siglo = med - sorted_samples[int(0.16*N)]
sighi = sorted_samples[int(0.84*N)] - med
return fit_doublegauss(med,siglo,sighi,median=True,**kwargs)
class DoubleGauss_Distribution(Distribution):
"""A Distribution oject representing a two-sided Gaussian distribution
This can be used to represent a slightly asymmetric distribution,
and consists of two half-Normal distributions patched together at the
mode, and normalized appropriately. The pdf and cdf are according to
the `doubleguass` and `doubleguass_cdf` functions, respectively.
Parameters
----------
mu : float
The mode of the distribution.
siglo : float
Width of lower half-Gaussian.
sighi : float
Width of upper half-Gaussian.
kwargs
Keyword arguments are passed to `Distribution` constructor.
"""
def __init__(self,mu,siglo,sighi,**kwargs):
self.mu = mu
self.siglo = float(siglo)
self.sighi = float(sighi)
def pdf(x):
return doublegauss(x,(mu,siglo,sighi))
def cdf(x):
return doublegauss_cdf(x,(mu,siglo,sighi))
if 'minval' not in kwargs:
kwargs['minval'] = mu - 5*siglo
if 'maxval' not in kwargs:
kwargs['maxval'] = mu + 5*sighi
keywords = {'mu':mu,'siglo':siglo,'sighi':sighi}
Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,self.mu,self.sighi,self.siglo)
def resample(self,N,**kwargs):
"""Random resampling of the doublegauss distribution
"""
lovals = self.mu - np.absolute(rand.normal(size=N)*self.siglo)
hivals = self.mu + np.absolute(rand.normal(size=N)*self.sighi)
u = rand.random(size=N)
hi = (u < float(self.sighi)/(self.sighi + self.siglo))
lo = (u >= float(self.sighi)/(self.sighi + self.siglo))
vals = np.zeros(N)
vals[hi] = hivals[hi]
vals[lo] = lovals[lo]
return vals
def powerlawfn(alpha,minval,maxval):
C = powerlawnorm(alpha,minval,maxval)
def fn(inpx):
x = np.atleast_1d(inpx)
y = C*x**(alpha)
y[(x < minval) | (x > maxval)] = 0
return y
return fn
def powerlawnorm(alpha,minval,maxval):
if np.size(alpha)==1:
if alpha == -1:
C = 1/np.log(maxval/minval)
else:
C = (1+alpha)/(maxval**(1+alpha)-minval**(1+alpha))
else:
C = np.zeros(np.size(alpha))
w = np.where(alpha==-1)
if len(w[0]>0):
C[w] = 1./np.log(maxval/minval)*np.ones(len(w[0]))
nw = np.where(alpha != -1)
C[nw] = (1+alpha[nw])/(maxval**(1+alpha[nw])-minval**(1+alpha[nw]))
else:
C = (1+alpha)/(maxval**(1+alpha)-minval**(1+alpha))
return C
class PowerLaw_Distribution(Distribution):
def __init__(self,alpha,minval,maxval,**kwargs):
self.alpha = alpha
pdf = powerlawfn(alpha,minval,maxval)
Distribution.__init__(self,pdf,minval=minval,maxval=maxval)
######## KDE ###########
class KDE_Distribution(Distribution):
def __init__(self,samples,adaptive=True,draw_direct=True,bandwidth=None,**kwargs):
self.samples = samples
self.bandwidth = bandwidth
self.kde = KDE(samples,adaptive=adaptive,draw_direct=draw_direct,
bandwidth=bandwidth)
if 'minval' not in kwargs:
kwargs['minval'] = samples.min()
if 'maxval' not in kwargs:
kwargs['maxval'] = samples.max()
keywords = {'adaptive':adaptive,'draw_direct':draw_direct,
'bandwidth':bandwidth}
Distribution.__init__(self,self.kde,keywords=keywords,**kwargs)
def save_hdf(self,filename,path='',**kwargs):
Distribution.save_hdf(self,filename,path=path,**kwargs)
def __str__(self):
return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
def resample(self,N,**kwargs):
return self.kde.resample(N,**kwargs)
class KDE_Distribution_Fromtxt(KDE_Distribution):
def __init__(self,filename,**kwargs):
samples = np.loadtxt(filename)
KDE_Distribution.__init__(self,samples,**kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
Distribution.pctile
|
python
|
def pctile(self,pct,res=1000):
grid = np.linspace(self.minval,self.maxval,res)
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
|
Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L136-L158
|
[
"def cdf(x):\n x = np.atleast_1d(x)\n y = np.atleast_1d(cdf_fn(x))\n y[np.where(x < self.minval)] = 0\n y[np.where(x > self.maxval)] = 1\n return y\n"
] |
class Distribution(object):
"""Base class to describe probability distribution.
Has some minimal functional overlap with scipy.stats random variates
(e.g. `ppf`, `rvs`)
Parameters
----------
pdf : callable
The probability density function to be used. Does not have to be
normalized, but must be non-negative.
cdf : callable, optional
The cumulative distribution function. If not provided, this will
be tabulated from the pdf, as long as minval and maxval are also provided
name : string, optional
The name of the distribution (will be used, for example, to label a plot).
Default is empty string.
minval,maxval : float, optional
The minimum and maximum values of the distribution. The Distribution will
evaluate to zero outside these ranges, and this will also define the range
of the CDF. Defaults are -np.inf and +np.inf. If these are not explicity
provided, then a CDF function must be provided.
norm : float, optional
If not provided, this will be calculated by integrating the pdf from
minval to maxval so that the Distribution is a proper PDF that integrates
to unity. `norm` can be non-unity if desired, but beware, as this will
cause some things to act unexpectedly.
cdf_pts : int, optional
Number of points to tabulate in order to calculate CDF, if not provided.
Default is 500.
keywords : dict, optional
Optional dictionary of keywords; these will be saved with the distribution
when `save_hdf` is called.
Raises
------
ValueError
If `cdf` is not provided and minval or maxval are infinity.
"""
def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
cdf_pts=500,keywords=None):
self.name = name
self.pdf = pdf
self.cdf = cdf
self.minval = minval
self.maxval = maxval
if keywords is None:
self.keywords = {}
else:
self.keywords = keywords
self.keywords['name'] = name
self.keywords['minval'] = minval
self.keywords['maxval'] = maxval
if norm is None:
self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
else:
self.norm = norm
if cdf is None and (minval == -np.inf or maxval == np.inf):
raise ValueError('must provide either explicit cdf function or explicit min/max values')
else: #tabulate & interpolate CDF.
pts = np.linspace(minval,maxval,cdf_pts)
pdfgrid = self(pts)
cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
def cdf(x):
x = np.atleast_1d(x)
y = np.atleast_1d(cdf_fn(x))
y[np.where(x < self.minval)] = 0
y[np.where(x > self.maxval)] = 1
return y
self.cdf = cdf
#define minval_cdf, maxval_cdf
zero_mask = cdfgrid==0
one_mask = cdfgrid==1
if zero_mask.sum()>0:
self.minval_cdf = pts[zero_mask][-1] #last 0 value
if one_mask.sum()>0:
self.maxval_cdf = pts[one_mask][0] #first 1 value
ppf = pctile
def save_hdf(self,filename,path='',res=1000,logspace=False):
"""Saves distribution to an HDF5 file.
Saves a pandas `dataframe` object containing tabulated pdf and cdf
values at a specfied resolution. After saving to a particular path, a
distribution may be regenerated using the `Distribution_FromH5` subclass.
Parameters
----------
filename : string
File in which to save the distribution. Should end in .h5.
path : string, optional
Path in which to save the distribution within the .h5 file. By
default this is an empty string, which will lead to saving the
`fns` dataframe at the root level of the file.
res : int, optional
Resolution at which to grid the distribution for saving.
logspace : bool, optional
Sets whether the tabulated function should be gridded with log or
linear spacing. Default will be logspace=False, corresponding
to linear gridding.
"""
if logspace:
vals = np.logspace(np.log10(self.minval),
np.log10(self.maxval),
res)
else:
vals = np.linspace(self.minval,self.maxval,res)
d = {'vals':vals,
'pdf':self(vals),
'cdf':self.cdf(vals)}
df = pd.DataFrame(d)
df.to_hdf(filename,path+'/fns')
if hasattr(self,'samples'):
s = pd.Series(self.samples)
s.to_hdf(filename,path+'/samples')
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/fns'.format(path)).attrs
attrs.keywords = self.keywords
attrs.disttype = type(self)
store.close()
def __call__(self,x):
"""
Evaluates pdf. Forces zero outside of (self.minval,self.maxval). Will return
Parameters
----------
x : float, array-like
Value(s) at which to evaluate PDF.
Returns
-------
pdf : float, array-like
Probability density (or re-normalized density if self.norm was explicity
provided.
"""
y = self.pdf(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
y[(x < self.minval) | (x > self.maxval)] = 0
y /= self.norm
if np.size(x)==1:
return y[0]
else:
return y
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,
self.pctile(0.5),
self.pctile(0.84)-self.pctile(0.5),
self.pctile(0.5)-self.pctile(0.16))
def __repr__(self):
return '<%s object: %s>' % (type(self),str(self))
def plot(self,minval=None,maxval=None,fig=None,log=False,
npts=500,**kwargs):
"""
Plots distribution.
Parameters
----------
minval : float,optional
minimum value to plot. Required if minval of Distribution is
`-np.inf`.
maxval : float, optional
maximum value to plot. Required if maxval of Distribution is
`np.inf`.
fig : None or int, optional
Parameter to pass to `setfig`. If `None`, then a new figure is
created; if a non-zero integer, the plot will go to that figure
(clearing everything first), if zero, then will overplot on
current axes.
log : bool, optional
If `True`, the x-spacing of the points to plot will be logarithmic.
npoints : int, optional
Number of points to plot.
kwargs
Keyword arguments are passed to plt.plot
Raises
------
ValueError
If finite lower and upper bounds are not provided.
"""
if minval is None:
minval = self.minval
if maxval is None:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')
if log:
xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
else:
xs = np.linspace(minval,maxval,npts)
setfig(fig)
plt.plot(xs,self(xs),**kwargs)
plt.xlabel(self.name)
plt.ylim(ymin=0,ymax=self(xs).max()*1.2)
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
"""Returns random samples generated according to the distribution
Mirrors basic functionality of `rvs` method for `scipy.stats`
random variates. Implemented by mapping uniform numbers onto the
inverse CDF using a closest-matching grid approach.
Parameters
----------
N : int
Number of samples to return
minval,maxval : float, optional
Minimum/maximum values to resample. Should both usually just be
`None`, which will default to `self.minval`/`self.maxval`.
log : bool, optional
Whether grid should be log- or linear-spaced.
res : int, optional
Resolution of CDF grid used.
Returns
-------
values : ndarray
N samples.
Raises
------
ValueError
If maxval/minval are +/- infinity, this doesn't work because of
the grid-based approach.
"""
N = int(N)
if minval is None:
if hasattr(self,'minval_cdf'):
minval = self.minval_cdf
else:
minval = self.minval
if maxval is None:
if hasattr(self,'maxval_cdf'):
maxval = self.maxval_cdf
else:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
u = rand.random(size=N)
if log:
vals = np.logspace(log10(minval),log10(maxval),res)
else:
vals = np.linspace(minval,maxval,res)
#sometimes cdf is flat. so ys will need to be uniqued
ys,yinds = np.unique(self.cdf(vals), return_index=True)
vals = vals[yinds]
inds = np.digitize(u,ys)
return vals[inds]
def rvs(self,*args,**kwargs):
return self.resample(*args,**kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
Distribution.save_hdf
|
python
|
def save_hdf(self,filename,path='',res=1000,logspace=False):
if logspace:
vals = np.logspace(np.log10(self.minval),
np.log10(self.maxval),
res)
else:
vals = np.linspace(self.minval,self.maxval,res)
d = {'vals':vals,
'pdf':self(vals),
'cdf':self.cdf(vals)}
df = pd.DataFrame(d)
df.to_hdf(filename,path+'/fns')
if hasattr(self,'samples'):
s = pd.Series(self.samples)
s.to_hdf(filename,path+'/samples')
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/fns'.format(path)).attrs
attrs.keywords = self.keywords
attrs.disttype = type(self)
store.close()
|
Saves distribution to an HDF5 file.
Saves a pandas `dataframe` object containing tabulated pdf and cdf
values at a specfied resolution. After saving to a particular path, a
distribution may be regenerated using the `Distribution_FromH5` subclass.
Parameters
----------
filename : string
File in which to save the distribution. Should end in .h5.
path : string, optional
Path in which to save the distribution within the .h5 file. By
default this is an empty string, which will lead to saving the
`fns` dataframe at the root level of the file.
res : int, optional
Resolution at which to grid the distribution for saving.
logspace : bool, optional
Sets whether the tabulated function should be gridded with log or
linear spacing. Default will be logspace=False, corresponding
to linear gridding.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L162-L206
|
[
"def cdf(x):\n x = np.atleast_1d(x)\n y = np.atleast_1d(cdf_fn(x))\n y[np.where(x < self.minval)] = 0\n y[np.where(x > self.maxval)] = 1\n return y\n"
] |
class Distribution(object):
"""Base class to describe probability distribution.
Has some minimal functional overlap with scipy.stats random variates
(e.g. `ppf`, `rvs`)
Parameters
----------
pdf : callable
The probability density function to be used. Does not have to be
normalized, but must be non-negative.
cdf : callable, optional
The cumulative distribution function. If not provided, this will
be tabulated from the pdf, as long as minval and maxval are also provided
name : string, optional
The name of the distribution (will be used, for example, to label a plot).
Default is empty string.
minval,maxval : float, optional
The minimum and maximum values of the distribution. The Distribution will
evaluate to zero outside these ranges, and this will also define the range
of the CDF. Defaults are -np.inf and +np.inf. If these are not explicity
provided, then a CDF function must be provided.
norm : float, optional
If not provided, this will be calculated by integrating the pdf from
minval to maxval so that the Distribution is a proper PDF that integrates
to unity. `norm` can be non-unity if desired, but beware, as this will
cause some things to act unexpectedly.
cdf_pts : int, optional
Number of points to tabulate in order to calculate CDF, if not provided.
Default is 500.
keywords : dict, optional
Optional dictionary of keywords; these will be saved with the distribution
when `save_hdf` is called.
Raises
------
ValueError
If `cdf` is not provided and minval or maxval are infinity.
"""
def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
cdf_pts=500,keywords=None):
self.name = name
self.pdf = pdf
self.cdf = cdf
self.minval = minval
self.maxval = maxval
if keywords is None:
self.keywords = {}
else:
self.keywords = keywords
self.keywords['name'] = name
self.keywords['minval'] = minval
self.keywords['maxval'] = maxval
if norm is None:
self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
else:
self.norm = norm
if cdf is None and (minval == -np.inf or maxval == np.inf):
raise ValueError('must provide either explicit cdf function or explicit min/max values')
else: #tabulate & interpolate CDF.
pts = np.linspace(minval,maxval,cdf_pts)
pdfgrid = self(pts)
cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
def cdf(x):
x = np.atleast_1d(x)
y = np.atleast_1d(cdf_fn(x))
y[np.where(x < self.minval)] = 0
y[np.where(x > self.maxval)] = 1
return y
self.cdf = cdf
#define minval_cdf, maxval_cdf
zero_mask = cdfgrid==0
one_mask = cdfgrid==1
if zero_mask.sum()>0:
self.minval_cdf = pts[zero_mask][-1] #last 0 value
if one_mask.sum()>0:
self.maxval_cdf = pts[one_mask][0] #first 1 value
def pctile(self,pct,res=1000):
"""Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float
"""
grid = np.linspace(self.minval,self.maxval,res)
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
ppf = pctile
def __call__(self,x):
"""
Evaluates pdf. Forces zero outside of (self.minval,self.maxval). Will return
Parameters
----------
x : float, array-like
Value(s) at which to evaluate PDF.
Returns
-------
pdf : float, array-like
Probability density (or re-normalized density if self.norm was explicity
provided.
"""
y = self.pdf(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
y[(x < self.minval) | (x > self.maxval)] = 0
y /= self.norm
if np.size(x)==1:
return y[0]
else:
return y
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,
self.pctile(0.5),
self.pctile(0.84)-self.pctile(0.5),
self.pctile(0.5)-self.pctile(0.16))
def __repr__(self):
return '<%s object: %s>' % (type(self),str(self))
def plot(self,minval=None,maxval=None,fig=None,log=False,
npts=500,**kwargs):
"""
Plots distribution.
Parameters
----------
minval : float,optional
minimum value to plot. Required if minval of Distribution is
`-np.inf`.
maxval : float, optional
maximum value to plot. Required if maxval of Distribution is
`np.inf`.
fig : None or int, optional
Parameter to pass to `setfig`. If `None`, then a new figure is
created; if a non-zero integer, the plot will go to that figure
(clearing everything first), if zero, then will overplot on
current axes.
log : bool, optional
If `True`, the x-spacing of the points to plot will be logarithmic.
npoints : int, optional
Number of points to plot.
kwargs
Keyword arguments are passed to plt.plot
Raises
------
ValueError
If finite lower and upper bounds are not provided.
"""
if minval is None:
minval = self.minval
if maxval is None:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')
if log:
xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
else:
xs = np.linspace(minval,maxval,npts)
setfig(fig)
plt.plot(xs,self(xs),**kwargs)
plt.xlabel(self.name)
plt.ylim(ymin=0,ymax=self(xs).max()*1.2)
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
"""Returns random samples generated according to the distribution
Mirrors basic functionality of `rvs` method for `scipy.stats`
random variates. Implemented by mapping uniform numbers onto the
inverse CDF using a closest-matching grid approach.
Parameters
----------
N : int
Number of samples to return
minval,maxval : float, optional
Minimum/maximum values to resample. Should both usually just be
`None`, which will default to `self.minval`/`self.maxval`.
log : bool, optional
Whether grid should be log- or linear-spaced.
res : int, optional
Resolution of CDF grid used.
Returns
-------
values : ndarray
N samples.
Raises
------
ValueError
If maxval/minval are +/- infinity, this doesn't work because of
the grid-based approach.
"""
N = int(N)
if minval is None:
if hasattr(self,'minval_cdf'):
minval = self.minval_cdf
else:
minval = self.minval
if maxval is None:
if hasattr(self,'maxval_cdf'):
maxval = self.maxval_cdf
else:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
u = rand.random(size=N)
if log:
vals = np.logspace(log10(minval),log10(maxval),res)
else:
vals = np.linspace(minval,maxval,res)
#sometimes cdf is flat. so ys will need to be uniqued
ys,yinds = np.unique(self.cdf(vals), return_index=True)
vals = vals[yinds]
inds = np.digitize(u,ys)
return vals[inds]
def rvs(self,*args,**kwargs):
return self.resample(*args,**kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
Distribution.plot
|
python
|
def plot(self,minval=None,maxval=None,fig=None,log=False,
npts=500,**kwargs):
if minval is None:
minval = self.minval
if maxval is None:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')
if log:
xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
else:
xs = np.linspace(minval,maxval,npts)
setfig(fig)
plt.plot(xs,self(xs),**kwargs)
plt.xlabel(self.name)
plt.ylim(ymin=0,ymax=self(xs).max()*1.2)
|
Plots distribution.
Parameters
----------
minval : float,optional
minimum value to plot. Required if minval of Distribution is
`-np.inf`.
maxval : float, optional
maximum value to plot. Required if maxval of Distribution is
`np.inf`.
fig : None or int, optional
Parameter to pass to `setfig`. If `None`, then a new figure is
created; if a non-zero integer, the plot will go to that figure
(clearing everything first), if zero, then will overplot on
current axes.
log : bool, optional
If `True`, the x-spacing of the points to plot will be logarithmic.
npoints : int, optional
Number of points to plot.
kwargs
Keyword arguments are passed to plt.plot
Raises
------
ValueError
If finite lower and upper bounds are not provided.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L244-L294
| null |
class Distribution(object):
"""Base class to describe probability distribution.
Has some minimal functional overlap with scipy.stats random variates
(e.g. `ppf`, `rvs`)
Parameters
----------
pdf : callable
The probability density function to be used. Does not have to be
normalized, but must be non-negative.
cdf : callable, optional
The cumulative distribution function. If not provided, this will
be tabulated from the pdf, as long as minval and maxval are also provided
name : string, optional
The name of the distribution (will be used, for example, to label a plot).
Default is empty string.
minval,maxval : float, optional
The minimum and maximum values of the distribution. The Distribution will
evaluate to zero outside these ranges, and this will also define the range
of the CDF. Defaults are -np.inf and +np.inf. If these are not explicity
provided, then a CDF function must be provided.
norm : float, optional
If not provided, this will be calculated by integrating the pdf from
minval to maxval so that the Distribution is a proper PDF that integrates
to unity. `norm` can be non-unity if desired, but beware, as this will
cause some things to act unexpectedly.
cdf_pts : int, optional
Number of points to tabulate in order to calculate CDF, if not provided.
Default is 500.
keywords : dict, optional
Optional dictionary of keywords; these will be saved with the distribution
when `save_hdf` is called.
Raises
------
ValueError
If `cdf` is not provided and minval or maxval are infinity.
"""
def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
cdf_pts=500,keywords=None):
self.name = name
self.pdf = pdf
self.cdf = cdf
self.minval = minval
self.maxval = maxval
if keywords is None:
self.keywords = {}
else:
self.keywords = keywords
self.keywords['name'] = name
self.keywords['minval'] = minval
self.keywords['maxval'] = maxval
if norm is None:
self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
else:
self.norm = norm
if cdf is None and (minval == -np.inf or maxval == np.inf):
raise ValueError('must provide either explicit cdf function or explicit min/max values')
else: #tabulate & interpolate CDF.
pts = np.linspace(minval,maxval,cdf_pts)
pdfgrid = self(pts)
cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
def cdf(x):
x = np.atleast_1d(x)
y = np.atleast_1d(cdf_fn(x))
y[np.where(x < self.minval)] = 0
y[np.where(x > self.maxval)] = 1
return y
self.cdf = cdf
#define minval_cdf, maxval_cdf
zero_mask = cdfgrid==0
one_mask = cdfgrid==1
if zero_mask.sum()>0:
self.minval_cdf = pts[zero_mask][-1] #last 0 value
if one_mask.sum()>0:
self.maxval_cdf = pts[one_mask][0] #first 1 value
def pctile(self,pct,res=1000):
"""Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float
"""
grid = np.linspace(self.minval,self.maxval,res)
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
ppf = pctile
def save_hdf(self,filename,path='',res=1000,logspace=False):
"""Saves distribution to an HDF5 file.
Saves a pandas `dataframe` object containing tabulated pdf and cdf
values at a specfied resolution. After saving to a particular path, a
distribution may be regenerated using the `Distribution_FromH5` subclass.
Parameters
----------
filename : string
File in which to save the distribution. Should end in .h5.
path : string, optional
Path in which to save the distribution within the .h5 file. By
default this is an empty string, which will lead to saving the
`fns` dataframe at the root level of the file.
res : int, optional
Resolution at which to grid the distribution for saving.
logspace : bool, optional
Sets whether the tabulated function should be gridded with log or
linear spacing. Default will be logspace=False, corresponding
to linear gridding.
"""
if logspace:
vals = np.logspace(np.log10(self.minval),
np.log10(self.maxval),
res)
else:
vals = np.linspace(self.minval,self.maxval,res)
d = {'vals':vals,
'pdf':self(vals),
'cdf':self.cdf(vals)}
df = pd.DataFrame(d)
df.to_hdf(filename,path+'/fns')
if hasattr(self,'samples'):
s = pd.Series(self.samples)
s.to_hdf(filename,path+'/samples')
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/fns'.format(path)).attrs
attrs.keywords = self.keywords
attrs.disttype = type(self)
store.close()
def __call__(self,x):
"""
Evaluates pdf. Forces zero outside of (self.minval,self.maxval). Will return
Parameters
----------
x : float, array-like
Value(s) at which to evaluate PDF.
Returns
-------
pdf : float, array-like
Probability density (or re-normalized density if self.norm was explicity
provided.
"""
y = self.pdf(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
y[(x < self.minval) | (x > self.maxval)] = 0
y /= self.norm
if np.size(x)==1:
return y[0]
else:
return y
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,
self.pctile(0.5),
self.pctile(0.84)-self.pctile(0.5),
self.pctile(0.5)-self.pctile(0.16))
def __repr__(self):
return '<%s object: %s>' % (type(self),str(self))
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
"""Returns random samples generated according to the distribution
Mirrors basic functionality of `rvs` method for `scipy.stats`
random variates. Implemented by mapping uniform numbers onto the
inverse CDF using a closest-matching grid approach.
Parameters
----------
N : int
Number of samples to return
minval,maxval : float, optional
Minimum/maximum values to resample. Should both usually just be
`None`, which will default to `self.minval`/`self.maxval`.
log : bool, optional
Whether grid should be log- or linear-spaced.
res : int, optional
Resolution of CDF grid used.
Returns
-------
values : ndarray
N samples.
Raises
------
ValueError
If maxval/minval are +/- infinity, this doesn't work because of
the grid-based approach.
"""
N = int(N)
if minval is None:
if hasattr(self,'minval_cdf'):
minval = self.minval_cdf
else:
minval = self.minval
if maxval is None:
if hasattr(self,'maxval_cdf'):
maxval = self.maxval_cdf
else:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
u = rand.random(size=N)
if log:
vals = np.logspace(log10(minval),log10(maxval),res)
else:
vals = np.linspace(minval,maxval,res)
#sometimes cdf is flat. so ys will need to be uniqued
ys,yinds = np.unique(self.cdf(vals), return_index=True)
vals = vals[yinds]
inds = np.digitize(u,ys)
return vals[inds]
def rvs(self,*args,**kwargs):
return self.resample(*args,**kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
Distribution.resample
|
python
|
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
N = int(N)
if minval is None:
if hasattr(self,'minval_cdf'):
minval = self.minval_cdf
else:
minval = self.minval
if maxval is None:
if hasattr(self,'maxval_cdf'):
maxval = self.maxval_cdf
else:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
u = rand.random(size=N)
if log:
vals = np.logspace(log10(minval),log10(maxval),res)
else:
vals = np.linspace(minval,maxval,res)
#sometimes cdf is flat. so ys will need to be uniqued
ys,yinds = np.unique(self.cdf(vals), return_index=True)
vals = vals[yinds]
inds = np.digitize(u,ys)
return vals[inds]
|
Returns random samples generated according to the distribution
Mirrors basic functionality of `rvs` method for `scipy.stats`
random variates. Implemented by mapping uniform numbers onto the
inverse CDF using a closest-matching grid approach.
Parameters
----------
N : int
Number of samples to return
minval,maxval : float, optional
Minimum/maximum values to resample. Should both usually just be
`None`, which will default to `self.minval`/`self.maxval`.
log : bool, optional
Whether grid should be log- or linear-spaced.
res : int, optional
Resolution of CDF grid used.
Returns
-------
values : ndarray
N samples.
Raises
------
ValueError
If maxval/minval are +/- infinity, this doesn't work because of
the grid-based approach.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L296-L357
|
[
"def cdf(x):\n x = np.atleast_1d(x)\n y = np.atleast_1d(cdf_fn(x))\n y[np.where(x < self.minval)] = 0\n y[np.where(x > self.maxval)] = 1\n return y\n"
] |
class Distribution(object):
"""Base class to describe probability distribution.
Has some minimal functional overlap with scipy.stats random variates
(e.g. `ppf`, `rvs`)
Parameters
----------
pdf : callable
The probability density function to be used. Does not have to be
normalized, but must be non-negative.
cdf : callable, optional
The cumulative distribution function. If not provided, this will
be tabulated from the pdf, as long as minval and maxval are also provided
name : string, optional
The name of the distribution (will be used, for example, to label a plot).
Default is empty string.
minval,maxval : float, optional
The minimum and maximum values of the distribution. The Distribution will
evaluate to zero outside these ranges, and this will also define the range
of the CDF. Defaults are -np.inf and +np.inf. If these are not explicity
provided, then a CDF function must be provided.
norm : float, optional
If not provided, this will be calculated by integrating the pdf from
minval to maxval so that the Distribution is a proper PDF that integrates
to unity. `norm` can be non-unity if desired, but beware, as this will
cause some things to act unexpectedly.
cdf_pts : int, optional
Number of points to tabulate in order to calculate CDF, if not provided.
Default is 500.
keywords : dict, optional
Optional dictionary of keywords; these will be saved with the distribution
when `save_hdf` is called.
Raises
------
ValueError
If `cdf` is not provided and minval or maxval are infinity.
"""
def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
cdf_pts=500,keywords=None):
self.name = name
self.pdf = pdf
self.cdf = cdf
self.minval = minval
self.maxval = maxval
if keywords is None:
self.keywords = {}
else:
self.keywords = keywords
self.keywords['name'] = name
self.keywords['minval'] = minval
self.keywords['maxval'] = maxval
if norm is None:
self.norm = quad(self.pdf,minval,maxval,full_output=1)[0]
else:
self.norm = norm
if cdf is None and (minval == -np.inf or maxval == np.inf):
raise ValueError('must provide either explicit cdf function or explicit min/max values')
else: #tabulate & interpolate CDF.
pts = np.linspace(minval,maxval,cdf_pts)
pdfgrid = self(pts)
cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
cdf_fn = interpolate(pts,cdfgrid,s=0,k=1)
def cdf(x):
x = np.atleast_1d(x)
y = np.atleast_1d(cdf_fn(x))
y[np.where(x < self.minval)] = 0
y[np.where(x > self.maxval)] = 1
return y
self.cdf = cdf
#define minval_cdf, maxval_cdf
zero_mask = cdfgrid==0
one_mask = cdfgrid==1
if zero_mask.sum()>0:
self.minval_cdf = pts[zero_mask][-1] #last 0 value
if one_mask.sum()>0:
self.maxval_cdf = pts[one_mask][0] #first 1 value
def pctile(self,pct,res=1000):
"""Returns the desired percentile of the distribution.
Will only work if properly normalized. Designed to mimic
the `ppf` method of the `scipy.stats` random variate objects.
Works by gridding the CDF at a given resolution and matching the nearest
point. NB, this is of course not as precise as an analytic ppf.
Parameters
----------
pct : float
Percentile between 0 and 1.
res : int, optional
The resolution at which to grid the CDF to find the percentile.
Returns
-------
percentile : float
"""
grid = np.linspace(self.minval,self.maxval,res)
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
ppf = pctile
def save_hdf(self,filename,path='',res=1000,logspace=False):
"""Saves distribution to an HDF5 file.
Saves a pandas `dataframe` object containing tabulated pdf and cdf
values at a specfied resolution. After saving to a particular path, a
distribution may be regenerated using the `Distribution_FromH5` subclass.
Parameters
----------
filename : string
File in which to save the distribution. Should end in .h5.
path : string, optional
Path in which to save the distribution within the .h5 file. By
default this is an empty string, which will lead to saving the
`fns` dataframe at the root level of the file.
res : int, optional
Resolution at which to grid the distribution for saving.
logspace : bool, optional
Sets whether the tabulated function should be gridded with log or
linear spacing. Default will be logspace=False, corresponding
to linear gridding.
"""
if logspace:
vals = np.logspace(np.log10(self.minval),
np.log10(self.maxval),
res)
else:
vals = np.linspace(self.minval,self.maxval,res)
d = {'vals':vals,
'pdf':self(vals),
'cdf':self.cdf(vals)}
df = pd.DataFrame(d)
df.to_hdf(filename,path+'/fns')
if hasattr(self,'samples'):
s = pd.Series(self.samples)
s.to_hdf(filename,path+'/samples')
store = pd.HDFStore(filename)
attrs = store.get_storer('{}/fns'.format(path)).attrs
attrs.keywords = self.keywords
attrs.disttype = type(self)
store.close()
def __call__(self,x):
"""
Evaluates pdf. Forces zero outside of (self.minval,self.maxval). Will return
Parameters
----------
x : float, array-like
Value(s) at which to evaluate PDF.
Returns
-------
pdf : float, array-like
Probability density (or re-normalized density if self.norm was explicity
provided.
"""
y = self.pdf(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
y[(x < self.minval) | (x > self.maxval)] = 0
y /= self.norm
if np.size(x)==1:
return y[0]
else:
return y
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,
self.pctile(0.5),
self.pctile(0.84)-self.pctile(0.5),
self.pctile(0.5)-self.pctile(0.16))
def __repr__(self):
return '<%s object: %s>' % (type(self),str(self))
def plot(self,minval=None,maxval=None,fig=None,log=False,
npts=500,**kwargs):
"""
Plots distribution.
Parameters
----------
minval : float,optional
minimum value to plot. Required if minval of Distribution is
`-np.inf`.
maxval : float, optional
maximum value to plot. Required if maxval of Distribution is
`np.inf`.
fig : None or int, optional
Parameter to pass to `setfig`. If `None`, then a new figure is
created; if a non-zero integer, the plot will go to that figure
(clearing everything first), if zero, then will overplot on
current axes.
log : bool, optional
If `True`, the x-spacing of the points to plot will be logarithmic.
npoints : int, optional
Number of points to plot.
kwargs
Keyword arguments are passed to plt.plot
Raises
------
ValueError
If finite lower and upper bounds are not provided.
"""
if minval is None:
minval = self.minval
if maxval is None:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to plot. (use minval, maxval kws)')
if log:
xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
else:
xs = np.linspace(minval,maxval,npts)
setfig(fig)
plt.plot(xs,self(xs),**kwargs)
plt.xlabel(self.name)
plt.ylim(ymin=0,ymax=self(xs).max()*1.2)
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
"""Returns random samples generated according to the distribution
Mirrors basic functionality of `rvs` method for `scipy.stats`
random variates. Implemented by mapping uniform numbers onto the
inverse CDF using a closest-matching grid approach.
Parameters
----------
N : int
Number of samples to return
minval,maxval : float, optional
Minimum/maximum values to resample. Should both usually just be
`None`, which will default to `self.minval`/`self.maxval`.
log : bool, optional
Whether grid should be log- or linear-spaced.
res : int, optional
Resolution of CDF grid used.
Returns
-------
values : ndarray
N samples.
Raises
------
ValueError
If maxval/minval are +/- infinity, this doesn't work because of
the grid-based approach.
"""
N = int(N)
if minval is None:
if hasattr(self,'minval_cdf'):
minval = self.minval_cdf
else:
minval = self.minval
if maxval is None:
if hasattr(self,'maxval_cdf'):
maxval = self.maxval_cdf
else:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
u = rand.random(size=N)
if log:
vals = np.logspace(log10(minval),log10(maxval),res)
else:
vals = np.linspace(minval,maxval,res)
#sometimes cdf is flat. so ys will need to be uniqued
ys,yinds = np.unique(self.cdf(vals), return_index=True)
vals = vals[yinds]
inds = np.digitize(u,ys)
return vals[inds]
def rvs(self,*args,**kwargs):
return self.resample(*args,**kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
Hist_Distribution.plothist
|
python
|
def plothist(self,fig=None,**kwargs):
setfig(fig)
plt.hist(self.samples,bins=self.bins,**kwargs)
|
Plots a histogram of samples using provided bins.
Parameters
----------
fig : None or int
Parameter passed to `setfig`.
kwargs
Keyword arguments passed to `plt.hist`.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L548-L560
| null |
class Hist_Distribution(Distribution):
"""Generates a distribution from a histogram of provided samples.
Uses `np.histogram` to create a histogram using the bins keyword,
then interpolates this histogram to create the pdf to pass to the
`Distribution` constructor.
Parameters
----------
samples : array-like
The samples used to create the distribution
bins : int or array-like, optional
Keyword passed to `np.histogram`. If integer, ths will be
the number of bins, if array-like, then this defines bin edges.
equibin : bool, optional
If true and ``bins`` is an integer ``N``, then the bins will be
found by splitting the data into ``N`` equal-sized groups.
smooth : int or float
Smoothing parameter used by the interpolation function.
order : int
Order of the spline to be used for interpolation. Default is
for linear interpolation.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,samples,bins=10,equibin=True,smooth=0,order=1,**kwargs):
self.samples = samples
if type(bins)==type(10) and equibin:
N = len(samples)//bins
sortsamples = np.sort(samples)
bins = sortsamples[0::N]
if bins[-1] != sortsamples[-1]:
bins = np.concatenate([bins,np.array([sortsamples[-1]])])
hist,bins = np.histogram(samples,bins=bins,density=True)
self.bins = bins
bins = (bins[1:] + bins[:-1])/2.
pdf_initial = interpolate(bins,hist,s=smooth,k=order)
def pdf(x):
x = np.atleast_1d(x)
y = pdf_initial(x)
w = np.where((x < self.bins[0]) | (x > self.bins[-1]))
y[w] = 0
return y
cdf = interpolate(bins,hist.cumsum()/hist.cumsum().max(),s=smooth,
k=order)
if 'maxval' not in kwargs:
kwargs['maxval'] = samples.max()
if 'minval' not in kwargs:
kwargs['minval'] = samples.min()
keywords = {'bins':bins,'smooth':smooth,'order':order}
Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
def __str__(self):
return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
def plothist(self,fig=None,**kwargs):
"""Plots a histogram of samples using provided bins.
Parameters
----------
fig : None or int
Parameter passed to `setfig`.
kwargs
Keyword arguments passed to `plt.hist`.
"""
setfig(fig)
plt.hist(self.samples,bins=self.bins,**kwargs)
def resample(self,N):
"""Returns a bootstrap resampling of provided samples.
Parameters
----------
N : int
Number of samples.
"""
inds = rand.randint(len(self.samples),size=N)
return self.samples[inds]
def save_hdf(self,filename,path='',**kwargs):
Distribution.save_hdf(self,filename,path=path,**kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
Hist_Distribution.resample
|
python
|
def resample(self,N):
inds = rand.randint(len(self.samples),size=N)
return self.samples[inds]
|
Returns a bootstrap resampling of provided samples.
Parameters
----------
N : int
Number of samples.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L562-L571
| null |
class Hist_Distribution(Distribution):
"""Generates a distribution from a histogram of provided samples.
Uses `np.histogram` to create a histogram using the bins keyword,
then interpolates this histogram to create the pdf to pass to the
`Distribution` constructor.
Parameters
----------
samples : array-like
The samples used to create the distribution
bins : int or array-like, optional
Keyword passed to `np.histogram`. If integer, ths will be
the number of bins, if array-like, then this defines bin edges.
equibin : bool, optional
If true and ``bins`` is an integer ``N``, then the bins will be
found by splitting the data into ``N`` equal-sized groups.
smooth : int or float
Smoothing parameter used by the interpolation function.
order : int
Order of the spline to be used for interpolation. Default is
for linear interpolation.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,samples,bins=10,equibin=True,smooth=0,order=1,**kwargs):
self.samples = samples
if type(bins)==type(10) and equibin:
N = len(samples)//bins
sortsamples = np.sort(samples)
bins = sortsamples[0::N]
if bins[-1] != sortsamples[-1]:
bins = np.concatenate([bins,np.array([sortsamples[-1]])])
hist,bins = np.histogram(samples,bins=bins,density=True)
self.bins = bins
bins = (bins[1:] + bins[:-1])/2.
pdf_initial = interpolate(bins,hist,s=smooth,k=order)
def pdf(x):
x = np.atleast_1d(x)
y = pdf_initial(x)
w = np.where((x < self.bins[0]) | (x > self.bins[-1]))
y[w] = 0
return y
cdf = interpolate(bins,hist.cumsum()/hist.cumsum().max(),s=smooth,
k=order)
if 'maxval' not in kwargs:
kwargs['maxval'] = samples.max()
if 'minval' not in kwargs:
kwargs['minval'] = samples.min()
keywords = {'bins':bins,'smooth':smooth,'order':order}
Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
def __str__(self):
return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
def plothist(self,fig=None,**kwargs):
"""Plots a histogram of samples using provided bins.
Parameters
----------
fig : None or int
Parameter passed to `setfig`.
kwargs
Keyword arguments passed to `plt.hist`.
"""
setfig(fig)
plt.hist(self.samples,bins=self.bins,**kwargs)
def save_hdf(self,filename,path='',**kwargs):
Distribution.save_hdf(self,filename,path=path,**kwargs)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
Box_Distribution.resample
|
python
|
def resample(self,N):
return rand.random(size=N)*(self.maxval - self.minval) + self.minval
|
Returns a random sampling.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L606-L609
| null |
class Box_Distribution(Distribution):
"""Simple distribution uniform between provided lower and upper limits.
Parameters
----------
lo,hi : float
Lower/upper limits of the distribution.
kwargs
Keyword arguments passed to `Distribution` constructor.
"""
def __init__(self,lo,hi,**kwargs):
self.lo = lo
self.hi = hi
def pdf(x):
return 1./(hi-lo) + 0*x
def cdf(x):
x = np.atleast_1d(x)
y = (x - lo) / (hi - lo)
y[x < lo] = 0
y[x > hi] = 1
return y
Distribution.__init__(self,pdf,cdf,minval=lo,maxval=hi,**kwargs)
def __str__(self):
return '%.1f < %s < %.1f' % (self.lo,self.name,self.hi)
|
timothydmorton/simpledist
|
simpledist/distributions.py
|
DoubleGauss_Distribution.resample
|
python
|
def resample(self,N,**kwargs):
lovals = self.mu - np.absolute(rand.normal(size=N)*self.siglo)
hivals = self.mu + np.absolute(rand.normal(size=N)*self.sighi)
u = rand.random(size=N)
hi = (u < float(self.sighi)/(self.sighi + self.siglo))
lo = (u >= float(self.sighi)/(self.sighi + self.siglo))
vals = np.zeros(N)
vals[hi] = hivals[hi]
vals[lo] = lovals[lo]
return vals
|
Random resampling of the doublegauss distribution
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L982-L995
| null |
class DoubleGauss_Distribution(Distribution):
"""A Distribution oject representing a two-sided Gaussian distribution
This can be used to represent a slightly asymmetric distribution,
and consists of two half-Normal distributions patched together at the
mode, and normalized appropriately. The pdf and cdf are according to
the `doubleguass` and `doubleguass_cdf` functions, respectively.
Parameters
----------
mu : float
The mode of the distribution.
siglo : float
Width of lower half-Gaussian.
sighi : float
Width of upper half-Gaussian.
kwargs
Keyword arguments are passed to `Distribution` constructor.
"""
def __init__(self,mu,siglo,sighi,**kwargs):
self.mu = mu
self.siglo = float(siglo)
self.sighi = float(sighi)
def pdf(x):
return doublegauss(x,(mu,siglo,sighi))
def cdf(x):
return doublegauss_cdf(x,(mu,siglo,sighi))
if 'minval' not in kwargs:
kwargs['minval'] = mu - 5*siglo
if 'maxval' not in kwargs:
kwargs['maxval'] = mu + 5*sighi
keywords = {'mu':mu,'siglo':siglo,'sighi':sighi}
Distribution.__init__(self,pdf,cdf,keywords=keywords,**kwargs)
def __str__(self):
return '%s = %.2f +%.2f -%.2f' % (self.name,self.mu,self.sighi,self.siglo)
|
timothydmorton/simpledist
|
simpledist/kde.py
|
deriv
|
python
|
def deriv(f,c,dx=0.0001):
return (f(c+dx)-f(c-dx))/(2*dx)
|
deriv(f,c,dx) --> float
Returns f'(x), computed as a symmetric difference quotient.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/kde.py#L224-L230
| null |
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.stats import gaussian_kde
import numpy.random as rand
from scipy.integrate import quad
class KDE(object):
"""An implementation of a kernel density estimator allowing for adaptive kernels.
If the `adaptive` keyword is set to `False`, then this will essentially be just
a wrapper for the `scipy.stats.gaussian_kde` class. If adaptive, though, it
allows for different kernels and different kernel widths according to the
"K-nearest-neighbors" algorithm as discussed `here <http://en.wikipedia.org/wiki/Variable_kernel_density_estimation#Balloon_estimators>`_. The `fast` option does the NN calculation using
broadcasting arrays rather than a brute-force sort. By default the
fast option will be used for datasets smaller than 5000.
Parameters
----------
dataset : array-like
Data set from which to calculate the KDE.
kernel : {'tricube','ep','gauss'}, optional
Kernel function to use for adaptive estimator.
adaptive : bool, optional
Flag whether or not to use adaptive KDE. If this is false, then this
class will just be a wrapper for `scipy.stats.gaussian_kde`.
k : `None` or int, optional
Number to use for K-nearest-neighbor algorithm. If `None`, then
it will be set to the `N/4`, where `N` is the size of the dataset.
fast : `None` or bool, optional
If `None`, then `fast = N < 5001`, where `N` is the size of the dataset.
`fast=True` will force array calculations, which will use lots of RAM
if the dataset is large.
norm : float, optional
Allows the normalization of the distribution to be something other
than unity
bandwidth : `None` or float, optional
Passed to `scipy.stats.gaussian_kde` if not using adaptive mode.
weights : array-like, optional
Not yet implemented.
draw_direct : bool, optional
If `True`, then resampling will be just a bootstrap resampling
of the input samples. If `False`, then resampling will actually
resample each individual kernel (not recommended for large-ish
datasets).
kwargs
Keyword arguments passed to `scipy.stats.gaussian_kde` if adaptive
mode is not being used.
"""
def __init__(self,dataset,kernel='tricube',adaptive=True,k=None,
fast=None,norm=1.,bandwidth=None,weights=None,
draw_direct=False,**kwargs):
self.dataset = np.atleast_1d(dataset)
self.weights = weights
self.n = np.size(dataset)
self.kernel = kernelfn(kernel)
self.kernelname = kernel
self.bandwidth = bandwidth
self.draw_direct = draw_direct
if k:
self.k = k
else:
self.k = self.n/4
self.norm=norm
self.adaptive = adaptive
self.fast = fast
if adaptive:
if self.fast==None:
self.fast = self.n < 5001
if self.fast:
#d1,d2 = np.meshgrid(self.dataset,self.dataset) #use broadcasting instead of meshgrid
diff = np.absolute(self.dataset - self.dataset[:,np.newaxis])
diffsort = np.sort(diff,axis=0)
self.h = diffsort[self.k,:]
##Attempt to handle larger datasets more easily:
else:
sortinds = np.argsort(self.dataset)
x = self.dataset[sortinds]
h = np.zeros(len(x))
for i in np.arange(len(x)):
lo = i - self.k
hi = i + self.k + 1
if lo < 0:
lo = 0
if hi > len(x):
hi = len(x)
diffs = abs(x[lo:hi]-x[i])
h[sortinds[i]] = np.sort(diffs)[self.k]
self.h = h
else:
self.gauss_kde = gaussian_kde(self.dataset,bw_method=bandwidth,**kwargs)
def renorm(self,norm):
"""Change the normalization"""
self.norm = norm
def evaluate(self,points):
if not self.adaptive:
return self.gauss_kde(points)*self.norm
points = np.atleast_1d(points).astype(self.dataset.dtype)
k = self.k
npts = np.size(points)
h = self.h
X,Y = np.meshgrid(self.dataset,points)
H = np.resize(h,(npts,self.n))
U = (X-Y)/H.astype(float)
result = 1./self.n*1./H*self.kernel(U)
return np.sum(result,axis=1)*self.norm
__call__ = evaluate
def integrate_box(self,low,high,forcequad=False,**kwargs):
"""Integrates over a box. Optionally force quad integration, even for non-adaptive.
If adaptive mode is not being used, this will just call the
`scipy.stats.gaussian_kde` method `integrate_box_1d`. Else,
by default, it will call `scipy.integrate.quad`. If the
`forcequad` flag is turned on, then that integration will be
used even if adaptive mode is off.
Parameters
----------
low : float
Lower limit of integration
high : float
Upper limit of integration
forcequad : bool
If `True`, then use the quad integration even if adaptive mode is off.
kwargs
Keyword arguments passed to `scipy.integrate.quad`.
"""
if not self.adaptive and not forcequad:
return self.gauss_kde.integrate_box_1d(low,high)*self.norm
return quad(self.evaluate,low,high,**kwargs)[0]
def resample(self,size=None,direct=None):
if direct is None:
direct = self.draw_direct
size=int(size)
if not self.adaptive:
return np.squeeze(self.gauss_kde.resample(size=size))
if direct:
inds = rand.randint(self.n,size=size)
return self.dataset[inds]
else:
if size is None:
size = self.n
indices = rand.randint(0,self.n,size=size)
means = self.dataset[indices]
h = self.h[indices]
fuzz = kerneldraw(size,self.kernelname)*h
return np.squeeze(means + fuzz)
draw = resample
def epkernel(u):
x = np.atleast_1d(u)
y = 3./4*(1-x*x)
y[((x>1) | (x < -1))] = 0
return y
def gausskernel(u):
return 1/np.sqrt(2*np.pi)*np.exp(-0.5*u*u)
def tricubekernel(u):
x = np.atleast_1d(u)
y = 35./32*(1-x*x)**3
y[((x > 1) | (x < -1))] = 0
return y
def kernelfn(kernel='tricube'):
if kernel=='ep':
return epkernel
elif kernel=='gauss':
return gausskernel
elif kernel=='tricube':
return tricubekernel
def kerneldraw(size=1,kernel='tricube',exact=False):
if kernel=='tricube':
fn = lambda x: 1./2 + 35./32*x - 35./32*x**3 + 21./32*x**5 - 5./32*x**7
u = rand.random(size=size)
if not exact:
xs = np.linspace(-1,1,1e4)
ys = fn(xs)
inds = np.digitize(u,ys)
return xs[inds]
else:
#old way (exact)
rets = np.zeros(size)
for i in np.arange(size):
f = lambda x: u[i]-fn(x)
rets[i] = newton(f,0,restrict=(-1,1))
return rets
def deriv(f,c,dx=0.0001):
"""
deriv(f,c,dx) --> float
Returns f'(x), computed as a symmetric difference quotient.
"""
return (f(c+dx)-f(c-dx))/(2*dx)
def fuzzyequals(a,b,tol=0.0001):
return abs(a-b) < tol
def newton(f,c,tol=0.0001,restrict=None):
"""
newton(f,c) --> float
Returns the x closest to c such that f(x) = 0
"""
#print(c)
if restrict:
lo,hi = restrict
if c < lo or c > hi:
print(c)
c = random*(hi-lo)+lo
if fuzzyequals(f(c),0,tol):
return c
else:
try:
return newton(f,c-f(c)/deriv(f,c,tol),tol,restrict)
except:
return None
|
timothydmorton/simpledist
|
simpledist/kde.py
|
newton
|
python
|
def newton(f,c,tol=0.0001,restrict=None):
#print(c)
if restrict:
lo,hi = restrict
if c < lo or c > hi:
print(c)
c = random*(hi-lo)+lo
if fuzzyequals(f(c),0,tol):
return c
else:
try:
return newton(f,c-f(c)/deriv(f,c,tol),tol,restrict)
except:
return None
|
newton(f,c) --> float
Returns the x closest to c such that f(x) = 0
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/kde.py#L235-L254
|
[
"def newton(f,c,tol=0.0001,restrict=None):\n \"\"\"\n newton(f,c) --> float\n\n Returns the x closest to c such that f(x) = 0\n \"\"\"\n #print(c)\n if restrict:\n lo,hi = restrict\n if c < lo or c > hi:\n print(c)\n c = random*(hi-lo)+lo\n\n if fuzzyequals(f(c),0,tol):\n return c\n else:\n try:\n return newton(f,c-f(c)/deriv(f,c,tol),tol,restrict)\n except:\n return None\n",
"def deriv(f,c,dx=0.0001):\n \"\"\"\n deriv(f,c,dx) --> float\n\n Returns f'(x), computed as a symmetric difference quotient.\n \"\"\"\n return (f(c+dx)-f(c-dx))/(2*dx)\n",
"def fuzzyequals(a,b,tol=0.0001):\n return abs(a-b) < tol\n",
"f = lambda x: u[i]-fn(x)\n"
] |
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.stats import gaussian_kde
import numpy.random as rand
from scipy.integrate import quad
class KDE(object):
"""An implementation of a kernel density estimator allowing for adaptive kernels.
If the `adaptive` keyword is set to `False`, then this will essentially be just
a wrapper for the `scipy.stats.gaussian_kde` class. If adaptive, though, it
allows for different kernels and different kernel widths according to the
"K-nearest-neighbors" algorithm as discussed `here <http://en.wikipedia.org/wiki/Variable_kernel_density_estimation#Balloon_estimators>`_. The `fast` option does the NN calculation using
broadcasting arrays rather than a brute-force sort. By default the
fast option will be used for datasets smaller than 5000.
Parameters
----------
dataset : array-like
Data set from which to calculate the KDE.
kernel : {'tricube','ep','gauss'}, optional
Kernel function to use for adaptive estimator.
adaptive : bool, optional
Flag whether or not to use adaptive KDE. If this is false, then this
class will just be a wrapper for `scipy.stats.gaussian_kde`.
k : `None` or int, optional
Number to use for K-nearest-neighbor algorithm. If `None`, then
it will be set to the `N/4`, where `N` is the size of the dataset.
fast : `None` or bool, optional
If `None`, then `fast = N < 5001`, where `N` is the size of the dataset.
`fast=True` will force array calculations, which will use lots of RAM
if the dataset is large.
norm : float, optional
Allows the normalization of the distribution to be something other
than unity
bandwidth : `None` or float, optional
Passed to `scipy.stats.gaussian_kde` if not using adaptive mode.
weights : array-like, optional
Not yet implemented.
draw_direct : bool, optional
If `True`, then resampling will be just a bootstrap resampling
of the input samples. If `False`, then resampling will actually
resample each individual kernel (not recommended for large-ish
datasets).
kwargs
Keyword arguments passed to `scipy.stats.gaussian_kde` if adaptive
mode is not being used.
"""
def __init__(self,dataset,kernel='tricube',adaptive=True,k=None,
fast=None,norm=1.,bandwidth=None,weights=None,
draw_direct=False,**kwargs):
self.dataset = np.atleast_1d(dataset)
self.weights = weights
self.n = np.size(dataset)
self.kernel = kernelfn(kernel)
self.kernelname = kernel
self.bandwidth = bandwidth
self.draw_direct = draw_direct
if k:
self.k = k
else:
self.k = self.n/4
self.norm=norm
self.adaptive = adaptive
self.fast = fast
if adaptive:
if self.fast==None:
self.fast = self.n < 5001
if self.fast:
#d1,d2 = np.meshgrid(self.dataset,self.dataset) #use broadcasting instead of meshgrid
diff = np.absolute(self.dataset - self.dataset[:,np.newaxis])
diffsort = np.sort(diff,axis=0)
self.h = diffsort[self.k,:]
##Attempt to handle larger datasets more easily:
else:
sortinds = np.argsort(self.dataset)
x = self.dataset[sortinds]
h = np.zeros(len(x))
for i in np.arange(len(x)):
lo = i - self.k
hi = i + self.k + 1
if lo < 0:
lo = 0
if hi > len(x):
hi = len(x)
diffs = abs(x[lo:hi]-x[i])
h[sortinds[i]] = np.sort(diffs)[self.k]
self.h = h
else:
self.gauss_kde = gaussian_kde(self.dataset,bw_method=bandwidth,**kwargs)
def renorm(self,norm):
"""Change the normalization"""
self.norm = norm
def evaluate(self,points):
if not self.adaptive:
return self.gauss_kde(points)*self.norm
points = np.atleast_1d(points).astype(self.dataset.dtype)
k = self.k
npts = np.size(points)
h = self.h
X,Y = np.meshgrid(self.dataset,points)
H = np.resize(h,(npts,self.n))
U = (X-Y)/H.astype(float)
result = 1./self.n*1./H*self.kernel(U)
return np.sum(result,axis=1)*self.norm
__call__ = evaluate
def integrate_box(self,low,high,forcequad=False,**kwargs):
"""Integrates over a box. Optionally force quad integration, even for non-adaptive.
If adaptive mode is not being used, this will just call the
`scipy.stats.gaussian_kde` method `integrate_box_1d`. Else,
by default, it will call `scipy.integrate.quad`. If the
`forcequad` flag is turned on, then that integration will be
used even if adaptive mode is off.
Parameters
----------
low : float
Lower limit of integration
high : float
Upper limit of integration
forcequad : bool
If `True`, then use the quad integration even if adaptive mode is off.
kwargs
Keyword arguments passed to `scipy.integrate.quad`.
"""
if not self.adaptive and not forcequad:
return self.gauss_kde.integrate_box_1d(low,high)*self.norm
return quad(self.evaluate,low,high,**kwargs)[0]
def resample(self,size=None,direct=None):
if direct is None:
direct = self.draw_direct
size=int(size)
if not self.adaptive:
return np.squeeze(self.gauss_kde.resample(size=size))
if direct:
inds = rand.randint(self.n,size=size)
return self.dataset[inds]
else:
if size is None:
size = self.n
indices = rand.randint(0,self.n,size=size)
means = self.dataset[indices]
h = self.h[indices]
fuzz = kerneldraw(size,self.kernelname)*h
return np.squeeze(means + fuzz)
draw = resample
def epkernel(u):
x = np.atleast_1d(u)
y = 3./4*(1-x*x)
y[((x>1) | (x < -1))] = 0
return y
def gausskernel(u):
return 1/np.sqrt(2*np.pi)*np.exp(-0.5*u*u)
def tricubekernel(u):
x = np.atleast_1d(u)
y = 35./32*(1-x*x)**3
y[((x > 1) | (x < -1))] = 0
return y
def kernelfn(kernel='tricube'):
if kernel=='ep':
return epkernel
elif kernel=='gauss':
return gausskernel
elif kernel=='tricube':
return tricubekernel
def kerneldraw(size=1,kernel='tricube',exact=False):
if kernel=='tricube':
fn = lambda x: 1./2 + 35./32*x - 35./32*x**3 + 21./32*x**5 - 5./32*x**7
u = rand.random(size=size)
if not exact:
xs = np.linspace(-1,1,1e4)
ys = fn(xs)
inds = np.digitize(u,ys)
return xs[inds]
else:
#old way (exact)
rets = np.zeros(size)
for i in np.arange(size):
f = lambda x: u[i]-fn(x)
rets[i] = newton(f,0,restrict=(-1,1))
return rets
def deriv(f,c,dx=0.0001):
"""
deriv(f,c,dx) --> float
Returns f'(x), computed as a symmetric difference quotient.
"""
return (f(c+dx)-f(c-dx))/(2*dx)
def fuzzyequals(a,b,tol=0.0001):
return abs(a-b) < tol
def newton(f,c,tol=0.0001,restrict=None):
"""
newton(f,c) --> float
Returns the x closest to c such that f(x) = 0
"""
#print(c)
if restrict:
lo,hi = restrict
if c < lo or c > hi:
print(c)
c = random*(hi-lo)+lo
if fuzzyequals(f(c),0,tol):
return c
else:
try:
return newton(f,c-f(c)/deriv(f,c,tol),tol,restrict)
except:
return None
|
timothydmorton/simpledist
|
simpledist/kde.py
|
KDE.integrate_box
|
python
|
def integrate_box(self,low,high,forcequad=False,**kwargs):
if not self.adaptive and not forcequad:
return self.gauss_kde.integrate_box_1d(low,high)*self.norm
return quad(self.evaluate,low,high,**kwargs)[0]
|
Integrates over a box. Optionally force quad integration, even for non-adaptive.
If adaptive mode is not being used, this will just call the
`scipy.stats.gaussian_kde` method `integrate_box_1d`. Else,
by default, it will call `scipy.integrate.quad`. If the
`forcequad` flag is turned on, then that integration will be
used even if adaptive mode is off.
Parameters
----------
low : float
Lower limit of integration
high : float
Upper limit of integration
forcequad : bool
If `True`, then use the quad integration even if adaptive mode is off.
kwargs
Keyword arguments passed to `scipy.integrate.quad`.
|
train
|
https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/kde.py#L131-L156
| null |
class KDE(object):
"""An implementation of a kernel density estimator allowing for adaptive kernels.
If the `adaptive` keyword is set to `False`, then this will essentially be just
a wrapper for the `scipy.stats.gaussian_kde` class. If adaptive, though, it
allows for different kernels and different kernel widths according to the
"K-nearest-neighbors" algorithm as discussed `here <http://en.wikipedia.org/wiki/Variable_kernel_density_estimation#Balloon_estimators>`_. The `fast` option does the NN calculation using
broadcasting arrays rather than a brute-force sort. By default the
fast option will be used for datasets smaller than 5000.
Parameters
----------
dataset : array-like
Data set from which to calculate the KDE.
kernel : {'tricube','ep','gauss'}, optional
Kernel function to use for adaptive estimator.
adaptive : bool, optional
Flag whether or not to use adaptive KDE. If this is false, then this
class will just be a wrapper for `scipy.stats.gaussian_kde`.
k : `None` or int, optional
Number to use for K-nearest-neighbor algorithm. If `None`, then
it will be set to the `N/4`, where `N` is the size of the dataset.
fast : `None` or bool, optional
If `None`, then `fast = N < 5001`, where `N` is the size of the dataset.
`fast=True` will force array calculations, which will use lots of RAM
if the dataset is large.
norm : float, optional
Allows the normalization of the distribution to be something other
than unity
bandwidth : `None` or float, optional
Passed to `scipy.stats.gaussian_kde` if not using adaptive mode.
weights : array-like, optional
Not yet implemented.
draw_direct : bool, optional
If `True`, then resampling will be just a bootstrap resampling
of the input samples. If `False`, then resampling will actually
resample each individual kernel (not recommended for large-ish
datasets).
kwargs
Keyword arguments passed to `scipy.stats.gaussian_kde` if adaptive
mode is not being used.
"""
def __init__(self,dataset,kernel='tricube',adaptive=True,k=None,
fast=None,norm=1.,bandwidth=None,weights=None,
draw_direct=False,**kwargs):
self.dataset = np.atleast_1d(dataset)
self.weights = weights
self.n = np.size(dataset)
self.kernel = kernelfn(kernel)
self.kernelname = kernel
self.bandwidth = bandwidth
self.draw_direct = draw_direct
if k:
self.k = k
else:
self.k = self.n/4
self.norm=norm
self.adaptive = adaptive
self.fast = fast
if adaptive:
if self.fast==None:
self.fast = self.n < 5001
if self.fast:
#d1,d2 = np.meshgrid(self.dataset,self.dataset) #use broadcasting instead of meshgrid
diff = np.absolute(self.dataset - self.dataset[:,np.newaxis])
diffsort = np.sort(diff,axis=0)
self.h = diffsort[self.k,:]
##Attempt to handle larger datasets more easily:
else:
sortinds = np.argsort(self.dataset)
x = self.dataset[sortinds]
h = np.zeros(len(x))
for i in np.arange(len(x)):
lo = i - self.k
hi = i + self.k + 1
if lo < 0:
lo = 0
if hi > len(x):
hi = len(x)
diffs = abs(x[lo:hi]-x[i])
h[sortinds[i]] = np.sort(diffs)[self.k]
self.h = h
else:
self.gauss_kde = gaussian_kde(self.dataset,bw_method=bandwidth,**kwargs)
def renorm(self,norm):
"""Change the normalization"""
self.norm = norm
def evaluate(self,points):
if not self.adaptive:
return self.gauss_kde(points)*self.norm
points = np.atleast_1d(points).astype(self.dataset.dtype)
k = self.k
npts = np.size(points)
h = self.h
X,Y = np.meshgrid(self.dataset,points)
H = np.resize(h,(npts,self.n))
U = (X-Y)/H.astype(float)
result = 1./self.n*1./H*self.kernel(U)
return np.sum(result,axis=1)*self.norm
__call__ = evaluate
def resample(self,size=None,direct=None):
if direct is None:
direct = self.draw_direct
size=int(size)
if not self.adaptive:
return np.squeeze(self.gauss_kde.resample(size=size))
if direct:
inds = rand.randint(self.n,size=size)
return self.dataset[inds]
else:
if size is None:
size = self.n
indices = rand.randint(0,self.n,size=size)
means = self.dataset[indices]
h = self.h[indices]
fuzz = kerneldraw(size,self.kernelname)*h
return np.squeeze(means + fuzz)
draw = resample
|
ttm/socialLegacy
|
social/twiter.py
|
Twitter.searchTag
|
python
|
def searchTag(self,HTAG="#arenaNETmundial"):
search = t.search(q=HTAG,count=100,result_type="recent")
ss=search[:]
search = t.search(q=HTAG,count=150,max_id=ss[-1]['id']-1,result_type="recent")
#search = t.search(q=HTAG,count=150,since_id=ss[-1]['id'],result_type="recent")
while seach:
ss+=search[:]
search = t.search(q=HTAG,count=150,max_id=ss[-1]['id']-1,result_type="recent")
|
Set Twitter search or stream criteria for the selection of tweets
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/twiter.py#L24-L32
| null |
class Twitter:
"""Simplified Twitter interface for Stability observance
# function to set authentication: __init__()
# function to set hashtag and other tweets selection criteria: searchTag()
# function to search tweets: searchTag()
# function to stream tweets: void
"""
def __init__(self,app_key=TWITTER_API_KEY,
app_secret=TWITTER_API_KEY_SECRET,
oauth_token=TWITTER_ACCESS_TOKEN,
oauth_token_secret=TWITTER_ACCESS_TOKEN_SECRET):
"""Start twitter seach and stream interface"""
self.t = Twython(app_key =app_key ,
app_secret =app_secret ,
oauth_token =oauth_token ,
oauth_token_secret=oauth_token_secret)
|
ttm/socialLegacy
|
social/utils.py
|
makeRetweetNetwork
|
python
|
def makeRetweetNetwork(tweets):
G=x.DiGraph()
G_=x.DiGraph()
for tweet in tweets:
text=tweet["text"]
us=tweet["user"]["screen_name"]
if text.startswith("RT @"):
prev_us=text.split(":")[0].split("@")[1]
#print(us,prev_us,text)
if G.has_edge(prev_us,us):
G[prev_us][us]["weight"]+=1
G_[prev_us][us]["weight"]+=1
else:
G.add_edge(prev_us, us, weight=1.)
G_.add_edge(prev_us, us, weight=1.)
if us not in G_.nodes():
G_.add_node(us)
return G,G_
|
Receives tweets, returns directed retweet networks.
Without and with isolated nodes.
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/utils.py#L2-L23
| null |
import networkx as x
def makeRetweetNetwork(tweets):
"""Receives tweets, returns directed retweet networks.
Without and with isolated nodes.
"""
G=x.DiGraph()
G_=x.DiGraph()
for tweet in tweets:
text=tweet["text"]
us=tweet["user"]["screen_name"]
if text.startswith("RT @"):
prev_us=text.split(":")[0].split("@")[1]
#print(us,prev_us,text)
if G.has_edge(prev_us,us):
G[prev_us][us]["weight"]+=1
G_[prev_us][us]["weight"]+=1
else:
G.add_edge(prev_us, us, weight=1.)
G_.add_edge(prev_us, us, weight=1.)
if us not in G_.nodes():
G_.add_node(us)
return G,G_
class GDFgraph:
"""Read GDF graph into networkX"""
def __init__(self,filename="../data/RenatoFabbri06022014.gdf"):
with open(filename,"r") as f:
self.data=f.read()
self.lines=self.data.split("\n")
columns=self.lines[0].split(">")[1].split(",")
column_names=[i.split(" ")[0] for i in columns]
data_friends={cn:[] for cn in column_names}
for line in self.lines[1:]:
if not line:
break
if ">" in line:
columns=line.split(">")[1].split(",")
column_names2=[i.split(" ")[0] for i in columns]
data_friendships={cn:[] for cn in column_names2}
continue
fields=line.split(",")
if "column_names2" not in locals():
for i, field in enumerate(fields):
if field.isdigit(): field=int(field)
data_friends[column_names[i]].append(field)
else:
for i, field in enumerate(fields):
if field.isdigit(): field=int(field)
data_friendships[column_names2[i]].append(field)
self.data_friendships=data_friendships
self.data_friends=data_friends
self.n_friends=len(data_friends[column_names[0]])
self.n_friendships=len(data_friendships[column_names2[0]])
self.makeNetwork()
def makeNetwork(self):
"""Makes graph object from .gdf loaded data"""
if "weight" in self.data_friendships.keys():
self.G=G=x.DiGraph()
else:
self.G=G=x.Graph()
F=self.data_friends
for friendn in range(self.n_friends):
if "posts" in F.keys():
G.add_node(F["name"][friendn],
label=F["label"][friendn],
posts=F["posts"][friendn])
elif "agerank" in F.keys():
G.add_node(F["name"][friendn],
label=F["label"][friendn],
gender=F["sex"][friendn],
locale=F["locale"][friendn],
agerank=F["agerank"][friendn])
else:
G.add_node(F["name"][friendn],
label=F["label"][friendn],
gender=F["sex"][friendn],
locale=F["locale"][friendn])
F=self.data_friendships
for friendshipn in range(self.n_friendships):
if "weight" in F.keys():
G.add_edge(F["node1"][friendshipn],F["node2"][friendshipn],weight=F["weight"][friendshipn])
else:
G.add_edge(F["node1"][friendshipn],F["node2"][friendshipn])
|
ttm/socialLegacy
|
social/utils.py
|
GDFgraph.makeNetwork
|
python
|
def makeNetwork(self):
if "weight" in self.data_friendships.keys():
self.G=G=x.DiGraph()
else:
self.G=G=x.Graph()
F=self.data_friends
for friendn in range(self.n_friends):
if "posts" in F.keys():
G.add_node(F["name"][friendn],
label=F["label"][friendn],
posts=F["posts"][friendn])
elif "agerank" in F.keys():
G.add_node(F["name"][friendn],
label=F["label"][friendn],
gender=F["sex"][friendn],
locale=F["locale"][friendn],
agerank=F["agerank"][friendn])
else:
G.add_node(F["name"][friendn],
label=F["label"][friendn],
gender=F["sex"][friendn],
locale=F["locale"][friendn])
F=self.data_friendships
for friendshipn in range(self.n_friendships):
if "weight" in F.keys():
G.add_edge(F["node1"][friendshipn],F["node2"][friendshipn],weight=F["weight"][friendshipn])
else:
G.add_edge(F["node1"][friendshipn],F["node2"][friendshipn])
|
Makes graph object from .gdf loaded data
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/utils.py#L57-L85
| null |
class GDFgraph:
"""Read GDF graph into networkX"""
def __init__(self,filename="../data/RenatoFabbri06022014.gdf"):
with open(filename,"r") as f:
self.data=f.read()
self.lines=self.data.split("\n")
columns=self.lines[0].split(">")[1].split(",")
column_names=[i.split(" ")[0] for i in columns]
data_friends={cn:[] for cn in column_names}
for line in self.lines[1:]:
if not line:
break
if ">" in line:
columns=line.split(">")[1].split(",")
column_names2=[i.split(" ")[0] for i in columns]
data_friendships={cn:[] for cn in column_names2}
continue
fields=line.split(",")
if "column_names2" not in locals():
for i, field in enumerate(fields):
if field.isdigit(): field=int(field)
data_friends[column_names[i]].append(field)
else:
for i, field in enumerate(fields):
if field.isdigit(): field=int(field)
data_friendships[column_names2[i]].append(field)
self.data_friendships=data_friendships
self.data_friends=data_friends
self.n_friends=len(data_friends[column_names[0]])
self.n_friendships=len(data_friendships[column_names2[0]])
self.makeNetwork()
|
ttm/socialLegacy
|
social/fb/gml2rdf.py
|
triplifyGML
|
python
|
def triplifyGML(dpath="../data/fb/",fname="foo.gdf",fnamei="foo_interaction.gdf",
fpath="./fb/",scriptpath=None,uid=None,sid=None,fb_link=None,ego=True,umbrella_dir=None):
c("iniciado tripgml")
if sum(c.isdigit() for c in fname)==4:
year=re.findall(r".*(\d\d\d\d).gml",fname)[0][0]
B.datetime_snapshot=datetime.date(*[int(i) for i in (year)])
if sum(c.isdigit() for c in fname)==12:
day,month,year,hour,minute=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d)_(\d\d)(\d\d).gml",fname)[0]
B.datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)])
if sum(c.isdigit() for c in fname)==14:
day,month,year,hour,minute,second=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d)_(\d\d)(\d\d)(\d\d).gml",fname)[0]
B.datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute,second)])
elif sum(c.isdigit() for c in fname)==8:
day,month,year=re.findall(r".*(\d\d)(\d\d)(\d\d\d\d).gml",fname)[0]
B.datetime_snapshot=datetime.date(*[int(i) for i in (year,month,day)])
B.datetime_snapshot_=datetime_snapshot.isoformat()
B.fname=fname
B.fnamei=fnamei
B.name=fname.replace(".gml","_gml")
if fnamei:
B.namei=fnamei[:-4]
B.ego=ego
B.friendship=bool(fname)
B.interaction=bool(fnamei)
B.sid=sid
B.uid=uid
B.scriptpath=scriptpath
B.fb_link=fb_link
B.dpath=dpath
B.fpath=fpath
B.prefix="https://raw.githubusercontent.com/OpenLinkedSocialData/{}master/".format(umbrella_dir)
B.umbrella_dir=umbrella_dir
c("antes de ler")
#fnet=S.fb.readGML(dpath+fname) # return networkx graph
fnet=S.fb.readGML2(dpath+fname) # return networkx graph
# return fnet
c("depois de ler, antes de fazer rdf")
fnet_=rdfFriendshipNetwork(fnet) # return rdflib graph
if B.interaction:
inet=S.fb.readGML(dpath+fnamei) # return networkx graph
inet_=rdfInteractionNetwork(inet) # return rdflib graph
else:
inet_=0
meta=makeMetadata(fnet_,inet_) # return rdflib graph with metadata about the structure
c("depois de rdf, escrita em disco")
writeAllFB(fnet_,inet_,meta) # write linked data tree
c("cabo")
|
Produce a linked data publication tree from a standard GML file.
INPUTS:
======
=> the data directory path
=> the file name (fname) of the friendship network
=> the file name (fnamei) of the interaction network
=> the final path (fpath) for the tree of files to be created
=> a path to the script that is calling this function (scriptpath)
=> the numeric id (uid) of the facebook user or group of the network(s)
=> the numeric id (sid) of the facebook user or group of the network (s)
=> the facebook link (fb_link) of the user or group
=> the network is from a user (ego==True) or a group (ego==False)
OUTPUTS:
=======
the tree in the directory fpath.
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fb/gml2rdf.py#L4-L67
|
[
"def rdfFriendshipNetwork(fnet):\n tg=P.rdf.makeBasicGraph([[\"po\",\"fb\"],[P.rdf.ns.per,P.rdf.ns.fb]],\"Facebook friendship network from {} . Ego: {}\".format(B.name,B.ego))\n #if sum([(\"user\" in i) for i in fnet[\"individuals\"][\"label\"]])==len(fnet[\"individuals\"][\"label\"]):\n # # nomes falsos, ids espurios\n # anonymized=True\n #else:\n # anonymized=False\n B.fanon=False\n\n tkeys=list(fnet[\"individuals\"].keys())\n foo={\"uris\":[],\"vals\":[]}\n for tkey in tkeys:\n if tkey != \"groupid\":\n foo[\"uris\"]+=[eval(\"P.rdf.ns.fb.\"+trans(tkey))]\n foo[\"vals\"]+=[fnet[\"individuals\"][tkey]]\n if \"groupid\" in tkeys:\n B.groupuid=fnet[\"individuals\"][\"groupid\"][0]\n tkeys.remove(\"groupid\")\n else:\n B.groupuid=None\n iname= tkeys.index(\"name\")\n B.uid_names={}\n for vals_ in zip(*foo[\"vals\"]):\n vals_=list(vals_)\n cid=vals_[iname]\n foo_=foo[\"uris\"][:]\n take=0\n name_=\"{}-{}\".format(B.name,cid)\n B.uid_names[cid]=name_\n vals_=[el for i,el in enumerate(vals_) if i not in (iname,)]\n foo_= [el for i,el in enumerate(foo_) if i not in (iname,)]\n i=0\n ii=[]\n for val in vals_:\n if not val:\n ii+=[i]\n i+=1\n vals_=[val for i,val in enumerate(vals_) if i not in ii]\n\n# take+=1\n# if not vals_[isex-take]:\n# vals_=[el for i,el in enumerate(vals_) if i not in (isex-take,)]\n# foo_= [el for i,el in enumerate(foo_) if i not in (isex-take,)]\n# take+=1\n# if not vals_[ilocale-take]:\n# vals_=[el for i,el in enumerate(vals_) if i not in (ilocale-take,)]\n# foo_= [el for i,el in enumerate(foo_) if i not in (ilocale-take,)]\n ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name_)\n P.rdf.link([tg],ind,None,foo_,\n vals_)\n B.nfriends=len(foo[\"vals\"][0])\n #if anonymized:\n # B.fvars=[trans(i) for j,i in enumerate(tkeys) if j not in (ilabel,iname)]\n #else:\n # B.fvars=[trans(i) for i in tkeys]\n B.fvars=[trans(i) for j,i in enumerate(tkeys) if j not in (iname,)]\n\n friendships_=[fnet[\"relations\"][i] for i in (\"node1\",\"node2\")]\n c(\"escritos 
participantes\")\n i=1\n for uid1,uid2 in zip(*friendships_):\n uids=[r.URIRef(P.rdf.ns.fb.Participant+\"#\"+B.uid_names[i]) for i in (uid1,uid2)]\n P.rdf.link_([tg],uids[0],None,[P.rdf.ns.fb.friend],[uids[1]])\n if (i%1000)==0:\n c(i)\n i+=1\n P.rdf.G(tg[0],P.rdf.ns.fb.friend,\n P.rdf.ns.rdf.type,\n P.rdf.ns.owl.SymmetricProperty)\n B.nfriendships=len(friendships_[0])\n c(\"escritas amizades\")\n return tg\n",
"def makeMetadata(fnet,inet):\n desc=\"facebook network from {} . Ego: {}. Friendship: {}. Interaction: {}.\".format(B.name,B.ego,B.friendship,B.interaction)\n tg2=P.rdf.makeBasicGraph([[\"po\",\"fb\"],[P.rdf.ns.per,P.rdf.ns.fb]],\"Metadata for the \"+desc)\n aname=B.name+\"_fb\"\n ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot,\n aname,\"Snapshot {}\".format(aname))\n ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot,\n aname,\"Snapshot {}\".format(aname))\n\n foo={\"uris\":[],\"vals\":[]}\n if ego:\n if B.uid:\n foo[\"uris\"].append(P.rdf.ns.fb.uid)\n foo[\"vals\"].append(B.uid)\n if B.sid:\n foo[\"uris\"].append(P.rdf.ns.fb.sid)\n foo[\"vals\"].append(B.sid)\n else:\n if B.uid:\n foo[\"uris\"].append(P.rdf.ns.fb.groupID)\n foo[\"vals\"].append(B.uid)\n if B.sid:\n foo[\"uris\"].append(P.rdf.ns.fb.groupSID)\n foo[\"vals\"].append(B.sid)\n if B.groupuid:\n foo[\"uris\"].append(P.rdf.ns.fb.groupID)\n foo[\"vals\"].append(B.groupuid)\n if B.fb_link:\n if type(B.fb_link) not in (type([2,3]),type((2,3))):\n foo[\"uris\"].append(P.rdf.ns.fb.fbLink)\n foo[\"vals\"].append(B.fb_link)\n else:\n for link in B.fb_link:\n foo[\"uris\"].append(P.rdf.ns.fb.fbLink)\n foo[\"vals\"].append(link)\n if B.friendship:\n B.ffile=\"{}{}/base/{}\".format(B.prefix,aname,B.fname)\n foo[\"uris\"]+=[P.rdf.ns.fb.originalFriendshipFile,\n P.rdf.ns.po.friendshipXMLFile,\n P.rdf.ns.po.friendshipTTLFile]+\\\n [ P.rdf.ns.fb.nFriends,\n P.rdf.ns.fb.nFriendships,\n P.rdf.ns.fb.fAnon ]+\\\n [P.rdf.ns.fb.friendAttribute]*len(B.fvars)\n B.frdf_file=\"{}{}/rdf/{}Friendship.owl\".format(B.prefix,aname,aname)\n foo[\"vals\"]+=[B.ffile,\n B.frdf_file,\n \"{}{}/rdf/{}Friendship.ttl\".format(B.prefix,aname,aname) ]+\\\n [B.nfriends,B.nfriendships,B.fanon]+list(B.fvars)\n\n if B.interaction:\n B.ifile=\"{}{}/base/{}\".format(B.prefix,aname,B.fnamei)\n foo[\"uris\"]+=[P.rdf.ns.fb.originalInteractionFile,\n P.rdf.ns.po.interactionXMLFile,\n P.rdf.ns.po.interactionTTLFile,]+\\\n [ P.rdf.ns.fb.nFriendsInteracted,\n 
P.rdf.ns.fb.nInteractions,\n P.rdf.ns.fb.iAnon ]+\\\n [ P.rdf.ns.fb.interactionFriendAttribute]*len(B.fvarsi)+\\\n [ P.rdf.ns.fb.interactionAttribute]*len(B.ivars)\n\n B.irdf_file=\"{}{}/rdf/{}Interaction.owl\".format(B.prefix,aname,aname)\n foo[\"vals\"]+=[B.ifile,\n B.irdf_file,\n \"{}{}/rdf/{}Interaction.ttl\".format(B.prefix,aname,aname),]+\\\n [B.nfriendsi,B.ninteractions,B.ianon]+list(B.fvarsi)+list(B.ivars)\n\n foo[\"uris\"]+=[\n P.rdf.ns.fb.ego,\n P.rdf.ns.fb.friendship,\n P.rdf.ns.fb.interaction,\n ]\n foo[\"vals\"]+=[B.ego,B.friendship,B.interaction]\n\n #https://github.com/OpenLinkedSocialData/fbGroups/tree/master/AdornoNaoEhEnfeite29032013_fb\n B.available_dir=\"https://github.com/OpenLinkedSocialData/{}tree/master/{}\".format(B.umbrella_dir,aname)\n B.mrdf_file=\"{}{}/rdf/{}Meta.owl\".format(B.prefix,aname,aname)\n P.rdf.link([tg2],ind,\"Snapshot {}\".format(aname),\n [P.rdf.ns.po.createdAt,\n P.rdf.ns.po.triplifiedIn,\n P.rdf.ns.po.donatedBy,\n P.rdf.ns.po.availableAt,\n P.rdf.ns.po.discorveryRDFFile,\n P.rdf.ns.po.discoveryTTLFile,\n P.rdf.ns.po.acquiredThrough,\n P.rdf.ns.rdfs.comment,\n ]+foo[\"uris\"],\n [B.datetime_snapshot,\n datetime.datetime.now(),\n B.name,\n B.available_dir,\n B.mrdf_file,\n \"{}{}/rdf/{}Meta.ttl\".format(B.prefix,aname,aname),\n \"Netvizz\",\n desc,\n ]+foo[\"vals\"])\n ind2=P.rdf.IC([tg2],P.rdf.ns.po.Platform,\"Facebook\")\n P.rdf.link_([tg2],ind,\"Snapshot {}\".format(aname),\n [P.rdf.ns.po.socialProtocol],\n [ind2],\n [\"Facebook\"])\n #for friend_attr in fg2[\"friends\"]:\n return tg2\n",
"def writeAllFB(fnet,inet,mnet):\n aname=B.name+\"_fb\"\n fpath_=\"{}{}/\".format(B.fpath,aname)\n if B.friendship:\n P.rdf.writeAll(fnet,aname+\"Friendship\",fpath_,False,1)\n if B.interaction:\n P.rdf.writeAll(inet,aname+\"Interaction\",fpath_)\n # copia o script que gera este codigo\n if not os.path.isdir(fpath_+\"scripts\"):\n os.mkdir(fpath_+\"scripts\")\n shutil.copy(scriptpath,fpath_+\"scripts/\")\n # copia do base data\n if not os.path.isdir(fpath_+\"base\"):\n os.mkdir(fpath_+\"base\")\n shutil.copy(B.dpath+B.fname,fpath_+\"base/\")\n if B.interaction:\n shutil.copy(B.dpath+B.fnamei,fpath_+\"base/\")\n tinteraction=\"\"\"\\n{} individuals with metadata {}\nand {} interactions with metadata {} constitute the interaction \nnetwork in file:\n{}\n(anonymized: {}).\"\"\".format( B.nfriendsi,str(B.fvarsi),\n B.ninteractions,str(B.ivars),B.irdf_file,\n B.ianon)\n originals=\"{}\\n{}\".format(B.ffile,B.ifile)\n else:\n tinteraction=\"\"\n originals=B.ffile\n\n\n P.rdf.writeAll(mnet,aname+\"Meta\",fpath_,1)\n # faz um README\n with open(fpath_+\"README\",\"w\") as f:\n f.write(\"\"\"This repo delivers RDF data from the facebook\nfriendship network of {} collected around {}.\n{} individuals with metadata {}\nand {} friendships constitute the friendship network in file:\n{}\n(anonymized: {}).{}\nMetadata for discovery is in file:\n{}\nOriginal files:\n{}\nEgo network: {}\nFriendship network: {}\nInteraction network: {}\nAll files should be available at the git repository:\n{}\n\\n\"\"\".format(\n B.name,B.datetime_snapshot_,\n B.nfriends,str(B.fvars),\n B.nfriendships, B.frdf_file,\n# B.fanon,\n \"FALSE, but no id\",\n tinteraction,\n B.mrdf_file,originals,\n B.ego, B.friendship,B.interaction,B.available_dir\n ))\n"
] |
import percolation as P, social as S, rdflib as r, builtins as B, re, datetime, os, shutil
c=P.utils.check
def trans(tkey):
if tkey=="name":
return "numericID"
if tkey=="label":
return "name"
return tkey
def rdfInteractionNetwork(fnet):
tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"Facebook interaction network from {} . Ego: {}".format(B.name,B.ego))
tkeys=list(fnet["individuals"].keys())
if sum([("user" in i) for i in fnet["individuals"]["label"]])==len(fnet["individuals"]["label"]):
# nomes falsos, ids espurios
anonymized=True
else:
anonymized=False
B.ianon=anonymized
foo={"uris":[],"vals":[]}
for tkey in tkeys:
foo["uris"]+=[eval("P.rdf.ns.fb."+trans(tkey))]
foo["vals"]+=[fnet["individuals"][tkey]]
iname= tkeys.index("name")
ilabel=tkeys.index("label")
B.nfriendsi=len(foo["vals"][0])
if anonymized:
B.fvarsi=[trans(i) for j,i in enumerate(tkeys) if j not in (ilabel,iname)]
else:
B.fvarsi=[trans(i) for i in tkeys]
icount=0
#uid_names={}
for vals_ in zip(*foo["vals"]):
vals_=list(vals_)
cid=vals_[iname]
foo_=foo["uris"][:]
if anonymized:
if cid in B.uid_names.keys():
name_=B.uid_names[cid]
else:
anon_name=vals_[ilabel]
name_="{}-{}".format(B.namei,anon_name)
B.uid_names[cid]=name_
#anon_name=vals_[ilabel]
#name_="{}-{}".format(B.name,anon_name)
#uid_names[cid]=name_
vals_=[el for i,el in enumerate(vals_) if i not in (ilabel,iname)]
foo_= [el for i,el in enumerate(foo_) if i not in (ilabel,iname)]
elif not vals_[ilabel]:
vals_=[el for i,el in enumerate(vals_) if i not in (ilabel,)]
foo_= [el for i,el in enumerate(foo_) if i not in (ilabel,)]
name_=cid
#name_="po:noname-{}-{}-{}".format(cid,B.groupuid,B.datetime_snapshot)
#c("{} --- {}".format(name_, vals_[ilabel]))
#vals_=list(vals_)
#vals_[ilabel]=name_
else:
name_,label=[foo["vals"][i][icount] for i in (iname,ilabel)]
ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name_)
P.rdf.link([tg],ind,None,foo_,
vals_)
icount+=1
B.ivars=["node1","node2","weight"]
interactions_=[fnet["relations"][i] for i in B.ivars]
B.ninteractions=len(interactions_[0])
c("escritos participantes")
i=1
for uid1,uid2,weight in zip(*interactions_):
weight_=int(weight)
if weight_-weight != 0:
raise ValueError("float weights in fb interaction networks?")
if anonymized:
uid1=uid_names[uid1]
uid2=uid_names[uid2]
flabel="{}-{}".format(uid1,uid2)
else:
flabel="{}-{}-{}-{}".format(B.fname,B.datetime_snapshot_,uid1,uid2)
ind=P.rdf.IC([tg],P.rdf.ns.fb.Interaction,flabel)
uids=[r.URIRef(P.rdf.ns.fb.Participant+"#"+str(i)) for i in (uid1,uid2)]
P.rdf.link_([tg],ind,None,[P.rdf.ns.fb.iFrom,P.rdf.ns.fb.iTo],
uids,draw=False)
P.rdf.link([tg],ind,None,[P.rdf.ns.fb.weight],
[weight_],draw=False)
if (i%1000)==0:
c(i)
i+=1
c("escritas amizades")
return tg
def rdfFriendshipNetwork(fnet):
tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"Facebook friendship network from {} . Ego: {}".format(B.name,B.ego))
#if sum([("user" in i) for i in fnet["individuals"]["label"]])==len(fnet["individuals"]["label"]):
# # nomes falsos, ids espurios
# anonymized=True
#else:
# anonymized=False
B.fanon=False
tkeys=list(fnet["individuals"].keys())
foo={"uris":[],"vals":[]}
for tkey in tkeys:
if tkey != "groupid":
foo["uris"]+=[eval("P.rdf.ns.fb."+trans(tkey))]
foo["vals"]+=[fnet["individuals"][tkey]]
if "groupid" in tkeys:
B.groupuid=fnet["individuals"]["groupid"][0]
tkeys.remove("groupid")
else:
B.groupuid=None
iname= tkeys.index("name")
B.uid_names={}
for vals_ in zip(*foo["vals"]):
vals_=list(vals_)
cid=vals_[iname]
foo_=foo["uris"][:]
take=0
name_="{}-{}".format(B.name,cid)
B.uid_names[cid]=name_
vals_=[el for i,el in enumerate(vals_) if i not in (iname,)]
foo_= [el for i,el in enumerate(foo_) if i not in (iname,)]
i=0
ii=[]
for val in vals_:
if not val:
ii+=[i]
i+=1
vals_=[val for i,val in enumerate(vals_) if i not in ii]
# take+=1
# if not vals_[isex-take]:
# vals_=[el for i,el in enumerate(vals_) if i not in (isex-take,)]
# foo_= [el for i,el in enumerate(foo_) if i not in (isex-take,)]
# take+=1
# if not vals_[ilocale-take]:
# vals_=[el for i,el in enumerate(vals_) if i not in (ilocale-take,)]
# foo_= [el for i,el in enumerate(foo_) if i not in (ilocale-take,)]
ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name_)
P.rdf.link([tg],ind,None,foo_,
vals_)
B.nfriends=len(foo["vals"][0])
#if anonymized:
# B.fvars=[trans(i) for j,i in enumerate(tkeys) if j not in (ilabel,iname)]
#else:
# B.fvars=[trans(i) for i in tkeys]
B.fvars=[trans(i) for j,i in enumerate(tkeys) if j not in (iname,)]
friendships_=[fnet["relations"][i] for i in ("node1","node2")]
c("escritos participantes")
i=1
for uid1,uid2 in zip(*friendships_):
uids=[r.URIRef(P.rdf.ns.fb.Participant+"#"+B.uid_names[i]) for i in (uid1,uid2)]
P.rdf.link_([tg],uids[0],None,[P.rdf.ns.fb.friend],[uids[1]])
if (i%1000)==0:
c(i)
i+=1
P.rdf.G(tg[0],P.rdf.ns.fb.friend,
P.rdf.ns.rdf.type,
P.rdf.ns.owl.SymmetricProperty)
B.nfriendships=len(friendships_[0])
c("escritas amizades")
return tg
def makeMetadata(fnet,inet):
desc="facebook network from {} . Ego: {}. Friendship: {}. Interaction: {}.".format(B.name,B.ego,B.friendship,B.interaction)
tg2=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"Metadata for the "+desc)
aname=B.name+"_fb"
ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot,
aname,"Snapshot {}".format(aname))
ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot,
aname,"Snapshot {}".format(aname))
foo={"uris":[],"vals":[]}
if ego:
if B.uid:
foo["uris"].append(P.rdf.ns.fb.uid)
foo["vals"].append(B.uid)
if B.sid:
foo["uris"].append(P.rdf.ns.fb.sid)
foo["vals"].append(B.sid)
else:
if B.uid:
foo["uris"].append(P.rdf.ns.fb.groupID)
foo["vals"].append(B.uid)
if B.sid:
foo["uris"].append(P.rdf.ns.fb.groupSID)
foo["vals"].append(B.sid)
if B.groupuid:
foo["uris"].append(P.rdf.ns.fb.groupID)
foo["vals"].append(B.groupuid)
if B.fb_link:
if type(B.fb_link) not in (type([2,3]),type((2,3))):
foo["uris"].append(P.rdf.ns.fb.fbLink)
foo["vals"].append(B.fb_link)
else:
for link in B.fb_link:
foo["uris"].append(P.rdf.ns.fb.fbLink)
foo["vals"].append(link)
if B.friendship:
B.ffile="{}{}/base/{}".format(B.prefix,aname,B.fname)
foo["uris"]+=[P.rdf.ns.fb.originalFriendshipFile,
P.rdf.ns.po.friendshipXMLFile,
P.rdf.ns.po.friendshipTTLFile]+\
[ P.rdf.ns.fb.nFriends,
P.rdf.ns.fb.nFriendships,
P.rdf.ns.fb.fAnon ]+\
[P.rdf.ns.fb.friendAttribute]*len(B.fvars)
B.frdf_file="{}{}/rdf/{}Friendship.owl".format(B.prefix,aname,aname)
foo["vals"]+=[B.ffile,
B.frdf_file,
"{}{}/rdf/{}Friendship.ttl".format(B.prefix,aname,aname) ]+\
[B.nfriends,B.nfriendships,B.fanon]+list(B.fvars)
if B.interaction:
B.ifile="{}{}/base/{}".format(B.prefix,aname,B.fnamei)
foo["uris"]+=[P.rdf.ns.fb.originalInteractionFile,
P.rdf.ns.po.interactionXMLFile,
P.rdf.ns.po.interactionTTLFile,]+\
[ P.rdf.ns.fb.nFriendsInteracted,
P.rdf.ns.fb.nInteractions,
P.rdf.ns.fb.iAnon ]+\
[ P.rdf.ns.fb.interactionFriendAttribute]*len(B.fvarsi)+\
[ P.rdf.ns.fb.interactionAttribute]*len(B.ivars)
B.irdf_file="{}{}/rdf/{}Interaction.owl".format(B.prefix,aname,aname)
foo["vals"]+=[B.ifile,
B.irdf_file,
"{}{}/rdf/{}Interaction.ttl".format(B.prefix,aname,aname),]+\
[B.nfriendsi,B.ninteractions,B.ianon]+list(B.fvarsi)+list(B.ivars)
foo["uris"]+=[
P.rdf.ns.fb.ego,
P.rdf.ns.fb.friendship,
P.rdf.ns.fb.interaction,
]
foo["vals"]+=[B.ego,B.friendship,B.interaction]
#https://github.com/OpenLinkedSocialData/fbGroups/tree/master/AdornoNaoEhEnfeite29032013_fb
B.available_dir="https://github.com/OpenLinkedSocialData/{}tree/master/{}".format(B.umbrella_dir,aname)
B.mrdf_file="{}{}/rdf/{}Meta.owl".format(B.prefix,aname,aname)
P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
[P.rdf.ns.po.createdAt,
P.rdf.ns.po.triplifiedIn,
P.rdf.ns.po.donatedBy,
P.rdf.ns.po.availableAt,
P.rdf.ns.po.discorveryRDFFile,
P.rdf.ns.po.discoveryTTLFile,
P.rdf.ns.po.acquiredThrough,
P.rdf.ns.rdfs.comment,
]+foo["uris"],
[B.datetime_snapshot,
datetime.datetime.now(),
B.name,
B.available_dir,
B.mrdf_file,
"{}{}/rdf/{}Meta.ttl".format(B.prefix,aname,aname),
"Netvizz",
desc,
]+foo["vals"])
ind2=P.rdf.IC([tg2],P.rdf.ns.po.Platform,"Facebook")
P.rdf.link_([tg2],ind,"Snapshot {}".format(aname),
[P.rdf.ns.po.socialProtocol],
[ind2],
["Facebook"])
#for friend_attr in fg2["friends"]:
return tg2
def writeAllFB(fnet,inet,mnet):
aname=B.name+"_fb"
fpath_="{}{}/".format(B.fpath,aname)
if B.friendship:
P.rdf.writeAll(fnet,aname+"Friendship",fpath_,False,1)
if B.interaction:
P.rdf.writeAll(inet,aname+"Interaction",fpath_)
# copia o script que gera este codigo
if not os.path.isdir(fpath_+"scripts"):
os.mkdir(fpath_+"scripts")
shutil.copy(scriptpath,fpath_+"scripts/")
# copia do base data
if not os.path.isdir(fpath_+"base"):
os.mkdir(fpath_+"base")
shutil.copy(B.dpath+B.fname,fpath_+"base/")
if B.interaction:
shutil.copy(B.dpath+B.fnamei,fpath_+"base/")
tinteraction="""\n{} individuals with metadata {}
and {} interactions with metadata {} constitute the interaction
network in file:
{}
(anonymized: {}).""".format( B.nfriendsi,str(B.fvarsi),
B.ninteractions,str(B.ivars),B.irdf_file,
B.ianon)
originals="{}\n{}".format(B.ffile,B.ifile)
else:
tinteraction=""
originals=B.ffile
P.rdf.writeAll(mnet,aname+"Meta",fpath_,1)
# faz um README
with open(fpath_+"README","w") as f:
f.write("""This repo delivers RDF data from the facebook
friendship network of {} collected around {}.
{} individuals with metadata {}
and {} friendships constitute the friendship network in file:
{}
(anonymized: {}).{}
Metadata for discovery is in file:
{}
Original files:
{}
Ego network: {}
Friendship network: {}
Interaction network: {}
All files should be available at the git repository:
{}
\n""".format(
B.name,B.datetime_snapshot_,
B.nfriends,str(B.fvars),
B.nfriendships, B.frdf_file,
# B.fanon,
"FALSE, but no id",
tinteraction,
B.mrdf_file,originals,
B.ego, B.friendship,B.interaction,B.available_dir
))
|
ttm/socialLegacy
|
social/fb/fb.py
|
triplifyGML
|
python
|
def triplifyGML(fname="foo.gml",fpath="./fb/",scriptpath=None,uid=None,sid=None,extra_info=None):
    """Produce a linked-data publication tree from a standard GML file.

    NOTE(review): this appears to be a duplicate of the documented
    triplifyGML defined later in the file — confirm and deduplicate.
    fname: path of the GML friendship-network file; fpath: output root;
    scriptpath: calling script (copied for provenance); uid/sid: numeric
    and string facebook ids of the donor; extra_info: optional note.
    Side effects only (writes the tree under fpath); returns None.
    """
    aname=fname.split("/")[-1].split(".")[0]
    # Derive donor name and snapshot datetime from the filename; a few
    # donors use ad-hoc filename conventions and get special cases.
    if "RonaldCosta" in fname:
        aname=fname.split("/")[-1].split(".")[0]
        name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).gml",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
        name_="Ronald Scherolt Costa"
    elif "AntonioAnzoategui" in fname:
        aname=re.findall(".*/([a-zA-Z]*\d*)",fname)[0]
        name,year,month,day,hour,minute=re.findall(r".*/([a-zA-Z]*).*_(\d+)_(\d*)_(\d*)_(\d*)_(\d*)_.*",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat()[:-3]
        name_="Antônio Anzoategui Fabbri"
    elif re.findall(".*/[a-zA-Z]*(\d)",fname):
        name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gml",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
        name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
    elif re.findall("[a-zA-Z]*_",fname):
        name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gml",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0]
        name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
    else:
        # NOTE(review): 'name' is unbound on this branch — NameError if reached
        name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
    aname+="_fb"
    name+="_fb"
    c("started snapshot",aname)
    # tg holds the translated network triples, tg2 the discovery metadata
    tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"the {} facebook ego friendship network")
    tg2=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"RDF metadata for the facebook friendship network of my son")
    snapshot=P.rdf.IC([tg2],P.rdf.ns.po.FacebookSnapshot,
            aname,"Snapshot {}".format(aname))
    extra_uri=extra_val=[]
    if extra_info:
        extra_uri=[NS.po.extraInfo]
        extra_val=[extra_info]
    # attach provenance/publication metadata to the snapshot individual
    P.rdf.link([tg2],snapshot,"Snapshot {}".format(aname),
            [P.rdf.ns.po.createdAt,
             P.rdf.ns.po.triplifiedIn,
             P.rdf.ns.po.donatedBy,
             P.rdf.ns.po.availableAt,
             P.rdf.ns.po.originalFile,
             P.rdf.ns.po.onlineTranslateXMLFile,
             P.rdf.ns.po.onlineTranslateTTLFile,
             P.rdf.ns.po.translateXMLFile,
             P.rdf.ns.po.translateTTLFile,
             P.rdf.ns.po.onlineMetaXMLFile,
             P.rdf.ns.po.onlineMetaTTLFile,
             P.rdf.ns.po.metaXMLFilename,
             P.rdf.ns.po.metaTTLFilename,
             P.rdf.ns.po.acquiredThrough,
             P.rdf.ns.rdfs.comment,
             P.rdf.ns.fb.uid,
             P.rdf.ns.fb.sid
            ]+extra_uri,
            [datetime_snapshot,
             datetime.datetime.now(),
             name,
             "https://github.com/ttm/{}".format(aname),
             "https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")[-1]),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.rdf".format(aname,aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname),
             "{}Translate.rdf".format(aname),
             "{}Translate.ttl".format(aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.rdf".format(aname,aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname),
             "{}Meta.owl".format(aname),
             "{}Meta.ttl".format(aname),
             "Netvizz",
             "The facebook friendship network from {}".format(name_),
             uid,
             sid
            ]+extra_val)
    c((aname,name_,datetime_snapshot))
    fg2=x.read_gml(fname)
    c("read gml")
    # one Participant individual per node, carrying the GML node attributes
    for uid in fg2:
        c(uid)
        ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,"{}-{}".format(aname,uid))
        if "locale" in fg2.node[uid].keys():
            data=[fg2.node[uid][attr] for attr in ("id","label","locale","sex","agerank","wallcount")]
            uris=[NS.fb.gid, NS.fb.name,
                  NS.fb.locale, NS.fb.sex,
                  NS.fb.agerank,NS.fb.wallcount]
        else:
            data=[fg2.node[uid][attr] for attr in ("id","label","sex","agerank","wallcount")]
            uris=[NS.fb.gid, NS.fb.name,
                  NS.fb.sex,
                  NS.fb.agerank,NS.fb.wallcount]
        P.rdf.link([tg],ind, None,uris,data,draw=False)
        P.rdf.link_([tg],ind,None,[NS.po.snapshot],[snapshot],draw=False)
    c("escritos participantes")
    # one Friendship individual per edge, linked to both participants
    i=1
    for uid1,uid2 in fg2.edges():
        flabel="{}-{}-{}".format(aname,uid1,uid2)
        ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,flabel)
        # the comprehension's 'i' is scoped to it and does not clobber the counter
        uids=[P.rdf.IC(None,P.rdf.ns.fb.Participant,"{}-{}".format(aname,i)) for i in (uid1,uid2)]
        P.rdf.link_([tg],ind,flabel,[NS.po.snapshot]+[NS.fb.member]*2,
                [snapshot]+uids,draw=False)
        P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1])
        if (i%1000)==0:
            c(i)
        i+=1
    c("escritas amizades")
    tg_=[tg[0]+tg2[0],tg[1]]
    fpath_="{}/{}/".format(fpath,aname)
    P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1)
    # copy the script that generated this code
    if not os.path.isdir(fpath_+"scripts"):
        os.mkdir(fpath_+"scripts")
    shutil.copy(scriptpath,fpath_+"scripts/")
    # copy the base (original) data file
    if not os.path.isdir(fpath_+"base"):
        os.mkdir(fpath_+"base")
    shutil.copy(fname,fpath_+"base/")
    P.rdf.writeAll(tg2,aname+"Meta",fpath_,False)
    # write a README describing the published snapshot
    with open(fpath_+"README","w") as f:
        f.write("""This repo delivers RDF data from the facebook
friendship network of {} ({}) collected at {}.
It has {} friends with metadata {};
and {} friendships.
The linked data is available at rdf/ dir and was
generated by the routine in the script/ directory.
Original data from Netvizz in data/\n""".format(
        name_,aname,datetime_snapshot,
        fg2.number_of_nodes(),
        "name, locale (maybe), sex, agerank and wallcount",
        fg2.number_of_edges()))
|
Produce a linked data publication tree from a standard GML file.
INPUTS:
=> the file name (fname, with path) where the gdf file
of the friendship network is.
=> the final path (fpath) for the tree of files to be created.
=> a path to the script that is calling this function (scriptpath).
=> the numeric id (uid) of the facebook user of which fname holds a friendship network
=> the string id (sid) of the facebook user of which fname holds a friendship network
OUTPUTS:
the tree in the directory fpath.
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fb/fb.py#L11-L167
| null |
import time, os, pickle, shutil, datetime, re
import networkx as x, rdflib as r
from splinter import Browser
from bs4 import BeautifulSoup
import percolation as P
c=P.utils.check  # shorthand for the percolation progress/debug printer
this_dir = os.path.split(__file__)[0]  # directory containing this module
NS=P.rdf.ns  # namespace registry shared by all triplify helpers below
a=NS.rdf.type  # conventional shorthand for the rdf:type predicate
def triplifyGML(fname="foo.gml",fpath="./fb/",scriptpath=None,uid=None,sid=None,extra_info=None):
    """Produce a linked data publication tree from a standard GML file.
    INPUTS:
    => the file name (fname, with path) where the GML file
    of the friendship network is.
    => the final path (fpath) for the tree of files to be created.
    => a path to the script that is calling this function (scriptpath),
    copied into the tree for provenance.
    => the numeric id (uid) of the facebook user of which fname holds a friendship network
    => the string id (sid) of the same facebook user
    => optional free-form note (extra_info) attached to the snapshot
    OUTPUTS:
    the tree in the directory fpath (side effects only; returns None)."""
    aname=fname.split("/")[-1].split(".")[0]
    # Derive donor name and snapshot datetime from the filename; a few
    # donors use ad-hoc filename conventions and get special cases.
    if "RonaldCosta" in fname:
        aname=fname.split("/")[-1].split(".")[0]
        name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).gml",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
        name_="Ronald Scherolt Costa"
    elif "AntonioAnzoategui" in fname:
        aname=re.findall(".*/([a-zA-Z]*\d*)",fname)[0]
        name,year,month,day,hour,minute=re.findall(r".*/([a-zA-Z]*).*_(\d+)_(\d*)_(\d*)_(\d*)_(\d*)_.*",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat()[:-3]
        name_="Antônio Anzoategui Fabbri"
    elif re.findall(".*/[a-zA-Z]*(\d)",fname):
        name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gml",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
        name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
    elif re.findall("[a-zA-Z]*_",fname):
        name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gml",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0]
        name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
    else:
        # NOTE(review): 'name' is unbound on this branch — NameError if reached
        name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
    aname+="_fb"
    name+="_fb"
    c("started snapshot",aname)
    # tg holds the translated network triples, tg2 the discovery metadata
    tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"the {} facebook ego friendship network")
    tg2=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"RDF metadata for the facebook friendship network of my son")
    snapshot=P.rdf.IC([tg2],P.rdf.ns.po.FacebookSnapshot,
            aname,"Snapshot {}".format(aname))
    extra_uri=extra_val=[]
    if extra_info:
        extra_uri=[NS.po.extraInfo]
        extra_val=[extra_info]
    # attach provenance/publication metadata to the snapshot individual
    P.rdf.link([tg2],snapshot,"Snapshot {}".format(aname),
            [P.rdf.ns.po.createdAt,
             P.rdf.ns.po.triplifiedIn,
             P.rdf.ns.po.donatedBy,
             P.rdf.ns.po.availableAt,
             P.rdf.ns.po.originalFile,
             P.rdf.ns.po.onlineTranslateXMLFile,
             P.rdf.ns.po.onlineTranslateTTLFile,
             P.rdf.ns.po.translateXMLFile,
             P.rdf.ns.po.translateTTLFile,
             P.rdf.ns.po.onlineMetaXMLFile,
             P.rdf.ns.po.onlineMetaTTLFile,
             P.rdf.ns.po.metaXMLFilename,
             P.rdf.ns.po.metaTTLFilename,
             P.rdf.ns.po.acquiredThrough,
             P.rdf.ns.rdfs.comment,
             P.rdf.ns.fb.uid,
             P.rdf.ns.fb.sid
            ]+extra_uri,
            [datetime_snapshot,
             datetime.datetime.now(),
             name,
             "https://github.com/ttm/{}".format(aname),
             "https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")[-1]),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.rdf".format(aname,aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname),
             "{}Translate.rdf".format(aname),
             "{}Translate.ttl".format(aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.rdf".format(aname,aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname),
             "{}Meta.owl".format(aname),
             "{}Meta.ttl".format(aname),
             "Netvizz",
             "The facebook friendship network from {}".format(name_),
             uid,
             sid
            ]+extra_val)
    c((aname,name_,datetime_snapshot))
    fg2=x.read_gml(fname)
    c("read gml")
    # one Participant individual per node, carrying the GML node attributes
    for uid in fg2:
        c(uid)
        ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,"{}-{}".format(aname,uid))
        if "locale" in fg2.node[uid].keys():
            data=[fg2.node[uid][attr] for attr in ("id","label","locale","sex","agerank","wallcount")]
            uris=[NS.fb.gid, NS.fb.name,
                  NS.fb.locale, NS.fb.sex,
                  NS.fb.agerank,NS.fb.wallcount]
        else:
            data=[fg2.node[uid][attr] for attr in ("id","label","sex","agerank","wallcount")]
            uris=[NS.fb.gid, NS.fb.name,
                  NS.fb.sex,
                  NS.fb.agerank,NS.fb.wallcount]
        P.rdf.link([tg],ind, None,uris,data,draw=False)
        P.rdf.link_([tg],ind,None,[NS.po.snapshot],[snapshot],draw=False)
    c("escritos participantes")
    # one Friendship individual per edge, linked to both participants
    i=1
    for uid1,uid2 in fg2.edges():
        flabel="{}-{}-{}".format(aname,uid1,uid2)
        ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,flabel)
        # the comprehension's 'i' is scoped to it and does not clobber the counter
        uids=[P.rdf.IC(None,P.rdf.ns.fb.Participant,"{}-{}".format(aname,i)) for i in (uid1,uid2)]
        P.rdf.link_([tg],ind,flabel,[NS.po.snapshot]+[NS.fb.member]*2,
                [snapshot]+uids,draw=False)
        P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1])
        if (i%1000)==0:
            c(i)
        i+=1
    c("escritas amizades")
    tg_=[tg[0]+tg2[0],tg[1]]
    fpath_="{}/{}/".format(fpath,aname)
    P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1)
    # copy the script that generated this code
    if not os.path.isdir(fpath_+"scripts"):
        os.mkdir(fpath_+"scripts")
    shutil.copy(scriptpath,fpath_+"scripts/")
    # copy the base (original) data file
    if not os.path.isdir(fpath_+"base"):
        os.mkdir(fpath_+"base")
    shutil.copy(fname,fpath_+"base/")
    P.rdf.writeAll(tg2,aname+"Meta",fpath_,False)
    # write a README describing the published snapshot
    with open(fpath_+"README","w") as f:
        f.write("""This repo delivers RDF data from the facebook
friendship network of {} ({}) collected at {}.
It has {} friends with metadata {};
and {} friendships.
The linked data is available at rdf/ dir and was
generated by the routine in the script/ directory.
Original data from Netvizz in data/\n""".format(
        name_,aname,datetime_snapshot,
        fg2.number_of_nodes(),
        "name, locale (maybe), sex, agerank and wallcount",
        fg2.number_of_edges()))
def triplifyGDFInteraction(fname="foo.gdf",fpath="./fb/",scriptpath=None,uid=None,sid=None,dlink=None):
    """Produce a linked data publication tree from GDF files of a Facebook interaction network.
    INPUTS:
    => the file name (fname, with path) where the gdf file
    of the friendship network is.
    => the final path (fpath) for the tree of files to be created.
    => a path to the script that is calling this function (scriptpath).
    => the numeric id (uid) of the facebook group
    => the string id (sid) of the facebook group of which fname holds a friendship network
    => optional download link (dlink) recorded in the metadata
    OUTPUTS:
    the tree in the directory fpath (side effects only; returns None)."""
    aname=fname.split("/")[-1].split(".")[0]
    # Derive donor/group name and snapshot datetime from the filename pattern.
    if re.findall("[a-zA-Z]*_[0-9]",fname):
        name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gdf",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0]
        name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
    elif re.findall("(\d)",fname):
        name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gdf",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
        name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
    else:
        # no date in the filename: fall back to a fixed historical date
        datetime_snapshot=datetime.datetime(2013,3,15).isoformat().split("T")[0]
        name_=" ".join(re.findall("[A-Z][^A-Z]*",aname))
        aname+="_fb"
        name=aname
    # tg holds the translated network triples, tg2 the discovery metadata
    tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"The facebook interaction network from the {} file".format(fname)) # drop the agraph
    tg2=P.rdf.makeBasicGraph([["po"],[P.rdf.ns.per]],"Metadata for my facebook ego friendship network RDF files") # drop the agraph
    ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot,
            aname,"Snapshot {}".format(aname))
    # optional ids/link are appended to the metadata predicate/value lists
    foo={"uris":[],"vals":[]}
    if sid:
        foo["uris"].append(P.rdf.ns.fb.sid)
        foo["vals"].append(sid)
    if uid:
        foo["uris"].append(P.rdf.ns.fb.uid)
        foo["vals"].append(uid)
    if dlink:
        foo["uris"].append(P.rdf.ns.fb.link)
        foo["vals"].append(dlink)
    P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
            [P.rdf.ns.po.createdAt,
             P.rdf.ns.po.triplifiedIn,
             P.rdf.ns.po.donatedBy,
             P.rdf.ns.po.availableAt,
             P.rdf.ns.po.originalFile,
             P.rdf.ns.po.rdfFile,
             P.rdf.ns.po.ttlFile,
             P.rdf.ns.po.discorveryRDFFile,
             P.rdf.ns.po.discoveryTTLFile,
             P.rdf.ns.po.acquiredThrough,
             P.rdf.ns.rdfs.comment,
            ]+foo["uris"],
            [datetime_snapshot,
             datetime.datetime.now(),
             name,
             "https://github.com/ttm/{}".format(aname),
             "https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.owl".format(aname,aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.owl".format(aname,aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname),
             "Netvizz",
             "The facebook friendship network from {}".format(name_),
            ]+foo["vals"])
    fg2=readGDF(fname)
    tkeys=list(fg2["friends"].keys())
    # map GDF column names onto the fb namespace predicate names
    def trans(tkey):
        if tkey=="name":
            return "uid"
        if tkey=="label":
            return "name"
        return tkey
    foo={"uris":[],"vals":[]}
    for tkey in tkeys:
        if tkey=="groupid":
            P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
                    [P.rdf.ns.po.uid,],
                    [fg2["friends"][tkey][0]])
        if tkey:
            foo["uris"]+=[eval("P.rdf.ns.fb."+trans(tkey))]
            foo["vals"]+=[fg2["friends"][tkey]]
    print(tkeys)
    iname=tkeys.index("name")
    ilabel=tkeys.index("label")
    icount=0
    name_label={}
    # one Participant individual per node row of the GDF table
    for vals_ in zip(*foo["vals"]):
        name,label=[foo["vals"][i][icount] for i in (iname,ilabel)]
        if not label:
            label="po:noname"
        vals_=list(vals_)
        vals_[ilabel]=label
        name_label[name]=label
        ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name,label)
        P.rdf.link([tg],ind,label,foo["uris"],
                vals_,draw=False)
        icount+=1
    friendships_=[fg2["friendships"][i] for i in ("node1","node2")]
    c("escritos participantes")
    # one Friendship individual per edge row
    i=1
    for uid1,uid2 in zip(*friendships_):
        flabel="{}-{}".format(uid1,uid2)
        labels=[name_label[uu] for uu in (uid1,uid2)]
        ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,
                flabel)
        ind1=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid1)
        ind2=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid2)
        uids=[r.URIRef(P.rdf.ns.fb.Participant+"#"+str(i)) for i in (uid1,uid2)]
        P.rdf.link_([tg],ind,"Friendship "+flabel,[P.rdf.ns.fb.member]*2,
                uids,labels,draw=False)
        P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1])
        if (i%1000)==0:
            c(i)
        i+=1
    # declare fb:friend symmetric so the reverse triples are implied
    P.rdf.G(tg[0],P.rdf.ns.fb.friend,
            P.rdf.ns.rdf.type,
            P.rdf.ns.owl.SymmetricProperty)
    c("escritas amizades")
    tg_=[tg[0]+tg2[0],tg[1]]
    fpath_="{}{}/".format(fpath,aname)
    P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1)
    # copy the script that generated this code
    if not os.path.isdir(fpath_+"scripts"):
        os.mkdir(fpath_+"scripts")
    shutil.copy(scriptpath,fpath_+"scripts/")
    # copy the base (original) data file
    if not os.path.isdir(fpath_+"base"):
        os.mkdir(fpath_+"base")
    shutil.copy(fname,fpath_+"base/")
    P.rdf.writeAll(tg2,aname+"Meta",fpath_,1)
    # write a README describing the published snapshot
    with open(fpath_+"README","w") as f:
        f.write("""This repo delivers RDF data from the facebook
friendship network of {} collected at {}.
It has {} friends with metadata {};
and {} friendships.
The linked data is available at rdf/ dir and was
generated by the routine in the script/ directory.
Original data from Netvizz in data/\n""".format(
        name_,datetime_snapshot,
        len(fg2["friends"]["name"]),
        "facebook numeric id, name, locale, sex and agerank",
        len(fg2["friendships"]["node1"])
        ))
def triplifyGDF(fname="foo.gdf",fpath="./fb/",scriptpath=None,uid=None,sid=None,dlink=None):
    """Produce a linked data publication tree from a standard GDF file.
    INPUTS:
    => the file name (fname, with path) where the gdf file
    of the friendship network is.
    => the final path (fpath) for the tree of files to be created.
    => a path to the script that is calling this function (scriptpath).
    => the numeric id (uid) of the facebook user of which fname holds a friendship network
    => the string id (sid) of the same facebook user
    => optional download link (dlink) recorded in the metadata
    OUTPUTS:
    the tree in the directory fpath (side effects only; returns None)."""
    aname=fname.split("/")[-1].split(".")[0]
    # Derive donor name and snapshot datetime from the filename pattern.
    if re.findall("[a-zA-Z]*_[0-9]",fname):
        name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gdf",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0]
        name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
    elif re.findall("(\d)",fname):
        name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gdf",fname)[0]
        datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
        name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
    else:
        # no date in the filename: fall back to a fixed historical date
        datetime_snapshot=datetime.datetime(2013,3,15).isoformat().split("T")[0]
        name_=" ".join(re.findall("[A-Z][^A-Z]*",aname))
        aname+="_fb"
        name=aname
    # tg holds the translated network triples, tg2 the discovery metadata
    tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"My facebook ego friendship network") # drop the agraph
    tg2=P.rdf.makeBasicGraph([["po"],[P.rdf.ns.per]],"Metadata for my facebook ego friendship network RDF files") # drop the agraph
    ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot,
            aname,"Snapshot {}".format(aname))
    # optional ids/link are appended to the metadata predicate/value lists
    foo={"uris":[],"vals":[]}
    if sid:
        foo["uris"].append(P.rdf.ns.fb.sid)
        foo["vals"].append(sid)
    if uid:
        foo["uris"].append(P.rdf.ns.fb.uid)
        foo["vals"].append(uid)
    if dlink:
        foo["uris"].append(P.rdf.ns.fb.link)
        foo["vals"].append(dlink)
    P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
            [P.rdf.ns.po.createdAt,
             P.rdf.ns.po.triplifiedIn,
             P.rdf.ns.po.donatedBy,
             P.rdf.ns.po.availableAt,
             P.rdf.ns.po.originalFile,
             P.rdf.ns.po.rdfFile,
             P.rdf.ns.po.ttlFile,
             P.rdf.ns.po.discorveryRDFFile,
             P.rdf.ns.po.discoveryTTLFile,
             P.rdf.ns.po.acquiredThrough,
             P.rdf.ns.rdfs.comment,
            ]+foo["uris"],
            [datetime_snapshot,
             datetime.datetime.now(),
             name,
             "https://github.com/ttm/{}".format(aname),
             "https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.owl".format(aname,aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.owl".format(aname,aname),
             "https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname),
             "Netvizz",
             "The facebook friendship network from {}".format(name_),
            ]+foo["vals"])
    fg2=readGDF(fname)
    tkeys=list(fg2["friends"].keys())
    # map GDF column names onto the fb namespace predicate names
    def trans(tkey):
        if tkey=="name":
            return "uid"
        if tkey=="label":
            return "name"
        return tkey
    foo={"uris":[],"vals":[]}
    for tkey in tkeys:
        if tkey=="groupid":
            P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
                    [P.rdf.ns.po.uid,],
                    [fg2["friends"][tkey][0]])
        if tkey:
            foo["uris"]+=[eval("P.rdf.ns.fb."+trans(tkey))]
            foo["vals"]+=[fg2["friends"][tkey]]
    print(tkeys)
    iname=tkeys.index("name")
    ilabel=tkeys.index("label")
    icount=0
    name_label={}
    # one Participant individual per node row of the GDF table
    for vals_ in zip(*foo["vals"]):
        name,label=[foo["vals"][i][icount] for i in (iname,ilabel)]
        if not label:
            label="po:noname"
        vals_=list(vals_)
        vals_[ilabel]=label
        name_label[name]=label
        ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name,label)
        P.rdf.link([tg],ind,label,foo["uris"],
                vals_,draw=False)
        icount+=1
    friendships_=[fg2["friendships"][i] for i in ("node1","node2")]
    c("escritos participantes")
    # one Friendship individual per edge row
    i=1
    for uid1,uid2 in zip(*friendships_):
        flabel="{}-{}".format(uid1,uid2)
        labels=[name_label[uu] for uu in (uid1,uid2)]
        ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,
                flabel)
        ind1=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid1)
        ind2=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid2)
        uids=[r.URIRef(P.rdf.ns.fb.Participant+"#"+str(i)) for i in (uid1,uid2)]
        P.rdf.link_([tg],ind,"Friendship "+flabel,[P.rdf.ns.fb.member]*2,
                uids,labels,draw=False)
        P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1])
        if (i%1000)==0:
            c(i)
        i+=1
    # declare fb:friend symmetric so the reverse triples are implied
    P.rdf.G(tg[0],P.rdf.ns.fb.friend,
            P.rdf.ns.rdf.type,
            P.rdf.ns.owl.SymmetricProperty)
    c("escritas amizades")
    tg_=[tg[0]+tg2[0],tg[1]]
    fpath_="{}{}/".format(fpath,aname)
    P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1)
    # copy the script that generated this code
    if not os.path.isdir(fpath_+"scripts"):
        os.mkdir(fpath_+"scripts")
    shutil.copy(scriptpath,fpath_+"scripts/")
    # copy the base (original) data file
    if not os.path.isdir(fpath_+"base"):
        os.mkdir(fpath_+"base")
    shutil.copy(fname,fpath_+"base/")
    P.rdf.writeAll(tg2,aname+"Meta",fpath_,1)
    # write a README describing the published snapshot
    with open(fpath_+"README","w") as f:
        f.write("""This repo delivers RDF data from the facebook
friendship network of {} collected at {}.
It has {} friends with metadata {};
and {} friendships.
The linked data is available at rdf/ dir and was
generated by the routine in the script/ directory.
Original data from Netvizz in data/\n""".format(
        name_,datetime_snapshot,
        len(fg2["friends"]["name"]),
        "facebook numeric id, name, locale, sex and agerank",
        len(fg2["friendships"]["node1"])
        ))
def makeRDF(readgdf_dict,fdir="../data/rdf/"):
    """Placeholder: build an rdflib graph from parsed GDF data.

    readgdf_dict: the dict produced by readGDF (node/edge tables).
    fdir: intended output directory for the RDF files (currently unused).
    Not implemented yet; always returns None.
    """
    parsed = readgdf_dict  # kept around for the eventual implementation
def readGDF(filename="../data/RenatoFabbri06022014.gdf"):
    """Parse a Netvizz GDF export into node and edge tables.

    The first line is the node header ("nodedef>col TYPE,..."); a later
    line containing ">" introduces the edge header. Returns a dict with
    "friends" (node attributes) and "friendships" (edge attributes),
    each mapping column name -> list of values. Purely numeric cells
    are converted to int. Parsing stops at the first blank line.
    """
    with open(filename, "r") as handle:
        raw_lines = handle.read().split("\n")
    # node header, e.g. "nodedef>name VARCHAR,label VARCHAR"
    node_cols = [col.split(" ")[0] for col in raw_lines[0].split(">")[1].split(",")]
    node_table = {col: [] for col in node_cols}
    in_edge_section = False
    for raw in raw_lines[1:]:
        if not raw:
            break  # blank line terminates the data
        if ">" in raw:
            # second header switches us to the edge section
            edge_cols = [col.split(" ")[0] for col in raw.split(">")[1].split(",")]
            edge_table = {col: [] for col in edge_cols}
            in_edge_section = True
            continue
        cells = raw.split(",")
        if in_edge_section:
            for idx, cell in enumerate(cells):
                edge_table[edge_cols[idx]].append(int(cell) if cell.isdigit() else cell)
        else:
            for idx, cell in enumerate(cells):
                node_table[node_cols[idx]].append(int(cell) if cell.isdigit() else cell)
    return {"friendships": edge_table,
            "friends": node_table}
#self.makeNetwork()
class GDFgraph:
    """Read a Netvizz GDF file and expose it as a networkx graph.

    Attributes set by __init__: data (raw text), lines, data_friends /
    data_friendships (column -> list tables), n_friends, n_friendships,
    and G (the networkx graph built by makeNetwork)."""
    def __init__(self,filename="../data/RenatoFabbri06022014.gdf"):
        # parsing mirrors readGDF() above; see that function for the format
        with open(filename,"r") as f:
            self.data=f.read()
        self.lines=self.data.split("\n")
        # node header: "nodedef>col TYPE,..."
        columns=self.lines[0].split(">")[1].split(",")
        column_names=[i.split(" ")[0] for i in columns]
        data_friends={cn:[] for cn in column_names}
        for line in self.lines[1:]:
            if not line:
                break
            if ">" in line:
                # second header introduces the edge section
                columns=line.split(">")[1].split(",")
                column_names2=[i.split(" ")[0] for i in columns]
                data_friendships={cn:[] for cn in column_names2}
                continue
            fields=line.split(",")
            # column_names2 only exists once the edge header has been seen
            if "column_names2" not in locals():
                for i, field in enumerate(fields):
                    if field.isdigit(): field=int(field)
                    data_friends[column_names[i]].append(field)
            else:
                for i, field in enumerate(fields):
                    if field.isdigit(): field=int(field)
                    data_friendships[column_names2[i]].append(field)
        self.data_friendships=data_friendships
        self.data_friends=data_friends
        self.n_friends=len(data_friends[column_names[0]])
        self.n_friendships=len(data_friendships[column_names2[0]])
        self.makeNetwork()
    def makeNetwork(self):
        """Build self.G (networkx graph) from the loaded GDF tables.

        Weighted edge tables yield a DiGraph, unweighted a Graph;
        node attributes depend on which columns the export contains."""
        if "weight" in self.data_friendships.keys():
            self.G=G=x.DiGraph()
        else:
            self.G=G=x.Graph()
        F=self.data_friends
        for friendn in range(self.n_friends):
            if "posts" in F.keys():
                G.add_node(F["name"][friendn],
                        label=F["label"][friendn],
                        posts=F["posts"][friendn])
            elif "agerank" in F.keys():
                G.add_node(F["name"][friendn],
                        label=F["label"][friendn],
                        gender=F["sex"][friendn],
                        locale=F["locale"][friendn],
                        agerank=F["agerank"][friendn])
            else:
                G.add_node(F["name"][friendn],
                        label=F["label"][friendn],
                        gender=F["sex"][friendn],
                        locale=F["locale"][friendn])
        F=self.data_friendships
        for friendshipn in range(self.n_friendships):
            if "weight" in F.keys():
                G.add_edge(F["node1"][friendshipn],F["node2"][friendshipn],weight=F["weight"][friendshipn])
            else:
                G.add_edge(F["node1"][friendshipn],F["node2"][friendshipn])
def readFBPost(fpath=""):
    """Extract information from an HTML page with a Facebook post.

    fpath: path of the saved HTML file.
    Returns a BeautifulSoup tree of the document (parsed with lxml).
    """
    # context manager closes the file handle (the original leaked it)
    with open(fpath, "rb") as html:
        return BeautifulSoup(html, "lxml")
class ScrapyBrowser:
    """Opens a browser for the user to log in to Facebook.

    The browser then pulls data as requested by the user.
    Scraped results are pickled under basedir/fb_ids/."""
    def __init__(self,user_email=None, user_password=None,basedir="~/.social/"):
        # basedir: working directory; "~" is expanded to the user's home
        self._BASE_DIR=basedir.replace("~",os.path.expanduser("~"))
        if not os.path.isdir(self._BASE_DIR):
            os.mkdir(self._BASE_DIR)
        print("Opening *Scrappy* firefox browser. Please wait.")
        self.browser=browser=Browser(wait_time=2)
        url="http://facebook.com"
        browser.visit(url)
        # without credentials, wait for the user to log in manually
        if (not user_email) or (not user_password):
            input("\n\n==> Input user and password and login, please.\
            and then press <enter>")
        else:
            browser.fill("email",user_email)
            browser.fill("pass",user_password)
            browser.find_by_value("Log In").click()
    def getFriends(self,user_id="astronauta.mecanico",write=True):
        """Return the (accessible) friends of the friend with user_id.

        Scrolls the friends page until its height stops growing, scrapes
        name/id pairs plus friend counts, optionally pickles the result
        to _BASE_DIR/fb_ids/<user_id>.pickle, and returns the dict."""
        while user_id not in self.browser.url:
            self.browser.visit("http://www.facebook.com/{}/friends".format(user_id), wait_time=3)
        T=time.time()
        # keep scrolling; stop once the page height is stable for 10s
        while 1:
            h1=self.browser.evaluate_script("document.body.scrollHeight")
            self.browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            h2=self.browser.evaluate_script("document.body.scrollHeight")
            if h1 != h2:
                T=time.time()
            elif time.time()-T>10:
                break
        # ".fcb" appears to be Facebook's friend-card CSS class — fragile,
        # verify against the current page markup
        links=self.browser.find_by_css(".fcb")
        friends=[]
        for link in links:
            name=link.value
            user_id_=link.find_by_tag("a")["href"].split("/")[-1].split("?")[0]
            friends.append((user_id_,name))
        tdict={}
        tdict["name"]=self.browser.find_by_id("fb-timeline-cover-name").value
        tdict["user_id"]=user_id
        tdict["friends"]=friends
        # "._3c_"/"._3d0" are Facebook's counter widgets — also fragile
        infos=self.browser.find_by_css("._3c_")
        mutual=0
        for info in infos:
            if info.value=="Mutual Friends":
                if info.find_by_css("._3d0").value:
                    tdict["n_mutual"]=info.find_by_css("._3d0").value
                mutual=1
            if info.value=="All Friends":
                tdict["n_friends"]=info.find_by_css("._3d0").value
        if mutual==0:
            links=self.browser.find_by_css("._gs6")
            if "Mutual" in links.value:
                tdict["n_mutual"]=links.value.split(" ")[0]
        if write:
            # persist the scrape so repeated runs can be resumed
            if not os.path.isdir("{}/fb_ids/".format(self._BASE_DIR)):
                os.mkdir("{}/fb_ids/".format(self._BASE_DIR))
            with open("{}fb_ids/{}.pickle".format(self._BASE_DIR,user_id),"wb") as f:
                pickle.dump(tdict,f)
        self.tdict=tdict
        return tdict
|
ttm/socialLegacy
|
social/fb/fb.py
|
triplifyGDFInteraction
|
python
|
def triplifyGDFInteraction(fname="foo.gdf",fpath="./fb/",scriptpath=None,uid=None,sid=None,dlink=None):
#aname=fname.split("/")[-1].split(".")[0]+"_fb"
aname=fname.split("/")[-1].split(".")[0]
if re.findall("[a-zA-Z]*_[0-9]",fname):
name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gdf",fname)[0]
datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
elif re.findall("(\d)",fname):
name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gdf",fname)[0]
datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
else:
datetime_snapshot=datetime.datetime(2013,3,15).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",aname))
aname+="_fb"
name=aname
tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"The facebook interaction network from the {} file".format(fname)) # drop de agraph
tg2=P.rdf.makeBasicGraph([["po"],[P.rdf.ns.per]],"Metadata for my facebook ego friendship network RDF files") # drop de agraph
ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot,
aname,"Snapshot {}".format(aname))
foo={"uris":[],"vals":[]}
if sid:
foo["uris"].append(P.rdf.ns.fb.sid)
foo["vals"].append(sid)
if uid:
foo["uris"].append(P.rdf.ns.fb.uid)
foo["vals"].append(uid)
if dlink:
foo["uris"].append(P.rdf.ns.fb.link)
foo["vals"].append(dlink)
P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
[P.rdf.ns.po.createdAt,
P.rdf.ns.po.triplifiedIn,
P.rdf.ns.po.donatedBy,
P.rdf.ns.po.availableAt,
P.rdf.ns.po.originalFile,
P.rdf.ns.po.rdfFile,
P.rdf.ns.po.ttlFile,
P.rdf.ns.po.discorveryRDFFile,
P.rdf.ns.po.discoveryTTLFile,
P.rdf.ns.po.acquiredThrough,
P.rdf.ns.rdfs.comment,
]+foo["uris"],
[datetime_snapshot,
datetime.datetime.now(),
name,
"https://github.com/ttm/{}".format(aname),
"https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.owl".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.owl".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname),
"Netvizz",
"The facebook friendship network from {}".format(name_),
]+foo["vals"])
#for friend_attr in fg2["friends"]:
fg2=readGDF(fname)
tkeys=list(fg2["friends"].keys())
def trans(tkey):
if tkey=="name":
return "uid"
if tkey=="label":
return "name"
return tkey
foo={"uris":[],"vals":[]}
for tkey in tkeys:
if tkey=="groupid":
P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
[P.rdf.ns.po.uid,],
[fg2["friends"][tkey][0]])
if tkey:
foo["uris"]+=[eval("P.rdf.ns.fb."+trans(tkey))]
foo["vals"]+=[fg2["friends"][tkey]]
print(tkeys)
iname=tkeys.index("name")
ilabel=tkeys.index("label")
icount=0
name_label={}
for vals_ in zip(*foo["vals"]):
name,label=[foo["vals"][i][icount] for i in (iname,ilabel)]
if not label:
label="po:noname"
vals_=list(vals_)
vals_[ilabel]=label
name_label[name]=label
ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name,label)
P.rdf.link([tg],ind,label,foo["uris"],
vals_,draw=False)
icount+=1
friendships_=[fg2["friendships"][i] for i in ("node1","node2")]
c("escritos participantes")
i=1
for uid1,uid2 in zip(*friendships_):
flabel="{}-{}".format(uid1,uid2)
labels=[name_label[uu] for uu in (uid1,uid2)]
ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,
flabel)
#flabel,"Friendship "+flabel)
ind1=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid1)
ind2=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid2)
uids=[r.URIRef(P.rdf.ns.fb.Participant+"#"+str(i)) for i in (uid1,uid2)]
P.rdf.link_([tg],ind,"Friendship "+flabel,[P.rdf.ns.fb.member]*2,
uids,labels,draw=False)
P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1])
if (i%1000)==0:
c(i)
i+=1
P.rdf.G(tg[0],P.rdf.ns.fb.friend,
P.rdf.ns.rdf.type,
P.rdf.ns.owl.SymmetricProperty)
c("escritas amizades")
tg_=[tg[0]+tg2[0],tg[1]]
fpath_="{}{}/".format(fpath,aname)
P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1)
# copia o script que gera este codigo
if not os.path.isdir(fpath_+"scripts"):
os.mkdir(fpath_+"scripts")
shutil.copy(scriptpath,fpath_+"scripts/")
# copia do base data
if not os.path.isdir(fpath_+"base"):
os.mkdir(fpath_+"base")
shutil.copy(fname,fpath_+"base/")
P.rdf.writeAll(tg2,aname+"Meta",fpath_,1)
# faz um README
with open(fpath_+"README","w") as f:
f.write("""This repo delivers RDF data from the facebook
friendship network of {} collected at {}.
It has {} friends with metadata {};
and {} friendships.
The linked data is available at rdf/ dir and was
generated by the routine in the script/ directory.
Original data from Netvizz in data/\n""".format(
name_,datetime_snapshot,
len(fg2["friends"]["name"]),
"facebook numeric id, name, locale, sex and agerank",
len(fg2["friendships"]["node1"])
))
|
Produce a linked data publication tree from GDF files of a Facebook interaction network.
INPUTS:
=> the file name (fname, with path) where the gdf file
of the friendship network is.
=> the final path (fpath) for the tree of files to be created.
=> a path to the script that is calling this function (scriptpath).
=> the numeric id (uid) of the facebook group
=> the string id (sid) of the facebook group of which fname holds a friendship network
OUTPUTS:
the tree in the directory fpath.
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fb/fb.py#L170-L326
|
[
"def readGDF(filename=\"../data/RenatoFabbri06022014.gdf\"):\n \"\"\"Made to work with my own network. Check file to ease adaptation\"\"\"\n with open(filename,\"r\") as f:\n data=f.read()\n lines=data.split(\"\\n\")\n columns=lines[0].split(\">\")[1].split(\",\")\n column_names=[i.split(\" \")[0] for i in columns]\n data_friends={cn:[] for cn in column_names}\n for line in lines[1:]:\n if not line:\n break\n if \">\" in line:\n columns=line.split(\">\")[1].split(\",\")\n column_names2=[i.split(\" \")[0] for i in columns]\n data_friendships={cn:[] for cn in column_names2}\n continue\n fields=line.split(\",\")\n if \"column_names2\" not in locals():\n for i, field in enumerate(fields):\n if field.isdigit(): field=int(field)\n data_friends[column_names[i]].append(field)\n else:\n for i, field in enumerate(fields):\n if field.isdigit(): field=int(field)\n data_friendships[column_names2[i]].append(field)\n return {\"friendships\":data_friendships,\n \"friends\":data_friends}\n",
"def trans(tkey):\n if tkey==\"name\":\n return \"uid\"\n if tkey==\"label\":\n return \"name\"\n return tkey\n"
] |
import time, os, pickle, shutil, datetime, re
import networkx as x, rdflib as r
from splinter import Browser
from bs4 import BeautifulSoup
import percolation as P
c=P.utils.check
this_dir = os.path.split(__file__)[0]
NS=P.rdf.ns
a=NS.rdf.type
def triplifyGML(fname="foo.gml",fpath="./fb/",scriptpath=None,uid=None,sid=None,extra_info=None):
"""Produce a linked data publication tree from a standard GML file.
INPUTS:
=> the file name (fname, with path) where the gdf file
of the friendship network is.
=> the final path (fpath) for the tree of files to be created.
=> a path to the script that is calling this function (scriptpath).
=> the numeric id (uid) of the facebook user of which fname holds a friendship network
=> the numeric id (sid) of the facebook user of which fname holds a friendship network
OUTPUTS:
the tree in the directory fpath."""
# aname=fname.split("/")[-1].split(".")[0]
aname=fname.split("/")[-1].split(".")[0]
if "RonaldCosta" in fname:
aname=fname.split("/")[-1].split(".")[0]
name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).gml",fname)[0]
datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
name_="Ronald Scherolt Costa"
elif "AntonioAnzoategui" in fname:
aname=re.findall(".*/([a-zA-Z]*\d*)",fname)[0]
name,year,month,day,hour,minute=re.findall(r".*/([a-zA-Z]*).*_(\d+)_(\d*)_(\d*)_(\d*)_(\d*)_.*",fname)[0]
datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat()[:-3]
name_="Antônio Anzoategui Fabbri"
elif re.findall(".*/[a-zA-Z]*(\d)",fname):
name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gml",fname)[0]
datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
elif re.findall("[a-zA-Z]*_",fname):
name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gml",fname)[0]
datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
else:
name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
aname+="_fb"
name+="_fb"
c("started snapshot",aname)
tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"the {} facebook ego friendship network")
tg2=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"RDF metadata for the facebook friendship network of my son")
snapshot=P.rdf.IC([tg2],P.rdf.ns.po.FacebookSnapshot,
aname,"Snapshot {}".format(aname))
extra_uri=extra_val=[]
if extra_info:
extra_uri=[NS.po.extraInfo]
extra_val=[extra_info]
P.rdf.link([tg2],snapshot,"Snapshot {}".format(aname),
[P.rdf.ns.po.createdAt,
P.rdf.ns.po.triplifiedIn,
P.rdf.ns.po.donatedBy,
P.rdf.ns.po.availableAt,
P.rdf.ns.po.originalFile,
P.rdf.ns.po.onlineTranslateXMLFile,
P.rdf.ns.po.onlineTranslateTTLFile,
P.rdf.ns.po.translateXMLFile,
P.rdf.ns.po.translateTTLFile,
P.rdf.ns.po.onlineMetaXMLFile,
P.rdf.ns.po.onlineMetaTTLFile,
P.rdf.ns.po.metaXMLFilename,
P.rdf.ns.po.metaTTLFilename,
P.rdf.ns.po.acquiredThrough,
P.rdf.ns.rdfs.comment,
P.rdf.ns.fb.uid,
P.rdf.ns.fb.sid
]+extra_uri,
[datetime_snapshot,
datetime.datetime.now(),
name,
"https://github.com/ttm/{}".format(aname),
"https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")[-1]),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.rdf".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname),
"{}Translate.rdf".format(aname),
"{}Translate.ttl".format(aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.rdf".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname),
"{}Meta.owl".format(aname),
"{}Meta.ttl".format(aname),
"Netvizz",
"The facebook friendship network from {}".format(name_),
uid,
sid
]+extra_val)
#for friend_attr in fg2["friends"]:
c((aname,name_,datetime_snapshot))
fg2=x.read_gml(fname)
c("read gml")
for uid in fg2:
c(uid)
ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,"{}-{}".format(aname,uid))
if "locale" in fg2.node[uid].keys():
data=[fg2.node[uid][attr] for attr in ("id","label","locale","sex","agerank","wallcount")]
uris=[NS.fb.gid, NS.fb.name,
NS.fb.locale, NS.fb.sex,
NS.fb.agerank,NS.fb.wallcount]
else:
data=[fg2.node[uid][attr] for attr in ("id","label","sex","agerank","wallcount")]
uris=[NS.fb.gid, NS.fb.name,
NS.fb.sex,
NS.fb.agerank,NS.fb.wallcount]
P.rdf.link([tg],ind, None,uris,data,draw=False)
P.rdf.link_([tg],ind,None,[NS.po.snapshot],[snapshot],draw=False)
#friends_=[fg2["friends"][i] for i in ("name","label","locale","sex","agerank")]
#for name,label,locale,sex,agerank in zip(*friends_):
# ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name,label)
# P.rdf.link([tg],ind,label,[P.rdf.ns.fb.uid,P.rdf.ns.fb.name,
# P.rdf.ns.fb.locale,P.rdf.ns.fb.sex,
# P.rdf.ns.fb.agerank],
# [name,label,locale,sex,agerank])
c("escritos participantes")
#friendships_=[fg2["friendships"][i] for i in ("node1","node2")]
i=1
for uid1,uid2 in fg2.edges():
flabel="{}-{}-{}".format(aname,uid1,uid2)
ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,flabel)
uids=[P.rdf.IC(None,P.rdf.ns.fb.Participant,"{}-{}".format(aname,i)) for i in (uid1,uid2)]
P.rdf.link_([tg],ind,flabel,[NS.po.snapshot]+[NS.fb.member]*2,
[snapshot]+uids,draw=False)
P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1])
if (i%1000)==0:
c(i)
i+=1
c("escritas amizades")
tg_=[tg[0]+tg2[0],tg[1]]
fpath_="{}/{}/".format(fpath,aname)
P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1)
# copia o script que gera este codigo
if not os.path.isdir(fpath_+"scripts"):
os.mkdir(fpath_+"scripts")
#shutil.copy(this_dir+"/../tests/rdfMyFNetwork2.py",fpath+"scripts/")
shutil.copy(scriptpath,fpath_+"scripts/")
# copia do base data
if not os.path.isdir(fpath_+"base"):
os.mkdir(fpath_+"base")
shutil.copy(fname,fpath_+"base/")
P.rdf.writeAll(tg2,aname+"Meta",fpath_,False)
# faz um README
with open(fpath_+"README","w") as f:
f.write("""This repo delivers RDF data from the facebook
friendship network of {} ({}) collected at {}.
It has {} friends with metadata {};
and {} friendships.
The linked data is available at rdf/ dir and was
generated by the routine in the script/ directory.
Original data from Netvizz in data/\n""".format(
name_,aname,datetime_snapshot,
fg2.number_of_nodes(),
"name, locale (maybe), sex, agerank and wallcount",
fg2.number_of_edges()))
def triplifyGDFInteraction(fname="foo.gdf",fpath="./fb/",scriptpath=None,uid=None,sid=None,dlink=None):
"""Produce a linked data publication tree from GDF files of a Facebook interaction network.
INPUTS:
=> the file name (fname, with path) where the gdf file
of the friendship network is.
=> the final path (fpath) for the tree of files to be created.
=> a path to the script that is calling this function (scriptpath).
=> the numeric id (uid) of the facebook group
=> the string id (sid) of the facebook group of which fname holds a friendship network
OUTPUTS:
the tree in the directory fpath."""
#aname=fname.split("/")[-1].split(".")[0]+"_fb"
aname=fname.split("/")[-1].split(".")[0]
if re.findall("[a-zA-Z]*_[0-9]",fname):
name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gdf",fname)[0]
datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
elif re.findall("(\d)",fname):
name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gdf",fname)[0]
datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
else:
datetime_snapshot=datetime.datetime(2013,3,15).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",aname))
aname+="_fb"
name=aname
tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"The facebook interaction network from the {} file".format(fname)) # drop de agraph
tg2=P.rdf.makeBasicGraph([["po"],[P.rdf.ns.per]],"Metadata for my facebook ego friendship network RDF files") # drop de agraph
ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot,
aname,"Snapshot {}".format(aname))
foo={"uris":[],"vals":[]}
if sid:
foo["uris"].append(P.rdf.ns.fb.sid)
foo["vals"].append(sid)
if uid:
foo["uris"].append(P.rdf.ns.fb.uid)
foo["vals"].append(uid)
if dlink:
foo["uris"].append(P.rdf.ns.fb.link)
foo["vals"].append(dlink)
P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
[P.rdf.ns.po.createdAt,
P.rdf.ns.po.triplifiedIn,
P.rdf.ns.po.donatedBy,
P.rdf.ns.po.availableAt,
P.rdf.ns.po.originalFile,
P.rdf.ns.po.rdfFile,
P.rdf.ns.po.ttlFile,
P.rdf.ns.po.discorveryRDFFile,
P.rdf.ns.po.discoveryTTLFile,
P.rdf.ns.po.acquiredThrough,
P.rdf.ns.rdfs.comment,
]+foo["uris"],
[datetime_snapshot,
datetime.datetime.now(),
name,
"https://github.com/ttm/{}".format(aname),
"https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.owl".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.owl".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname),
"Netvizz",
"The facebook friendship network from {}".format(name_),
]+foo["vals"])
#for friend_attr in fg2["friends"]:
fg2=readGDF(fname)
tkeys=list(fg2["friends"].keys())
def trans(tkey):
if tkey=="name":
return "uid"
if tkey=="label":
return "name"
return tkey
foo={"uris":[],"vals":[]}
for tkey in tkeys:
if tkey=="groupid":
P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
[P.rdf.ns.po.uid,],
[fg2["friends"][tkey][0]])
if tkey:
foo["uris"]+=[eval("P.rdf.ns.fb."+trans(tkey))]
foo["vals"]+=[fg2["friends"][tkey]]
print(tkeys)
iname=tkeys.index("name")
ilabel=tkeys.index("label")
icount=0
name_label={}
for vals_ in zip(*foo["vals"]):
name,label=[foo["vals"][i][icount] for i in (iname,ilabel)]
if not label:
label="po:noname"
vals_=list(vals_)
vals_[ilabel]=label
name_label[name]=label
ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name,label)
P.rdf.link([tg],ind,label,foo["uris"],
vals_,draw=False)
icount+=1
friendships_=[fg2["friendships"][i] for i in ("node1","node2")]
c("escritos participantes")
i=1
for uid1,uid2 in zip(*friendships_):
flabel="{}-{}".format(uid1,uid2)
labels=[name_label[uu] for uu in (uid1,uid2)]
ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,
flabel)
#flabel,"Friendship "+flabel)
ind1=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid1)
ind2=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid2)
uids=[r.URIRef(P.rdf.ns.fb.Participant+"#"+str(i)) for i in (uid1,uid2)]
P.rdf.link_([tg],ind,"Friendship "+flabel,[P.rdf.ns.fb.member]*2,
uids,labels,draw=False)
P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1])
if (i%1000)==0:
c(i)
i+=1
P.rdf.G(tg[0],P.rdf.ns.fb.friend,
P.rdf.ns.rdf.type,
P.rdf.ns.owl.SymmetricProperty)
c("escritas amizades")
tg_=[tg[0]+tg2[0],tg[1]]
fpath_="{}{}/".format(fpath,aname)
P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1)
# copia o script que gera este codigo
if not os.path.isdir(fpath_+"scripts"):
os.mkdir(fpath_+"scripts")
shutil.copy(scriptpath,fpath_+"scripts/")
# copia do base data
if not os.path.isdir(fpath_+"base"):
os.mkdir(fpath_+"base")
shutil.copy(fname,fpath_+"base/")
P.rdf.writeAll(tg2,aname+"Meta",fpath_,1)
# faz um README
with open(fpath_+"README","w") as f:
f.write("""This repo delivers RDF data from the facebook
friendship network of {} collected at {}.
It has {} friends with metadata {};
and {} friendships.
The linked data is available at rdf/ dir and was
generated by the routine in the script/ directory.
Original data from Netvizz in data/\n""".format(
name_,datetime_snapshot,
len(fg2["friends"]["name"]),
"facebook numeric id, name, locale, sex and agerank",
len(fg2["friendships"]["node1"])
))
def triplifyGDF(fname="foo.gdf",fpath="./fb/",scriptpath=None,uid=None,sid=None,dlink=None):
"""Produce a linked data publication tree from a standard GDF file.
INPUTS:
=> the file name (fname, with path) where the gdf file
of the friendship network is.
=> the final path (fpath) for the tree of files to be created.
=> a path to the script that is calling this function (scriptpath).
=> the numeric id (uid) of the facebook user of which fname holds a friendship network
=> the numeric id (sid) of the facebook user of which fname holds a friendship network
OUTPUTS:
the tree in the directory fpath."""
#aname=fname.split("/")[-1].split(".")[0]+"_fb"
aname=fname.split("/")[-1].split(".")[0]
if re.findall("[a-zA-Z]*_[0-9]",fname):
name,year,month,day,hour,minute=re.findall(".*/([a-zA-Z]*).*(\d\d\d\d)_(\d\d)_(\d\d)_(\d\d)_(\d\d).*.gdf",fname)[0]
datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day,hour,minute)]).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
elif re.findall("(\d)",fname):
name,day,month,year=re.findall(".*/([a-zA-Z]*)(\d\d)(\d\d)(\d\d\d\d).*.gdf",fname)[0]
datetime_snapshot=datetime.datetime(*[int(i) for i in (year,month,day)]).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",name))
else:
datetime_snapshot=datetime.datetime(2013,3,15).isoformat().split("T")[0]
name_=" ".join(re.findall("[A-Z][^A-Z]*",aname))
aname+="_fb"
name=aname
tg=P.rdf.makeBasicGraph([["po","fb"],[P.rdf.ns.per,P.rdf.ns.fb]],"My facebook ego friendship network") # drop de agraph
tg2=P.rdf.makeBasicGraph([["po"],[P.rdf.ns.per]],"Metadata for my facebook ego friendship network RDF files") # drop de agraph
ind=P.rdf.IC([tg2],P.rdf.ns.po.Snapshot,
aname,"Snapshot {}".format(aname))
foo={"uris":[],"vals":[]}
if sid:
foo["uris"].append(P.rdf.ns.fb.sid)
foo["vals"].append(sid)
if uid:
foo["uris"].append(P.rdf.ns.fb.uid)
foo["vals"].append(uid)
if dlink:
foo["uris"].append(P.rdf.ns.fb.link)
foo["vals"].append(dlink)
P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
[P.rdf.ns.po.createdAt,
P.rdf.ns.po.triplifiedIn,
P.rdf.ns.po.donatedBy,
P.rdf.ns.po.availableAt,
P.rdf.ns.po.originalFile,
P.rdf.ns.po.rdfFile,
P.rdf.ns.po.ttlFile,
P.rdf.ns.po.discorveryRDFFile,
P.rdf.ns.po.discoveryTTLFile,
P.rdf.ns.po.acquiredThrough,
P.rdf.ns.rdfs.comment,
]+foo["uris"],
[datetime_snapshot,
datetime.datetime.now(),
name,
"https://github.com/ttm/{}".format(aname),
"https://raw.githubusercontent.com/ttm/{}/master/base/{}".format(aname,fname.split("/")),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.owl".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Translate.ttl".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.owl".format(aname,aname),
"https://raw.githubusercontent.com/ttm/{}/master/rdf/{}Meta.ttl".format(aname,aname),
"Netvizz",
"The facebook friendship network from {}".format(name_),
]+foo["vals"])
#for friend_attr in fg2["friends"]:
fg2=readGDF(fname)
tkeys=list(fg2["friends"].keys())
def trans(tkey):
if tkey=="name":
return "uid"
if tkey=="label":
return "name"
return tkey
foo={"uris":[],"vals":[]}
for tkey in tkeys:
if tkey=="groupid":
P.rdf.link([tg2],ind,"Snapshot {}".format(aname),
[P.rdf.ns.po.uid,],
[fg2["friends"][tkey][0]])
if tkey:
foo["uris"]+=[eval("P.rdf.ns.fb."+trans(tkey))]
foo["vals"]+=[fg2["friends"][tkey]]
print(tkeys)
iname=tkeys.index("name")
ilabel=tkeys.index("label")
icount=0
name_label={}
for vals_ in zip(*foo["vals"]):
name,label=[foo["vals"][i][icount] for i in (iname,ilabel)]
if not label:
label="po:noname"
vals_=list(vals_)
vals_[ilabel]=label
name_label[name]=label
ind=P.rdf.IC([tg],P.rdf.ns.fb.Participant,name,label)
P.rdf.link([tg],ind,label,foo["uris"],
vals_,draw=False)
icount+=1
friendships_=[fg2["friendships"][i] for i in ("node1","node2")]
c("escritos participantes")
i=1
for uid1,uid2 in zip(*friendships_):
flabel="{}-{}".format(uid1,uid2)
labels=[name_label[uu] for uu in (uid1,uid2)]
ind=P.rdf.IC([tg],P.rdf.ns.fb.Friendship,
flabel)
#flabel,"Friendship "+flabel)
ind1=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid1)
ind2=P.rdf.IC(None,P.rdf.ns.fb.Participant,uid2)
uids=[r.URIRef(P.rdf.ns.fb.Participant+"#"+str(i)) for i in (uid1,uid2)]
P.rdf.link_([tg],ind,"Friendship "+flabel,[P.rdf.ns.fb.member]*2,
uids,labels,draw=False)
P.rdf.L_([tg],uids[0],P.rdf.ns.fb.friend,uids[1])
if (i%1000)==0:
c(i)
i+=1
P.rdf.G(tg[0],P.rdf.ns.fb.friend,
P.rdf.ns.rdf.type,
P.rdf.ns.owl.SymmetricProperty)
c("escritas amizades")
tg_=[tg[0]+tg2[0],tg[1]]
fpath_="{}{}/".format(fpath,aname)
P.rdf.writeAll(tg_,aname+"Translate",fpath_,False,1)
# copia o script que gera este codigo
if not os.path.isdir(fpath_+"scripts"):
os.mkdir(fpath_+"scripts")
shutil.copy(scriptpath,fpath_+"scripts/")
# copia do base data
if not os.path.isdir(fpath_+"base"):
os.mkdir(fpath_+"base")
shutil.copy(fname,fpath_+"base/")
P.rdf.writeAll(tg2,aname+"Meta",fpath_,1)
# faz um README
with open(fpath_+"README","w") as f:
f.write("""This repo delivers RDF data from the facebook
friendship network of {} collected at {}.
It has {} friends with metadata {};
and {} friendships.
The linked data is available at rdf/ dir and was
generated by the routine in the script/ directory.
Original data from Netvizz in data/\n""".format(
name_,datetime_snapshot,
len(fg2["friends"]["name"]),
"facebook numeric id, name, locale, sex and agerank",
len(fg2["friendships"]["node1"])
))
def makeRDF(readgdf_dict,fdir="../data/rdf/"):
# return rdflib graph from the data
rd=readgdf_dict
# ns=namespaces=pe.namespaces(["rdf","rdfs","xsd", # basic namespaces
# ])
# for friend in range(len(rd["friends"]["name"])):
# pass
def readGDF(filename="../data/RenatoFabbri06022014.gdf"):
"""Made to work with my own network. Check file to ease adaptation"""
with open(filename,"r") as f:
data=f.read()
lines=data.split("\n")
columns=lines[0].split(">")[1].split(",")
column_names=[i.split(" ")[0] for i in columns]
data_friends={cn:[] for cn in column_names}
for line in lines[1:]:
if not line:
break
if ">" in line:
columns=line.split(">")[1].split(",")
column_names2=[i.split(" ")[0] for i in columns]
data_friendships={cn:[] for cn in column_names2}
continue
fields=line.split(",")
if "column_names2" not in locals():
for i, field in enumerate(fields):
if field.isdigit(): field=int(field)
data_friends[column_names[i]].append(field)
else:
for i, field in enumerate(fields):
if field.isdigit(): field=int(field)
data_friendships[column_names2[i]].append(field)
return {"friendships":data_friendships,
"friends":data_friends}
#self.makeNetwork()
class GDFgraph:
"""Read GDF graph into networkX"""
def __init__(self,filename="../data/RenatoFabbri06022014.gdf"):
with open(filename,"r") as f:
self.data=f.read()
self.lines=self.data.split("\n")
columns=self.lines[0].split(">")[1].split(",")
column_names=[i.split(" ")[0] for i in columns]
data_friends={cn:[] for cn in column_names}
for line in self.lines[1:]:
if not line:
break
if ">" in line:
columns=line.split(">")[1].split(",")
column_names2=[i.split(" ")[0] for i in columns]
data_friendships={cn:[] for cn in column_names2}
continue
fields=line.split(",")
if "column_names2" not in locals():
for i, field in enumerate(fields):
if field.isdigit(): field=int(field)
data_friends[column_names[i]].append(field)
else:
for i, field in enumerate(fields):
if field.isdigit(): field=int(field)
data_friendships[column_names2[i]].append(field)
self.data_friendships=data_friendships
self.data_friends=data_friends
self.n_friends=len(data_friends[column_names[0]])
self.n_friendships=len(data_friendships[column_names2[0]])
self.makeNetwork()
def makeNetwork(self):
"""Makes graph object from .gdf loaded data"""
if "weight" in self.data_friendships.keys():
self.G=G=x.DiGraph()
else:
self.G=G=x.Graph()
F=self.data_friends
for friendn in range(self.n_friends):
if "posts" in F.keys():
G.add_node(F["name"][friendn],
label=F["label"][friendn],
posts=F["posts"][friendn])
elif "agerank" in F.keys():
G.add_node(F["name"][friendn],
label=F["label"][friendn],
gender=F["sex"][friendn],
locale=F["locale"][friendn],
agerank=F["agerank"][friendn])
else:
G.add_node(F["name"][friendn],
label=F["label"][friendn],
gender=F["sex"][friendn],
locale=F["locale"][friendn])
F=self.data_friendships
for friendshipn in range(self.n_friendships):
if "weight" in F.keys():
G.add_edge(F["node1"][friendshipn],F["node2"][friendshipn],weight=F["weight"][friendshipn])
else:
G.add_edge(F["node1"][friendshipn],F["node2"][friendshipn])
def readFBPost(fpath=""):
"""Extract information from HTML page with a Facebook post"""
html=open(fpath,"rb")
soup = BeautifulSoup(html, "lxml")
return soup
class ScrapyBrowser:
"""Opens a browser for user to login to facebook.
Such browser pulls data as requested by user."""
def __init__(self,user_email=None, user_password=None,basedir="~/.social/"):
self._BASE_DIR=basedir.replace("~",os.path.expanduser("~"))
if not os.path.isdir(self._BASE_DIR):
os.mkdir(self._BASE_DIR)
print("Opening *Scrappy* firefox browser. Please wait.")
self.browser=browser=Browser(wait_time=2)
url="http://facebook.com"
browser.visit(url)
if (not user_email) or (not user_password):
input("\n\n==> Input user and password and login, please.\
and then press <enter>")
else:
browser.fill("email",user_email)
browser.fill("pass",user_password)
browser.find_by_value("Log In").click()
def getFriends(self,user_id="astronauta.mecanico",write=True):
"""Returns user_ids (that you have access) of the friends of your friend with user_ids"""
while user_id not in self.browser.url:
self.browser.visit("http://www.facebook.com/{}/friends".format(user_id), wait_time=3)
#self.go("http://www.facebook.com/{}/friends".format(user_id))
T=time.time()
while 1:
h1=self.browser.evaluate_script("document.body.scrollHeight")
self.browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
h2=self.browser.evaluate_script("document.body.scrollHeight")
if h1 != h2:
T=time.time()
elif time.time()-T>10:
break
#links=self.browser.find_link_by_partial_href("hc_location=friends_tab")
links=self.browser.find_by_css(".fcb")
friends=[]
for link in links:
name=link.value
user_id_=link.find_by_tag("a")["href"].split("/")[-1].split("?")[0]
friends.append((user_id_,name))
tdict={}
tdict["name"]=self.browser.find_by_id("fb-timeline-cover-name").value
tdict["user_id"]=user_id
tdict["friends"]=friends
infos=self.browser.find_by_css("._3c_")
mutual=0
for info in infos:
if info.value=="Mutual Friends":
if info.find_by_css("._3d0").value:
tdict["n_mutual"]=info.find_by_css("._3d0").value
mutual=1
if info.value=="All Friends":
tdict["n_friends"]=info.find_by_css("._3d0").value
if mutual==0:
links=self.browser.find_by_css("._gs6")
if "Mutual" in links.value:
tdict["n_mutual"]=links.value.split(" ")[0]
if write:
if not os.path.isdir("{}/fb_ids/".format(self._BASE_DIR)):
os.mkdir("{}/fb_ids/".format(self._BASE_DIR))
with open("{}fb_ids/{}.pickle".format(self._BASE_DIR,user_id),"wb") as f:
pickle.dump(tdict,f)
self.tdict=tdict
return tdict
|
ttm/socialLegacy
|
social/fb/fb.py
|
ScrapyBrowser.getFriends
|
python
|
def getFriends(self,user_id="astronauta.mecanico",write=True):
while user_id not in self.browser.url:
self.browser.visit("http://www.facebook.com/{}/friends".format(user_id), wait_time=3)
#self.go("http://www.facebook.com/{}/friends".format(user_id))
T=time.time()
while 1:
h1=self.browser.evaluate_script("document.body.scrollHeight")
self.browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
h2=self.browser.evaluate_script("document.body.scrollHeight")
if h1 != h2:
T=time.time()
elif time.time()-T>10:
break
#links=self.browser.find_link_by_partial_href("hc_location=friends_tab")
links=self.browser.find_by_css(".fcb")
friends=[]
for link in links:
name=link.value
user_id_=link.find_by_tag("a")["href"].split("/")[-1].split("?")[0]
friends.append((user_id_,name))
tdict={}
tdict["name"]=self.browser.find_by_id("fb-timeline-cover-name").value
tdict["user_id"]=user_id
tdict["friends"]=friends
infos=self.browser.find_by_css("._3c_")
mutual=0
for info in infos:
if info.value=="Mutual Friends":
if info.find_by_css("._3d0").value:
tdict["n_mutual"]=info.find_by_css("._3d0").value
mutual=1
if info.value=="All Friends":
tdict["n_friends"]=info.find_by_css("._3d0").value
if mutual==0:
links=self.browser.find_by_css("._gs6")
if "Mutual" in links.value:
tdict["n_mutual"]=links.value.split(" ")[0]
if write:
if not os.path.isdir("{}/fb_ids/".format(self._BASE_DIR)):
os.mkdir("{}/fb_ids/".format(self._BASE_DIR))
with open("{}fb_ids/{}.pickle".format(self._BASE_DIR,user_id),"wb") as f:
pickle.dump(tdict,f)
self.tdict=tdict
return tdict
|
Returns user_ids (that you have access) of the friends of your friend with user_ids
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fb/fb.py#L611-L655
| null |
class ScrapyBrowser:
"""Opens a browser for user to login to facebook.
Such browser pulls data as requested by user."""
def __init__(self,user_email=None, user_password=None,basedir="~/.social/"):
self._BASE_DIR=basedir.replace("~",os.path.expanduser("~"))
if not os.path.isdir(self._BASE_DIR):
os.mkdir(self._BASE_DIR)
print("Opening *Scrappy* firefox browser. Please wait.")
self.browser=browser=Browser(wait_time=2)
url="http://facebook.com"
browser.visit(url)
if (not user_email) or (not user_password):
input("\n\n==> Input user and password and login, please.\
and then press <enter>")
else:
browser.fill("email",user_email)
browser.fill("pass",user_password)
browser.find_by_value("Log In").click()
|
ttm/socialLegacy
|
social/fb/read.py
|
readGDF
|
python
|
def readGDF(filename="../data/RenatoFabbri06022014.gdf"):
with open(filename,"r") as f:
data=f.read()
lines=data.split("\n")
columns=lines[0].split(">")[1].split(",")
column_names=[i.split(" ")[0] for i in columns]
data_friends={cn:[] for cn in column_names}
for line in lines[1:]:
if not line:
break
if ">" in line:
columns=line.split(">")[1].split(",")
column_names2=[i.split(" ")[0] for i in columns]
data_friendships={cn:[] for cn in column_names2}
continue
fields=line.split(",")
if "column_names2" not in locals():
for i, field in enumerate(fields):
if column_names[i] in ("name","groupid"): pass
elif field.isdigit(): field=int(field)
data_friends[column_names[i]].append(field)
else:
for i, field in enumerate(fields):
if column_names2[i]=="name": pass
elif field.isdigit(): field=int(field)
data_friendships[column_names2[i]].append(field)
return {"relations":data_friendships,
"individuals":data_friends}
|
Made to work with gdf files from my own network and friends and groups
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fb/read.py#L164-L192
| null |
import networkx as x, percolation as P, re
c=P.utils.check
def readGML2(filename="../data/RenatoFabbri06022014.gml"):
with open(filename,"r") as f:
data=f.read()
lines=data.split("\n")
nodes=[] # list of dicts, each a node
edges=[] # list of tuples
state="receive"
for line in lines:
if state=="receive":
if "node" in line:
state="node"
nodes.append({})
if "edge" in line:
state="edge"
edges.append({})
elif "]" in line:
state="receive"
elif "[" in line:
pass
elif state=="node":
var,val=re.findall(r"(.*?) (.*)",line.strip())[0]
if var=="id":
var="name"
val="user_{}".format(val)
elif '"' in val:
val=val.replace('"',"")
else:
val=int(val)
nodes[-1][var]=val
elif state=="edge":
var,val=line.strip().split()
edges[-1][var]=val
else:
c("SPURIOUS LINE: "+line)
keys=set([j for i in nodes for j in i.keys()])
nodes_={}
for key in keys:
if key == "id":
nodes_["name"]=[None]*len(nodes)
i=0
for node in nodes:
nodes_["name"][i]="user_{}".format(node[key])
i+=1
else:
nodes_[key]=[None]*len(nodes)
i=0
for node in nodes:
if key in node.keys():
nodes_[key][i]=node[key]
i+=1
c("para carregar as amizades")
edges_={"node1":[None]*len(edges), "node2":[None]*len(edges)}
i=0
for edge in edges:
u1="user_{}".format(edge["source"])
u2="user_{}".format(edge["target"])
edges_["node1"][i]=u1
edges_["node2"][i]=u2
i+=1
return {"relations": edges_,
"individuals": nodes_}
gg=x.read_gml(filename)
nodes=gg.nodes(data=True)
nodes_=[i[1] for i in nodes]
nodes__={}
nkeys=[]
c("para carregar os individuos")
for node in nodes_:
nkeys+=list(node.keys())
nkeys=set(nkeys)
for key in nkeys:
if key == "id":
nodes__["name"]=[None]*len(nodes_)
i=0
for node in nodes_:
nodes__["name"][i]="user_{}".format(node[key])
i+=1
else:
nodes__[key]=[None]*len(nodes_)
i=0
for node in nodes_:
if key in node.keys():
nodes__[key][i]=node[key]
i+=1
c("para carregar as amizades")
edges=gg.edges(data=True)
edges_={"node1":[None]*len(edges), "node2":[None]*len(edges)}
i=0
for edge in edges:
u1="user_{}".format(edge[0])
u2="user_{}".format(edge[1])
edges_["node1"][i]=u1
edges_["node2"][i]=u2
i+=1
# if edges[0][2]:
# edges_=[i[2] for i in edges]
# edges__={}
# ekeys=edges_[0].keys()
# for key in ekeys:
# edges__[key]=[]
# for edge in edges_:
# edges__[key]+=[edge[key]]
return {"relations": edges_,
"individuals": nodes__}
def readGML(filename="../data/RenatoFabbri06022014.gml"):
    """Load a GML friendship graph and flatten it into column-oriented tables.

    Parameters
    ----------
    filename : str
        Path to a GML file, read with networkx ``read_gml`` (aliased as ``x``).

    Returns
    -------
    dict
        ``{"relations": {"node1": [...], "node2": [...]},
           "individuals": {attribute: [values aligned with node order]}}``.
        Node ids are exposed under the ``"name"`` column as ``"user_<id>"``
        strings; attributes missing on a node are left as ``None`` so all
        columns stay aligned.

    NOTE(review): the original ended with an unreachable ``return gg`` after
    the dict return; removed as dead code.
    """
    gg = x.read_gml(filename)
    nodes_ = [data for _, data in gg.nodes(data=True)]
    c("para carregar os individuos")  # progress message (kept verbatim)
    # union of every attribute key seen on any node
    nkeys = set()
    for node in nodes_:
        nkeys.update(node.keys())
    nodes__ = {}
    for key in nkeys:
        if key == "id":
            # ids become the "name" column, prefixed with "user_"
            nodes__["name"] = ["user_{}".format(node[key]) for node in nodes_]
        else:
            # dict.get leaves None for nodes lacking this attribute
            # (matches the original [None]*len prefill interpretation)
            nodes__[key] = [node.get(key) for node in nodes_]
    c("para carregar as amizades")  # progress message (kept verbatim)
    edges = gg.edges(data=True)
    edges_ = {
        "node1": ["user_{}".format(edge[0]) for edge in edges],
        "node2": ["user_{}".format(edge[1]) for edge in edges],
    }
    return {"relations": edges_,
            "individuals": nodes__}
|
ttm/socialLegacy
|
social/tw.py
|
Twitter.searchTag
|
python
|
def searchTag(self, HTAG="#python"):
    """Collect recent tweets matching *HTAG* into ``self.ss``.

    Builds a Twython client from the credentials set in ``__init__``, fetches
    an initial page of results, then pages backwards via ``max_id`` until an
    empty result is returned.  Accumulated results are stored on ``self.ss``.

    NOTE(review): the original loop condition was the misspelled name
    ``seach``, which raised NameError on first evaluation; fixed to
    ``search``.  Twython's ``search()`` returns a dict — the ``[:]`` and
    ``[-1]['id']`` indexing below presumably assumes a list of statuses;
    verify against the Twython version in use.
    """
    self.t = Twython(app_key=self.app_key,
                     app_secret=self.app_secret,
                     oauth_token=self.oauth_token,
                     oauth_token_secret=self.oauth_token_secret)
    search = self.t.search(q=HTAG, count=100, result_type="recent")
    ss = search[:]
    search = self.t.search(q=HTAG, count=150, max_id=ss[-1]['id'] - 1,
                           result_type="recent")
    # page until the API returns an empty result set
    while search:
        ss += search[:]
        search = self.t.search(q=HTAG, count=150, max_id=ss[-1]['id'] - 1,
                               result_type="recent")
    self.ss = ss
|
Set Twitter search or stream criteria for the selection of tweets
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/tw.py#L293-L307
| null |
class Twitter:
    """Simplified Twitter interface for Stability observance
    # function to set authentication: __init__()
    # function to set hashtag and other tweets selection criteria: searchTag()
    # function to search tweets: searchTag()
    # function to stream tweets: void
    """
    # Default credentials pulled from the local ``tw`` module at class
    # definition time; importing this module therefore requires ``tw`` to
    # expose tak/taks/tat/tats.
    TWITTER_API_KEY = tw.tak
    TWITTER_API_KEY_SECRET = tw.taks
    TWITTER_ACCESS_TOKEN = tw.tat
    TWITTER_ACCESS_TOKEN_SECRET = tw.tats
    def __init__(self,app_key= None,
                 app_secret= None,
                 oauth_token= None,
                 oauth_token_secret=None,):
        """Start twitter search and stream interface.

        If *app_key* is falsy, ALL four credentials fall back to the
        module-level defaults — partial overrides are ignored, since only
        app_key is tested.
        """
        if not app_key:
            self.app_key= self.TWITTER_API_KEY
            self.app_secret= self.TWITTER_API_KEY_SECRET
            self.oauth_token= self.TWITTER_ACCESS_TOKEN
            self.oauth_token_secret=self.TWITTER_ACCESS_TOKEN_SECRET
        else:
            self.app_key= app_key
            self.app_secret= app_secret
            self.oauth_token= oauth_token
            self.oauth_token_secret=oauth_token_secret
    def streamTag(self,HTAG="#python",aname=None):
        """Stream tweets matching *HTAG* through a MyStreamer instance.

        *aname* is the archive name handed to the streamer; it defaults to
        the hashtag without its leading '#', suffixed with "_tw".  The call
        to ``statuses.filter`` blocks while streaming.
        """
        if not aname:
            aname=HTAG[1:]+"_tw"
        stream=MyStreamer(self.app_key ,
                          self.app_secret ,
                          self.oauth_token ,
                          self.oauth_token_secret)
        stream.putName(aname)
        self.stream=stream
        stream.statuses.filter(track=HTAG)
    def finishStream(self):
        # Close the streamer's storage handle ``D`` (presumably a shelve or
        # similar persistent store opened by MyStreamer — confirm).
        self.stream.D.close()
|
ttm/socialLegacy
|
social/fsong.py
|
FSong.makePartitions
|
python
|
def makePartitions(self):
    """Make partitions with gmane help.

    Builds a plain attribute bag of network measures (degrees, degree-sorted
    node list, edges, edge/node counts) from ``self.network`` and feeds it to
    ``g.NetworkPartitioning`` with 10 sectors and the "g" metric.  Stores the
    measures on ``self.nm`` and the partitioning on ``self.np``.
    """
    class NetworkMeasures:
        # attribute bag consumed by g.NetworkPartitioning
        pass
    graph = self.network
    measures = NetworkMeasures()
    measures.degrees = graph.degree()
    # nodes sorted by ascending degree, with the parallel degree list
    measures.nodes_ = sorted(graph.nodes(),
                             key=lambda node: measures.degrees[node])
    measures.degrees_ = [measures.degrees[node] for node in measures.nodes_]
    measures.edges = graph.edges(data=True)
    measures.E = graph.number_of_edges()
    measures.N = graph.number_of_nodes()
    self.nm = measures
    self.np = g.NetworkPartitioning(measures, 10, metric="g")
|
Make partitions with gmane help.
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fsong.py#L29-L41
| null |
class FSong:
"""Create song from undirected (friendship) network
"""
def __init__(self, network,basedir="fsong/",clean=False,render_images=False,render_images2=False,make_video=False):
os.system("mkdir {}".format(basedir))
if clean:
os.system("rm {}*".format(basedir))
self.basedir=basedir
self.network=network
self.makePartitions()
if render_images:
self.makeImages()
self.make_video=make_video
self.makeSong()
if render_images2:
self.makeImages2()
self.makeSong2()
def makeSong2(self):
pass
def makeImages(self):
"""Make spiral images in sectors and steps.
Plain, reversed,
sectorialized, negative sectorialized
outline, outline reversed, lonely
only nodes, only edges, both
"""
# make layout
self.makeLayout()
self.setAgraph()
# make function that accepts a mode, a sector
# and nodes and edges True and False
self.plotGraph()
self.plotGraph("reversed",filename="tgraphR.png")
agents=n.concatenate(self.np.sectorialized_agents__)
for i, sector in enumerate(self.np.sectorialized_agents__):
self.plotGraph("plain", sector,"sector{:02}.png".format(i))
self.plotGraph("reversed",sector,"sector{:02}R.png".format(i))
self.plotGraph("plain", n.setdiff1d(agents,sector),"sector{:02}N.png".format(i))
self.plotGraph("reversed",n.setdiff1d(agents,sector),"sector{:02}RN.png".format(i))
self.plotGraph("plain", [],"BLANK.png")
def makeImages2(self):
for i, node in enumerate(self.nm.nodes_):
self.plotGraph("plain", [node],"lonely{:09}.png".format(i))
self.plotGraph("reversed",[node],"lonely{:09}R.png".format(i))
self.plotGraph("plain", self.nm.nodes_[:i],"stair{:09}.png".format(i))
self.plotGraph("reversed",self.nm.nodes_[:i],"stair{:09}R.png".format(i))
# plotar novamente usando somente vertices e depois somente arestas
def plotGraph(self,mode="plain",nodes=None,filename="tgraph.png"):
"""Plot graph with nodes (iterable) into filename
"""
if nodes==None:
nodes=self.nodes
else:
nodes=[i for i in self.nodes if i in nodes]
for node in self.nodes:
n_=self.A.get_node(node)
if mode=="plain":
nmode=1
else:
nmode=-1
pos="{},{}".format(self.xi[::nmode][self.nm.nodes_.index(node)],self.yi[::nmode][self.nm.nodes_.index(node)])
n_.attr["pos"]=pos
n_.attr["pin"]=True
color='#%02x%02x%02x' % tuple([255*i for i in self.cm[int(self.clustering[n_]*255)][:-1]])
n_.attr['fillcolor']= color
n_.attr['fixedsize']=True
n_.attr['width']= abs(.1*(self.nm.degrees[n_]+ .5))
n_.attr['height']= abs(.1*(self.nm.degrees[n_]+.5))
n_.attr["label"]=""
if node not in nodes:
n_.attr["style"]="invis"
else:
n_.attr["style"]="filled"
for e in self.edges:
e.attr['penwidth']=3.4
e.attr["arrowsize"]=1.5
e.attr["arrowhead"]="lteeoldiamond"
e.attr["style"]=""
if sum([i in nodes for i in (e[0],e[1])])==2:
e.attr["style"]=""
else:
e.attr["style"]="invis"
tname="{}{}".format(self.basedir,filename)
print(tname)
self.A.draw(tname,prog="neato")
def setAgraph(self):
self.A=x.to_agraph(self.network)
self.A.graph_attr["viewport"]="500,500,.03"
self.edges=self.A.edges()
self.nodes=self.A.nodes()
self.cm=p.cm.Reds(range(2**10)) # color table
self.clustering=x.clustering(self.network)
def makeLayout(self):
ri=4
rf=100
nturns=3
ii=n.linspace(0,nturns*2*n.pi,self.nm.N)
rr=n.linspace(ri,rf,self.nm.N)
self.xi=(rr*n.cos(ii))
self.yi=(rr*n.sin(ii))
def makeSong(self):
"""Render abstract animation
"""
self.makeVisualSong()
self.makeAudibleSong()
if self.make_video:
self.makeAnimation()
def makeVisualSong(self):
"""Return a sequence of images and durations.
"""
self.files=os.listdir(self.basedir)
self.stairs=[i for i in self.files if ("stair" in i) and ("R" in i)]
self.sectors=[i for i in self.files if "sector" in i]
self.stairs.sort()
self.sectors.sort()
filenames=[self.basedir+i for i in self.sectors[:4]]
self.iS0=mpy.ImageSequenceClip(filenames,durations=[1.5,2.5,.5,1.5])
self.iS1=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3]],
durations=[0.25]*8)
self.iS2=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[0]],
durations=[0.75,0.25,0.75,0.25,2.]) # cai para sensível
self.iS3=mpy.ImageSequenceClip(
[self.basedir+"BLANK.png",
self.basedir+self.sectors[0],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[0],
self.basedir+self.sectors[0]],
durations=[1,0.5,2.,.25,.25,1.75, 0.25]) # [-1,8]
self.iS4=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[3], # .5
self.basedir+self.sectors[5], # .5
self.basedir+self.sectors[2], # .75
self.basedir+self.sectors[0], #.25
self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[0], # 2 8
self.basedir+self.sectors[3], # 2 7
self.basedir+self.sectors[0], # 2 -1
self.basedir+"BLANK.png",# 2
],
durations=[1,0.5,0.5,.75,
.25,1., 2.,2.,2.,2.]) # [0,7,11,0]
self.iS=mpy.concatenate_videoclips((
self.iS0,self.iS1,self.iS2,self.iS3,self.iS4))
# Clip with three first images3
# each sector a sound
# sweep from periphery to center
# all, all inverted
# sectors with inversions
def makeAudibleSong(self):
"""Use mass to render wav soundtrack.
"""
sound0=n.hstack((sy.render(220,d=1.5),
sy.render(220*(2**(7/12)),d=2.5),
sy.render(220*(2**(-5/12)),d=.5),
sy.render(220*(2**(0/12)),d=1.5),
))
sound1=n.hstack((sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
))
sound2=n.hstack((sy.render(220*(2**(0/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(-1/12)),d=2.0),
))
sound3=n.hstack((n.zeros(44100),
sy.render(220*(2**(-1/12)),d=.5),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(-1/12)),d=1.75),
sy.render(220*(2**(-1/12)),d=.25),
))
sound4=n.hstack((
sy.render(220*(2**(0/12)),d=1.),
sy.render(220*(2**(7/12)),d=.5),
sy.render(220*(2**(11/12)),d=.5),
sy.render(220*(2**(12/12)),d=.75),
sy.render(220*(2**(11/12)),d=.25),
sy.render(220*(2**(12/12)),d=1.),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(7/12)),d=2.),
sy.render(220*(2**(-1/12)),d=2.),
n.zeros(2*44100)
))
sound=n.hstack((sound0,sound1,sound2,sound3,sound4))
UT.write(sound,"sound.wav")
def makeAnimation(self):
"""Use pymovie to render (visual+audio)+text overlays.
"""
aclip=mpy.AudioFileClip("sound.wav")
self.iS=self.iS.set_audio(aclip)
self.iS.write_videofile("mixedVideo.webm",15,audio=True)
print("wrote "+"mixedVideo.webm")
|
ttm/socialLegacy
|
social/fsong.py
|
FSong.makeImages
|
python
|
def makeImages(self):
    """Make spiral images in sectors and steps.
    Plain, reversed,
    sectorialized, negative sectorialized
    outline, outline reversed, lonely
    only nodes, only edges, both
    """
    # compute spiral node positions, then build the pygraphviz graph
    self.makeLayout()
    self.setAgraph()
    # whole network in normal and reversed spiral orderings
    self.plotGraph()
    self.plotGraph("reversed", filename="tgraphR.png")
    sectors = self.np.sectorialized_agents__
    agents = n.concatenate(sectors)
    for idx, sector in enumerate(sectors):
        # nodes outside the current sector (computed once per sector)
        complement = n.setdiff1d(agents, sector)
        self.plotGraph("plain", sector, "sector{:02}.png".format(idx))
        self.plotGraph("reversed", sector, "sector{:02}R.png".format(idx))
        self.plotGraph("plain", complement, "sector{:02}N.png".format(idx))
        self.plotGraph("reversed", complement, "sector{:02}RN.png".format(idx))
    # empty frame used as a blank/rest image
    self.plotGraph("plain", [], "BLANK.png")
|
Make spiral images in sectors and steps.
Plain, reversed,
sectorialized, negative sectorialized
outline, outline reversed, lonely
only nodes, only edges, both
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fsong.py#L42-L63
|
[
"def plotGraph(self,mode=\"plain\",nodes=None,filename=\"tgraph.png\"):\n \"\"\"Plot graph with nodes (iterable) into filename\n \"\"\"\n if nodes==None:\n nodes=self.nodes\n else:\n nodes=[i for i in self.nodes if i in nodes]\n for node in self.nodes:\n n_=self.A.get_node(node)\n if mode==\"plain\":\n nmode=1\n else:\n nmode=-1\n pos=\"{},{}\".format(self.xi[::nmode][self.nm.nodes_.index(node)],self.yi[::nmode][self.nm.nodes_.index(node)])\n n_.attr[\"pos\"]=pos\n n_.attr[\"pin\"]=True\n color='#%02x%02x%02x' % tuple([255*i for i in self.cm[int(self.clustering[n_]*255)][:-1]])\n n_.attr['fillcolor']= color\n n_.attr['fixedsize']=True\n n_.attr['width']= abs(.1*(self.nm.degrees[n_]+ .5))\n n_.attr['height']= abs(.1*(self.nm.degrees[n_]+.5))\n n_.attr[\"label\"]=\"\"\n if node not in nodes:\n n_.attr[\"style\"]=\"invis\"\n else:\n n_.attr[\"style\"]=\"filled\"\n for e in self.edges:\n e.attr['penwidth']=3.4\n e.attr[\"arrowsize\"]=1.5\n e.attr[\"arrowhead\"]=\"lteeoldiamond\"\n e.attr[\"style\"]=\"\"\n if sum([i in nodes for i in (e[0],e[1])])==2:\n e.attr[\"style\"]=\"\"\n else:\n e.attr[\"style\"]=\"invis\"\n tname=\"{}{}\".format(self.basedir,filename)\n print(tname)\n self.A.draw(tname,prog=\"neato\")\n",
"def setAgraph(self):\n self.A=x.to_agraph(self.network)\n self.A.graph_attr[\"viewport\"]=\"500,500,.03\"\n self.edges=self.A.edges()\n self.nodes=self.A.nodes()\n self.cm=p.cm.Reds(range(2**10)) # color table\n self.clustering=x.clustering(self.network)\n",
"def makeLayout(self):\n ri=4\n rf=100\n nturns=3\n ii=n.linspace(0,nturns*2*n.pi,self.nm.N)\n rr=n.linspace(ri,rf,self.nm.N)\n self.xi=(rr*n.cos(ii))\n self.yi=(rr*n.sin(ii))\n"
] |
class FSong:
"""Create song from undirected (friendship) network
"""
def __init__(self, network,basedir="fsong/",clean=False,render_images=False,render_images2=False,make_video=False):
os.system("mkdir {}".format(basedir))
if clean:
os.system("rm {}*".format(basedir))
self.basedir=basedir
self.network=network
self.makePartitions()
if render_images:
self.makeImages()
self.make_video=make_video
self.makeSong()
if render_images2:
self.makeImages2()
self.makeSong2()
def makeSong2(self):
pass
def makePartitions(self):
"""Make partitions with gmane help.
"""
class NetworkMeasures:
pass
self.nm=nm=NetworkMeasures()
nm.degrees=self.network.degree()
nm.nodes_= sorted(self.network.nodes(), key=lambda x : nm.degrees[x])
nm.degrees_=[nm.degrees[i] for i in nm.nodes_]
nm.edges= self.network.edges(data=True)
nm.E=self.network.number_of_edges()
nm.N=self.network.number_of_nodes()
self.np=g.NetworkPartitioning(nm,10,metric="g")
def makeImages2(self):
for i, node in enumerate(self.nm.nodes_):
self.plotGraph("plain", [node],"lonely{:09}.png".format(i))
self.plotGraph("reversed",[node],"lonely{:09}R.png".format(i))
self.plotGraph("plain", self.nm.nodes_[:i],"stair{:09}.png".format(i))
self.plotGraph("reversed",self.nm.nodes_[:i],"stair{:09}R.png".format(i))
# plotar novamente usando somente vertices e depois somente arestas
def plotGraph(self,mode="plain",nodes=None,filename="tgraph.png"):
"""Plot graph with nodes (iterable) into filename
"""
if nodes==None:
nodes=self.nodes
else:
nodes=[i for i in self.nodes if i in nodes]
for node in self.nodes:
n_=self.A.get_node(node)
if mode=="plain":
nmode=1
else:
nmode=-1
pos="{},{}".format(self.xi[::nmode][self.nm.nodes_.index(node)],self.yi[::nmode][self.nm.nodes_.index(node)])
n_.attr["pos"]=pos
n_.attr["pin"]=True
color='#%02x%02x%02x' % tuple([255*i for i in self.cm[int(self.clustering[n_]*255)][:-1]])
n_.attr['fillcolor']= color
n_.attr['fixedsize']=True
n_.attr['width']= abs(.1*(self.nm.degrees[n_]+ .5))
n_.attr['height']= abs(.1*(self.nm.degrees[n_]+.5))
n_.attr["label"]=""
if node not in nodes:
n_.attr["style"]="invis"
else:
n_.attr["style"]="filled"
for e in self.edges:
e.attr['penwidth']=3.4
e.attr["arrowsize"]=1.5
e.attr["arrowhead"]="lteeoldiamond"
e.attr["style"]=""
if sum([i in nodes for i in (e[0],e[1])])==2:
e.attr["style"]=""
else:
e.attr["style"]="invis"
tname="{}{}".format(self.basedir,filename)
print(tname)
self.A.draw(tname,prog="neato")
def setAgraph(self):
self.A=x.to_agraph(self.network)
self.A.graph_attr["viewport"]="500,500,.03"
self.edges=self.A.edges()
self.nodes=self.A.nodes()
self.cm=p.cm.Reds(range(2**10)) # color table
self.clustering=x.clustering(self.network)
def makeLayout(self):
ri=4
rf=100
nturns=3
ii=n.linspace(0,nturns*2*n.pi,self.nm.N)
rr=n.linspace(ri,rf,self.nm.N)
self.xi=(rr*n.cos(ii))
self.yi=(rr*n.sin(ii))
def makeSong(self):
"""Render abstract animation
"""
self.makeVisualSong()
self.makeAudibleSong()
if self.make_video:
self.makeAnimation()
def makeVisualSong(self):
"""Return a sequence of images and durations.
"""
self.files=os.listdir(self.basedir)
self.stairs=[i for i in self.files if ("stair" in i) and ("R" in i)]
self.sectors=[i for i in self.files if "sector" in i]
self.stairs.sort()
self.sectors.sort()
filenames=[self.basedir+i for i in self.sectors[:4]]
self.iS0=mpy.ImageSequenceClip(filenames,durations=[1.5,2.5,.5,1.5])
self.iS1=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3]],
durations=[0.25]*8)
self.iS2=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[0]],
durations=[0.75,0.25,0.75,0.25,2.]) # cai para sensível
self.iS3=mpy.ImageSequenceClip(
[self.basedir+"BLANK.png",
self.basedir+self.sectors[0],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[0],
self.basedir+self.sectors[0]],
durations=[1,0.5,2.,.25,.25,1.75, 0.25]) # [-1,8]
self.iS4=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[3], # .5
self.basedir+self.sectors[5], # .5
self.basedir+self.sectors[2], # .75
self.basedir+self.sectors[0], #.25
self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[0], # 2 8
self.basedir+self.sectors[3], # 2 7
self.basedir+self.sectors[0], # 2 -1
self.basedir+"BLANK.png",# 2
],
durations=[1,0.5,0.5,.75,
.25,1., 2.,2.,2.,2.]) # [0,7,11,0]
self.iS=mpy.concatenate_videoclips((
self.iS0,self.iS1,self.iS2,self.iS3,self.iS4))
# Clip with three first images3
# each sector a sound
# sweep from periphery to center
# all, all inverted
# sectors with inversions
def makeAudibleSong(self):
"""Use mass to render wav soundtrack.
"""
sound0=n.hstack((sy.render(220,d=1.5),
sy.render(220*(2**(7/12)),d=2.5),
sy.render(220*(2**(-5/12)),d=.5),
sy.render(220*(2**(0/12)),d=1.5),
))
sound1=n.hstack((sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
))
sound2=n.hstack((sy.render(220*(2**(0/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(-1/12)),d=2.0),
))
sound3=n.hstack((n.zeros(44100),
sy.render(220*(2**(-1/12)),d=.5),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(-1/12)),d=1.75),
sy.render(220*(2**(-1/12)),d=.25),
))
sound4=n.hstack((
sy.render(220*(2**(0/12)),d=1.),
sy.render(220*(2**(7/12)),d=.5),
sy.render(220*(2**(11/12)),d=.5),
sy.render(220*(2**(12/12)),d=.75),
sy.render(220*(2**(11/12)),d=.25),
sy.render(220*(2**(12/12)),d=1.),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(7/12)),d=2.),
sy.render(220*(2**(-1/12)),d=2.),
n.zeros(2*44100)
))
sound=n.hstack((sound0,sound1,sound2,sound3,sound4))
UT.write(sound,"sound.wav")
def makeAnimation(self):
"""Use pymovie to render (visual+audio)+text overlays.
"""
aclip=mpy.AudioFileClip("sound.wav")
self.iS=self.iS.set_audio(aclip)
self.iS.write_videofile("mixedVideo.webm",15,audio=True)
print("wrote "+"mixedVideo.webm")
|
ttm/socialLegacy
|
social/fsong.py
|
FSong.plotGraph
|
python
|
def plotGraph(self, mode="plain", nodes=None, filename="tgraph.png"):
    """Plot graph with nodes (iterable) into filename.

    Every node/edge of ``self.A`` is styled; those not in *nodes* are
    rendered invisible rather than omitted, so layout stays constant
    across frames.  ``mode`` != "plain" reverses the spiral position
    ordering.  Output is drawn to ``self.basedir + filename`` via neato.
    """
    if nodes == None:
        nodes = self.nodes
    else:
        # keep only known nodes, preserving self.nodes order
        nodes = [i for i in self.nodes if i in nodes]
    for node in self.nodes:
        n_ = self.A.get_node(node)
        if mode == "plain":
            nmode = 1
        else:
            nmode = -1  # reversed spiral: index into the flipped coordinate arrays
        # pin the node at its precomputed spiral coordinate (makeLayout)
        pos = "{},{}".format(self.xi[::nmode][self.nm.nodes_.index(node)], self.yi[::nmode][self.nm.nodes_.index(node)])
        n_.attr["pos"] = pos
        n_.attr["pin"] = True
        # map clustering coefficient [0,1] into the Reds colormap (RGB only)
        color = '#%02x%02x%02x' % tuple([255*i for i in self.cm[int(self.clustering[n_]*255)][:-1]])
        n_.attr['fillcolor'] = color
        n_.attr['fixedsize'] = True
        # node size scales linearly with degree
        n_.attr['width'] = abs(.1*(self.nm.degrees[n_] + .5))
        n_.attr['height'] = abs(.1*(self.nm.degrees[n_]+.5))
        n_.attr["label"] = ""
        if node not in nodes:
            n_.attr["style"] = "invis"
        else:
            n_.attr["style"] = "filled"
    # NOTE(review): source dump lost indentation — this edge loop is taken
    # as a sibling of the node loop above; confirm against the repository.
    for e in self.edges:
        e.attr['penwidth'] = 3.4
        e.attr["arrowsize"] = 1.5
        e.attr["arrowhead"] = "lteeoldiamond"
        e.attr["style"] = ""
        # an edge is visible only when both endpoints are selected
        if sum([i in nodes for i in (e[0], e[1])]) == 2:
            e.attr["style"] = ""
        else:
            e.attr["style"] = "invis"
    tname = "{}{}".format(self.basedir, filename)
    print(tname)
    self.A.draw(tname, prog="neato")
|
Plot graph with nodes (iterable) into filename
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fsong.py#L72-L109
| null |
class FSong:
"""Create song from undirected (friendship) network
"""
def __init__(self, network,basedir="fsong/",clean=False,render_images=False,render_images2=False,make_video=False):
os.system("mkdir {}".format(basedir))
if clean:
os.system("rm {}*".format(basedir))
self.basedir=basedir
self.network=network
self.makePartitions()
if render_images:
self.makeImages()
self.make_video=make_video
self.makeSong()
if render_images2:
self.makeImages2()
self.makeSong2()
def makeSong2(self):
pass
def makePartitions(self):
"""Make partitions with gmane help.
"""
class NetworkMeasures:
pass
self.nm=nm=NetworkMeasures()
nm.degrees=self.network.degree()
nm.nodes_= sorted(self.network.nodes(), key=lambda x : nm.degrees[x])
nm.degrees_=[nm.degrees[i] for i in nm.nodes_]
nm.edges= self.network.edges(data=True)
nm.E=self.network.number_of_edges()
nm.N=self.network.number_of_nodes()
self.np=g.NetworkPartitioning(nm,10,metric="g")
def makeImages(self):
"""Make spiral images in sectors and steps.
Plain, reversed,
sectorialized, negative sectorialized
outline, outline reversed, lonely
only nodes, only edges, both
"""
# make layout
self.makeLayout()
self.setAgraph()
# make function that accepts a mode, a sector
# and nodes and edges True and False
self.plotGraph()
self.plotGraph("reversed",filename="tgraphR.png")
agents=n.concatenate(self.np.sectorialized_agents__)
for i, sector in enumerate(self.np.sectorialized_agents__):
self.plotGraph("plain", sector,"sector{:02}.png".format(i))
self.plotGraph("reversed",sector,"sector{:02}R.png".format(i))
self.plotGraph("plain", n.setdiff1d(agents,sector),"sector{:02}N.png".format(i))
self.plotGraph("reversed",n.setdiff1d(agents,sector),"sector{:02}RN.png".format(i))
self.plotGraph("plain", [],"BLANK.png")
def makeImages2(self):
for i, node in enumerate(self.nm.nodes_):
self.plotGraph("plain", [node],"lonely{:09}.png".format(i))
self.plotGraph("reversed",[node],"lonely{:09}R.png".format(i))
self.plotGraph("plain", self.nm.nodes_[:i],"stair{:09}.png".format(i))
self.plotGraph("reversed",self.nm.nodes_[:i],"stair{:09}R.png".format(i))
# plotar novamente usando somente vertices e depois somente arestas
def setAgraph(self):
self.A=x.to_agraph(self.network)
self.A.graph_attr["viewport"]="500,500,.03"
self.edges=self.A.edges()
self.nodes=self.A.nodes()
self.cm=p.cm.Reds(range(2**10)) # color table
self.clustering=x.clustering(self.network)
def makeLayout(self):
ri=4
rf=100
nturns=3
ii=n.linspace(0,nturns*2*n.pi,self.nm.N)
rr=n.linspace(ri,rf,self.nm.N)
self.xi=(rr*n.cos(ii))
self.yi=(rr*n.sin(ii))
def makeSong(self):
"""Render abstract animation
"""
self.makeVisualSong()
self.makeAudibleSong()
if self.make_video:
self.makeAnimation()
def makeVisualSong(self):
"""Return a sequence of images and durations.
"""
self.files=os.listdir(self.basedir)
self.stairs=[i for i in self.files if ("stair" in i) and ("R" in i)]
self.sectors=[i for i in self.files if "sector" in i]
self.stairs.sort()
self.sectors.sort()
filenames=[self.basedir+i for i in self.sectors[:4]]
self.iS0=mpy.ImageSequenceClip(filenames,durations=[1.5,2.5,.5,1.5])
self.iS1=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3]],
durations=[0.25]*8)
self.iS2=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[0]],
durations=[0.75,0.25,0.75,0.25,2.]) # cai para sensível
self.iS3=mpy.ImageSequenceClip(
[self.basedir+"BLANK.png",
self.basedir+self.sectors[0],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[0],
self.basedir+self.sectors[0]],
durations=[1,0.5,2.,.25,.25,1.75, 0.25]) # [-1,8]
self.iS4=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[3], # .5
self.basedir+self.sectors[5], # .5
self.basedir+self.sectors[2], # .75
self.basedir+self.sectors[0], #.25
self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[0], # 2 8
self.basedir+self.sectors[3], # 2 7
self.basedir+self.sectors[0], # 2 -1
self.basedir+"BLANK.png",# 2
],
durations=[1,0.5,0.5,.75,
.25,1., 2.,2.,2.,2.]) # [0,7,11,0]
self.iS=mpy.concatenate_videoclips((
self.iS0,self.iS1,self.iS2,self.iS3,self.iS4))
# Clip with three first images3
# each sector a sound
# sweep from periphery to center
# all, all inverted
# sectors with inversions
def makeAudibleSong(self):
"""Use mass to render wav soundtrack.
"""
sound0=n.hstack((sy.render(220,d=1.5),
sy.render(220*(2**(7/12)),d=2.5),
sy.render(220*(2**(-5/12)),d=.5),
sy.render(220*(2**(0/12)),d=1.5),
))
sound1=n.hstack((sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
))
sound2=n.hstack((sy.render(220*(2**(0/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(-1/12)),d=2.0),
))
sound3=n.hstack((n.zeros(44100),
sy.render(220*(2**(-1/12)),d=.5),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(-1/12)),d=1.75),
sy.render(220*(2**(-1/12)),d=.25),
))
sound4=n.hstack((
sy.render(220*(2**(0/12)),d=1.),
sy.render(220*(2**(7/12)),d=.5),
sy.render(220*(2**(11/12)),d=.5),
sy.render(220*(2**(12/12)),d=.75),
sy.render(220*(2**(11/12)),d=.25),
sy.render(220*(2**(12/12)),d=1.),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(7/12)),d=2.),
sy.render(220*(2**(-1/12)),d=2.),
n.zeros(2*44100)
))
sound=n.hstack((sound0,sound1,sound2,sound3,sound4))
UT.write(sound,"sound.wav")
def makeAnimation(self):
"""Use pymovie to render (visual+audio)+text overlays.
"""
aclip=mpy.AudioFileClip("sound.wav")
self.iS=self.iS.set_audio(aclip)
self.iS.write_videofile("mixedVideo.webm",15,audio=True)
print("wrote "+"mixedVideo.webm")
|
ttm/socialLegacy
|
social/fsong.py
|
FSong.makeSong
|
python
|
def makeSong(self):
    """Render abstract animation: image sequence, soundtrack, optional video."""
    self.makeVisualSong()
    self.makeAudibleSong()
    if not self.make_video:
        return
    self.makeAnimation()
|
Render abstract animation
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fsong.py#L127-L133
|
[
"def makeVisualSong(self):\n \"\"\"Return a sequence of images and durations.\n \"\"\"\n self.files=os.listdir(self.basedir)\n self.stairs=[i for i in self.files if (\"stair\" in i) and (\"R\" in i)]\n self.sectors=[i for i in self.files if \"sector\" in i]\n self.stairs.sort()\n self.sectors.sort()\n filenames=[self.basedir+i for i in self.sectors[:4]]\n self.iS0=mpy.ImageSequenceClip(filenames,durations=[1.5,2.5,.5,1.5])\n self.iS1=mpy.ImageSequenceClip(\n [self.basedir+self.sectors[2],\n self.basedir+self.sectors[3],\n self.basedir+self.sectors[2],\n self.basedir+self.sectors[3],\n self.basedir+self.sectors[2],\n self.basedir+self.sectors[3],\n self.basedir+self.sectors[2],\n self.basedir+self.sectors[3]],\n durations=[0.25]*8)\n self.iS2=mpy.ImageSequenceClip(\n [self.basedir+self.sectors[2],\n self.basedir+self.sectors[3],\n self.basedir+self.sectors[2],\n self.basedir+self.sectors[3],\n self.basedir+self.sectors[0]],\n durations=[0.75,0.25,0.75,0.25,2.]) # cai para sensível\n\n self.iS3=mpy.ImageSequenceClip(\n [self.basedir+\"BLANK.png\",\n self.basedir+self.sectors[0],\n self.basedir+self.sectors[1],\n self.basedir+self.sectors[1],\n self.basedir+self.sectors[1],\n self.basedir+self.sectors[0],\n self.basedir+self.sectors[0]],\n durations=[1,0.5,2.,.25,.25,1.75, 0.25]) # [-1,8]\n\n self.iS4=mpy.ImageSequenceClip(\n [self.basedir+self.sectors[2], # 1\n self.basedir+self.sectors[3], # .5\n self.basedir+self.sectors[5], # .5\n self.basedir+self.sectors[2], # .75\n self.basedir+self.sectors[0], #.25\n self.basedir+self.sectors[2], # 1\n self.basedir+self.sectors[0], # 2 8\n self.basedir+self.sectors[3], # 2 7\n self.basedir+self.sectors[0], # 2 -1\n self.basedir+\"BLANK.png\",# 2\n ],\n durations=[1,0.5,0.5,.75,\n .25,1., 2.,2.,2.,2.]) # [0,7,11,0]\n\n self.iS=mpy.concatenate_videoclips((\n self.iS0,self.iS1,self.iS2,self.iS3,self.iS4))\n",
"def makeAudibleSong(self):\n \"\"\"Use mass to render wav soundtrack.\n \"\"\"\n sound0=n.hstack((sy.render(220,d=1.5),\n sy.render(220*(2**(7/12)),d=2.5),\n sy.render(220*(2**(-5/12)),d=.5),\n sy.render(220*(2**(0/12)),d=1.5),\n ))\n sound1=n.hstack((sy.render(220*(2**(0/12)),d=.25),\n sy.render(220*(2**(7/12)),d=.25),\n sy.render(220*(2**(0/12)),d=.25),\n sy.render(220*(2**(7/12)),d=.25),\n sy.render(220*(2**(0/12)),d=.25),\n sy.render(220*(2**(7/12)),d=.25),\n sy.render(220*(2**(0/12)),d=.25),\n sy.render(220*(2**(7/12)),d=.25),\n ))\n sound2=n.hstack((sy.render(220*(2**(0/12)),d=.75),\n sy.render(220*(2**(0/12)),d=.25),\n sy.render(220*(2**(7/12)),d=.75),\n sy.render(220*(2**(0/12)),d=.25),\n sy.render(220*(2**(-1/12)),d=2.0),\n ))\n sound3=n.hstack((n.zeros(44100),\n sy.render(220*(2**(-1/12)),d=.5),\n sy.render(220*(2**(8/12)),d=2.),\n sy.render(220*(2**(8/12)),d=.25),\n sy.render(220*(2**(8/12)),d=.25),\n sy.render(220*(2**(-1/12)),d=1.75),\n sy.render(220*(2**(-1/12)),d=.25),\n ))\n sound4=n.hstack((\n sy.render(220*(2**(0/12)),d=1.),\n sy.render(220*(2**(7/12)),d=.5),\n sy.render(220*(2**(11/12)),d=.5),\n sy.render(220*(2**(12/12)),d=.75),\n sy.render(220*(2**(11/12)),d=.25),\n sy.render(220*(2**(12/12)),d=1.),\n sy.render(220*(2**(8/12)),d=2.),\n sy.render(220*(2**(7/12)),d=2.),\n sy.render(220*(2**(-1/12)),d=2.),\n n.zeros(2*44100)\n ))\n\n sound=n.hstack((sound0,sound1,sound2,sound3,sound4))\n UT.write(sound,\"sound.wav\")\n",
"def makeAnimation(self):\n \"\"\"Use pymovie to render (visual+audio)+text overlays.\n \"\"\"\n aclip=mpy.AudioFileClip(\"sound.wav\")\n self.iS=self.iS.set_audio(aclip)\n self.iS.write_videofile(\"mixedVideo.webm\",15,audio=True)\n print(\"wrote \"+\"mixedVideo.webm\")\n"
] |
class FSong:
"""Create song from undirected (friendship) network
"""
def __init__(self, network,basedir="fsong/",clean=False,render_images=False,render_images2=False,make_video=False):
os.system("mkdir {}".format(basedir))
if clean:
os.system("rm {}*".format(basedir))
self.basedir=basedir
self.network=network
self.makePartitions()
if render_images:
self.makeImages()
self.make_video=make_video
self.makeSong()
if render_images2:
self.makeImages2()
self.makeSong2()
def makeSong2(self):
pass
def makePartitions(self):
"""Make partitions with gmane help.
"""
class NetworkMeasures:
pass
self.nm=nm=NetworkMeasures()
nm.degrees=self.network.degree()
nm.nodes_= sorted(self.network.nodes(), key=lambda x : nm.degrees[x])
nm.degrees_=[nm.degrees[i] for i in nm.nodes_]
nm.edges= self.network.edges(data=True)
nm.E=self.network.number_of_edges()
nm.N=self.network.number_of_nodes()
self.np=g.NetworkPartitioning(nm,10,metric="g")
def makeImages(self):
"""Make spiral images in sectors and steps.
Plain, reversed,
sectorialized, negative sectorialized
outline, outline reversed, lonely
only nodes, only edges, both
"""
# make layout
self.makeLayout()
self.setAgraph()
# make function that accepts a mode, a sector
# and nodes and edges True and False
self.plotGraph()
self.plotGraph("reversed",filename="tgraphR.png")
agents=n.concatenate(self.np.sectorialized_agents__)
for i, sector in enumerate(self.np.sectorialized_agents__):
self.plotGraph("plain", sector,"sector{:02}.png".format(i))
self.plotGraph("reversed",sector,"sector{:02}R.png".format(i))
self.plotGraph("plain", n.setdiff1d(agents,sector),"sector{:02}N.png".format(i))
self.plotGraph("reversed",n.setdiff1d(agents,sector),"sector{:02}RN.png".format(i))
self.plotGraph("plain", [],"BLANK.png")
def makeImages2(self):
for i, node in enumerate(self.nm.nodes_):
self.plotGraph("plain", [node],"lonely{:09}.png".format(i))
self.plotGraph("reversed",[node],"lonely{:09}R.png".format(i))
self.plotGraph("plain", self.nm.nodes_[:i],"stair{:09}.png".format(i))
self.plotGraph("reversed",self.nm.nodes_[:i],"stair{:09}R.png".format(i))
# plotar novamente usando somente vertices e depois somente arestas
def plotGraph(self,mode="plain",nodes=None,filename="tgraph.png"):
"""Plot graph with nodes (iterable) into filename
"""
if nodes==None:
nodes=self.nodes
else:
nodes=[i for i in self.nodes if i in nodes]
for node in self.nodes:
n_=self.A.get_node(node)
if mode=="plain":
nmode=1
else:
nmode=-1
pos="{},{}".format(self.xi[::nmode][self.nm.nodes_.index(node)],self.yi[::nmode][self.nm.nodes_.index(node)])
n_.attr["pos"]=pos
n_.attr["pin"]=True
color='#%02x%02x%02x' % tuple([255*i for i in self.cm[int(self.clustering[n_]*255)][:-1]])
n_.attr['fillcolor']= color
n_.attr['fixedsize']=True
n_.attr['width']= abs(.1*(self.nm.degrees[n_]+ .5))
n_.attr['height']= abs(.1*(self.nm.degrees[n_]+.5))
n_.attr["label"]=""
if node not in nodes:
n_.attr["style"]="invis"
else:
n_.attr["style"]="filled"
for e in self.edges:
e.attr['penwidth']=3.4
e.attr["arrowsize"]=1.5
e.attr["arrowhead"]="lteeoldiamond"
e.attr["style"]=""
if sum([i in nodes for i in (e[0],e[1])])==2:
e.attr["style"]=""
else:
e.attr["style"]="invis"
tname="{}{}".format(self.basedir,filename)
print(tname)
self.A.draw(tname,prog="neato")
def setAgraph(self):
self.A=x.to_agraph(self.network)
self.A.graph_attr["viewport"]="500,500,.03"
self.edges=self.A.edges()
self.nodes=self.A.nodes()
self.cm=p.cm.Reds(range(2**10)) # color table
self.clustering=x.clustering(self.network)
def makeLayout(self):
ri=4
rf=100
nturns=3
ii=n.linspace(0,nturns*2*n.pi,self.nm.N)
rr=n.linspace(ri,rf,self.nm.N)
self.xi=(rr*n.cos(ii))
self.yi=(rr*n.sin(ii))
def makeVisualSong(self):
"""Return a sequence of images and durations.
"""
self.files=os.listdir(self.basedir)
self.stairs=[i for i in self.files if ("stair" in i) and ("R" in i)]
self.sectors=[i for i in self.files if "sector" in i]
self.stairs.sort()
self.sectors.sort()
filenames=[self.basedir+i for i in self.sectors[:4]]
self.iS0=mpy.ImageSequenceClip(filenames,durations=[1.5,2.5,.5,1.5])
self.iS1=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3]],
durations=[0.25]*8)
self.iS2=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[0]],
durations=[0.75,0.25,0.75,0.25,2.]) # cai para sensível
self.iS3=mpy.ImageSequenceClip(
[self.basedir+"BLANK.png",
self.basedir+self.sectors[0],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[0],
self.basedir+self.sectors[0]],
durations=[1,0.5,2.,.25,.25,1.75, 0.25]) # [-1,8]
self.iS4=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[3], # .5
self.basedir+self.sectors[5], # .5
self.basedir+self.sectors[2], # .75
self.basedir+self.sectors[0], #.25
self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[0], # 2 8
self.basedir+self.sectors[3], # 2 7
self.basedir+self.sectors[0], # 2 -1
self.basedir+"BLANK.png",# 2
],
durations=[1,0.5,0.5,.75,
.25,1., 2.,2.,2.,2.]) # [0,7,11,0]
self.iS=mpy.concatenate_videoclips((
self.iS0,self.iS1,self.iS2,self.iS3,self.iS4))
# Clip with three first images3
# each sector a sound
# sweep from periphery to center
# all, all inverted
# sectors with inversions
def makeAudibleSong(self):
"""Use mass to render wav soundtrack.
"""
sound0=n.hstack((sy.render(220,d=1.5),
sy.render(220*(2**(7/12)),d=2.5),
sy.render(220*(2**(-5/12)),d=.5),
sy.render(220*(2**(0/12)),d=1.5),
))
sound1=n.hstack((sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
))
sound2=n.hstack((sy.render(220*(2**(0/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(-1/12)),d=2.0),
))
sound3=n.hstack((n.zeros(44100),
sy.render(220*(2**(-1/12)),d=.5),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(-1/12)),d=1.75),
sy.render(220*(2**(-1/12)),d=.25),
))
sound4=n.hstack((
sy.render(220*(2**(0/12)),d=1.),
sy.render(220*(2**(7/12)),d=.5),
sy.render(220*(2**(11/12)),d=.5),
sy.render(220*(2**(12/12)),d=.75),
sy.render(220*(2**(11/12)),d=.25),
sy.render(220*(2**(12/12)),d=1.),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(7/12)),d=2.),
sy.render(220*(2**(-1/12)),d=2.),
n.zeros(2*44100)
))
sound=n.hstack((sound0,sound1,sound2,sound3,sound4))
UT.write(sound,"sound.wav")
def makeAnimation(self):
"""Use pymovie to render (visual+audio)+text overlays.
"""
aclip=mpy.AudioFileClip("sound.wav")
self.iS=self.iS.set_audio(aclip)
self.iS.write_videofile("mixedVideo.webm",15,audio=True)
print("wrote "+"mixedVideo.webm")
|
ttm/socialLegacy
|
social/fsong.py
|
FSong.makeVisualSong
|
python
|
def makeVisualSong(self):
self.files=os.listdir(self.basedir)
self.stairs=[i for i in self.files if ("stair" in i) and ("R" in i)]
self.sectors=[i for i in self.files if "sector" in i]
self.stairs.sort()
self.sectors.sort()
filenames=[self.basedir+i for i in self.sectors[:4]]
self.iS0=mpy.ImageSequenceClip(filenames,durations=[1.5,2.5,.5,1.5])
self.iS1=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3]],
durations=[0.25]*8)
self.iS2=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[0]],
durations=[0.75,0.25,0.75,0.25,2.]) # cai para sensível
self.iS3=mpy.ImageSequenceClip(
[self.basedir+"BLANK.png",
self.basedir+self.sectors[0],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[0],
self.basedir+self.sectors[0]],
durations=[1,0.5,2.,.25,.25,1.75, 0.25]) # [-1,8]
self.iS4=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[3], # .5
self.basedir+self.sectors[5], # .5
self.basedir+self.sectors[2], # .75
self.basedir+self.sectors[0], #.25
self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[0], # 2 8
self.basedir+self.sectors[3], # 2 7
self.basedir+self.sectors[0], # 2 -1
self.basedir+"BLANK.png",# 2
],
durations=[1,0.5,0.5,.75,
.25,1., 2.,2.,2.,2.]) # [0,7,11,0]
self.iS=mpy.concatenate_videoclips((
self.iS0,self.iS1,self.iS2,self.iS3,self.iS4))
|
Return a sequence of images and durations.
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fsong.py#L134-L188
| null |
class FSong:
"""Create song from undirected (friendship) network
"""
def __init__(self, network,basedir="fsong/",clean=False,render_images=False,render_images2=False,make_video=False):
os.system("mkdir {}".format(basedir))
if clean:
os.system("rm {}*".format(basedir))
self.basedir=basedir
self.network=network
self.makePartitions()
if render_images:
self.makeImages()
self.make_video=make_video
self.makeSong()
if render_images2:
self.makeImages2()
self.makeSong2()
def makeSong2(self):
pass
def makePartitions(self):
"""Make partitions with gmane help.
"""
class NetworkMeasures:
pass
self.nm=nm=NetworkMeasures()
nm.degrees=self.network.degree()
nm.nodes_= sorted(self.network.nodes(), key=lambda x : nm.degrees[x])
nm.degrees_=[nm.degrees[i] for i in nm.nodes_]
nm.edges= self.network.edges(data=True)
nm.E=self.network.number_of_edges()
nm.N=self.network.number_of_nodes()
self.np=g.NetworkPartitioning(nm,10,metric="g")
def makeImages(self):
"""Make spiral images in sectors and steps.
Plain, reversed,
sectorialized, negative sectorialized
outline, outline reversed, lonely
only nodes, only edges, both
"""
# make layout
self.makeLayout()
self.setAgraph()
# make function that accepts a mode, a sector
# and nodes and edges True and False
self.plotGraph()
self.plotGraph("reversed",filename="tgraphR.png")
agents=n.concatenate(self.np.sectorialized_agents__)
for i, sector in enumerate(self.np.sectorialized_agents__):
self.plotGraph("plain", sector,"sector{:02}.png".format(i))
self.plotGraph("reversed",sector,"sector{:02}R.png".format(i))
self.plotGraph("plain", n.setdiff1d(agents,sector),"sector{:02}N.png".format(i))
self.plotGraph("reversed",n.setdiff1d(agents,sector),"sector{:02}RN.png".format(i))
self.plotGraph("plain", [],"BLANK.png")
def makeImages2(self):
for i, node in enumerate(self.nm.nodes_):
self.plotGraph("plain", [node],"lonely{:09}.png".format(i))
self.plotGraph("reversed",[node],"lonely{:09}R.png".format(i))
self.plotGraph("plain", self.nm.nodes_[:i],"stair{:09}.png".format(i))
self.plotGraph("reversed",self.nm.nodes_[:i],"stair{:09}R.png".format(i))
# plotar novamente usando somente vertices e depois somente arestas
def plotGraph(self,mode="plain",nodes=None,filename="tgraph.png"):
"""Plot graph with nodes (iterable) into filename
"""
if nodes==None:
nodes=self.nodes
else:
nodes=[i for i in self.nodes if i in nodes]
for node in self.nodes:
n_=self.A.get_node(node)
if mode=="plain":
nmode=1
else:
nmode=-1
pos="{},{}".format(self.xi[::nmode][self.nm.nodes_.index(node)],self.yi[::nmode][self.nm.nodes_.index(node)])
n_.attr["pos"]=pos
n_.attr["pin"]=True
color='#%02x%02x%02x' % tuple([255*i for i in self.cm[int(self.clustering[n_]*255)][:-1]])
n_.attr['fillcolor']= color
n_.attr['fixedsize']=True
n_.attr['width']= abs(.1*(self.nm.degrees[n_]+ .5))
n_.attr['height']= abs(.1*(self.nm.degrees[n_]+.5))
n_.attr["label"]=""
if node not in nodes:
n_.attr["style"]="invis"
else:
n_.attr["style"]="filled"
for e in self.edges:
e.attr['penwidth']=3.4
e.attr["arrowsize"]=1.5
e.attr["arrowhead"]="lteeoldiamond"
e.attr["style"]=""
if sum([i in nodes for i in (e[0],e[1])])==2:
e.attr["style"]=""
else:
e.attr["style"]="invis"
tname="{}{}".format(self.basedir,filename)
print(tname)
self.A.draw(tname,prog="neato")
def setAgraph(self):
self.A=x.to_agraph(self.network)
self.A.graph_attr["viewport"]="500,500,.03"
self.edges=self.A.edges()
self.nodes=self.A.nodes()
self.cm=p.cm.Reds(range(2**10)) # color table
self.clustering=x.clustering(self.network)
def makeLayout(self):
ri=4
rf=100
nturns=3
ii=n.linspace(0,nturns*2*n.pi,self.nm.N)
rr=n.linspace(ri,rf,self.nm.N)
self.xi=(rr*n.cos(ii))
self.yi=(rr*n.sin(ii))
def makeSong(self):
"""Render abstract animation
"""
self.makeVisualSong()
self.makeAudibleSong()
if self.make_video:
self.makeAnimation()
# Clip with three first images3
# each sector a sound
# sweep from periphery to center
# all, all inverted
# sectors with inversions
def makeAudibleSong(self):
"""Use mass to render wav soundtrack.
"""
sound0=n.hstack((sy.render(220,d=1.5),
sy.render(220*(2**(7/12)),d=2.5),
sy.render(220*(2**(-5/12)),d=.5),
sy.render(220*(2**(0/12)),d=1.5),
))
sound1=n.hstack((sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
))
sound2=n.hstack((sy.render(220*(2**(0/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(-1/12)),d=2.0),
))
sound3=n.hstack((n.zeros(44100),
sy.render(220*(2**(-1/12)),d=.5),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(-1/12)),d=1.75),
sy.render(220*(2**(-1/12)),d=.25),
))
sound4=n.hstack((
sy.render(220*(2**(0/12)),d=1.),
sy.render(220*(2**(7/12)),d=.5),
sy.render(220*(2**(11/12)),d=.5),
sy.render(220*(2**(12/12)),d=.75),
sy.render(220*(2**(11/12)),d=.25),
sy.render(220*(2**(12/12)),d=1.),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(7/12)),d=2.),
sy.render(220*(2**(-1/12)),d=2.),
n.zeros(2*44100)
))
sound=n.hstack((sound0,sound1,sound2,sound3,sound4))
UT.write(sound,"sound.wav")
def makeAnimation(self):
"""Use pymovie to render (visual+audio)+text overlays.
"""
aclip=mpy.AudioFileClip("sound.wav")
self.iS=self.iS.set_audio(aclip)
self.iS.write_videofile("mixedVideo.webm",15,audio=True)
print("wrote "+"mixedVideo.webm")
|
ttm/socialLegacy
|
social/fsong.py
|
FSong.makeAudibleSong
|
python
|
def makeAudibleSong(self):
sound0=n.hstack((sy.render(220,d=1.5),
sy.render(220*(2**(7/12)),d=2.5),
sy.render(220*(2**(-5/12)),d=.5),
sy.render(220*(2**(0/12)),d=1.5),
))
sound1=n.hstack((sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
))
sound2=n.hstack((sy.render(220*(2**(0/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(-1/12)),d=2.0),
))
sound3=n.hstack((n.zeros(44100),
sy.render(220*(2**(-1/12)),d=.5),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(-1/12)),d=1.75),
sy.render(220*(2**(-1/12)),d=.25),
))
sound4=n.hstack((
sy.render(220*(2**(0/12)),d=1.),
sy.render(220*(2**(7/12)),d=.5),
sy.render(220*(2**(11/12)),d=.5),
sy.render(220*(2**(12/12)),d=.75),
sy.render(220*(2**(11/12)),d=.25),
sy.render(220*(2**(12/12)),d=1.),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(7/12)),d=2.),
sy.render(220*(2**(-1/12)),d=2.),
n.zeros(2*44100)
))
sound=n.hstack((sound0,sound1,sound2,sound3,sound4))
UT.write(sound,"sound.wav")
|
Use mass to render wav soundtrack.
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fsong.py#L194-L239
| null |
class FSong:
"""Create song from undirected (friendship) network
"""
def __init__(self, network,basedir="fsong/",clean=False,render_images=False,render_images2=False,make_video=False):
os.system("mkdir {}".format(basedir))
if clean:
os.system("rm {}*".format(basedir))
self.basedir=basedir
self.network=network
self.makePartitions()
if render_images:
self.makeImages()
self.make_video=make_video
self.makeSong()
if render_images2:
self.makeImages2()
self.makeSong2()
def makeSong2(self):
pass
def makePartitions(self):
"""Make partitions with gmane help.
"""
class NetworkMeasures:
pass
self.nm=nm=NetworkMeasures()
nm.degrees=self.network.degree()
nm.nodes_= sorted(self.network.nodes(), key=lambda x : nm.degrees[x])
nm.degrees_=[nm.degrees[i] for i in nm.nodes_]
nm.edges= self.network.edges(data=True)
nm.E=self.network.number_of_edges()
nm.N=self.network.number_of_nodes()
self.np=g.NetworkPartitioning(nm,10,metric="g")
def makeImages(self):
"""Make spiral images in sectors and steps.
Plain, reversed,
sectorialized, negative sectorialized
outline, outline reversed, lonely
only nodes, only edges, both
"""
# make layout
self.makeLayout()
self.setAgraph()
# make function that accepts a mode, a sector
# and nodes and edges True and False
self.plotGraph()
self.plotGraph("reversed",filename="tgraphR.png")
agents=n.concatenate(self.np.sectorialized_agents__)
for i, sector in enumerate(self.np.sectorialized_agents__):
self.plotGraph("plain", sector,"sector{:02}.png".format(i))
self.plotGraph("reversed",sector,"sector{:02}R.png".format(i))
self.plotGraph("plain", n.setdiff1d(agents,sector),"sector{:02}N.png".format(i))
self.plotGraph("reversed",n.setdiff1d(agents,sector),"sector{:02}RN.png".format(i))
self.plotGraph("plain", [],"BLANK.png")
def makeImages2(self):
for i, node in enumerate(self.nm.nodes_):
self.plotGraph("plain", [node],"lonely{:09}.png".format(i))
self.plotGraph("reversed",[node],"lonely{:09}R.png".format(i))
self.plotGraph("plain", self.nm.nodes_[:i],"stair{:09}.png".format(i))
self.plotGraph("reversed",self.nm.nodes_[:i],"stair{:09}R.png".format(i))
# plotar novamente usando somente vertices e depois somente arestas
def plotGraph(self,mode="plain",nodes=None,filename="tgraph.png"):
"""Plot graph with nodes (iterable) into filename
"""
if nodes==None:
nodes=self.nodes
else:
nodes=[i for i in self.nodes if i in nodes]
for node in self.nodes:
n_=self.A.get_node(node)
if mode=="plain":
nmode=1
else:
nmode=-1
pos="{},{}".format(self.xi[::nmode][self.nm.nodes_.index(node)],self.yi[::nmode][self.nm.nodes_.index(node)])
n_.attr["pos"]=pos
n_.attr["pin"]=True
color='#%02x%02x%02x' % tuple([255*i for i in self.cm[int(self.clustering[n_]*255)][:-1]])
n_.attr['fillcolor']= color
n_.attr['fixedsize']=True
n_.attr['width']= abs(.1*(self.nm.degrees[n_]+ .5))
n_.attr['height']= abs(.1*(self.nm.degrees[n_]+.5))
n_.attr["label"]=""
if node not in nodes:
n_.attr["style"]="invis"
else:
n_.attr["style"]="filled"
for e in self.edges:
e.attr['penwidth']=3.4
e.attr["arrowsize"]=1.5
e.attr["arrowhead"]="lteeoldiamond"
e.attr["style"]=""
if sum([i in nodes for i in (e[0],e[1])])==2:
e.attr["style"]=""
else:
e.attr["style"]="invis"
tname="{}{}".format(self.basedir,filename)
print(tname)
self.A.draw(tname,prog="neato")
def setAgraph(self):
self.A=x.to_agraph(self.network)
self.A.graph_attr["viewport"]="500,500,.03"
self.edges=self.A.edges()
self.nodes=self.A.nodes()
self.cm=p.cm.Reds(range(2**10)) # color table
self.clustering=x.clustering(self.network)
def makeLayout(self):
ri=4
rf=100
nturns=3
ii=n.linspace(0,nturns*2*n.pi,self.nm.N)
rr=n.linspace(ri,rf,self.nm.N)
self.xi=(rr*n.cos(ii))
self.yi=(rr*n.sin(ii))
def makeSong(self):
"""Render abstract animation
"""
self.makeVisualSong()
self.makeAudibleSong()
if self.make_video:
self.makeAnimation()
def makeVisualSong(self):
"""Return a sequence of images and durations.
"""
self.files=os.listdir(self.basedir)
self.stairs=[i for i in self.files if ("stair" in i) and ("R" in i)]
self.sectors=[i for i in self.files if "sector" in i]
self.stairs.sort()
self.sectors.sort()
filenames=[self.basedir+i for i in self.sectors[:4]]
self.iS0=mpy.ImageSequenceClip(filenames,durations=[1.5,2.5,.5,1.5])
self.iS1=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3]],
durations=[0.25]*8)
self.iS2=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[0]],
durations=[0.75,0.25,0.75,0.25,2.]) # cai para sensível
self.iS3=mpy.ImageSequenceClip(
[self.basedir+"BLANK.png",
self.basedir+self.sectors[0],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[0],
self.basedir+self.sectors[0]],
durations=[1,0.5,2.,.25,.25,1.75, 0.25]) # [-1,8]
self.iS4=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[3], # .5
self.basedir+self.sectors[5], # .5
self.basedir+self.sectors[2], # .75
self.basedir+self.sectors[0], #.25
self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[0], # 2 8
self.basedir+self.sectors[3], # 2 7
self.basedir+self.sectors[0], # 2 -1
self.basedir+"BLANK.png",# 2
],
durations=[1,0.5,0.5,.75,
.25,1., 2.,2.,2.,2.]) # [0,7,11,0]
self.iS=mpy.concatenate_videoclips((
self.iS0,self.iS1,self.iS2,self.iS3,self.iS4))
# Clip with three first images3
# each sector a sound
# sweep from periphery to center
# all, all inverted
# sectors with inversions
def makeAnimation(self):
"""Use pymovie to render (visual+audio)+text overlays.
"""
aclip=mpy.AudioFileClip("sound.wav")
self.iS=self.iS.set_audio(aclip)
self.iS.write_videofile("mixedVideo.webm",15,audio=True)
print("wrote "+"mixedVideo.webm")
|
ttm/socialLegacy
|
social/fsong.py
|
FSong.makeAnimation
|
python
|
def makeAnimation(self):
aclip=mpy.AudioFileClip("sound.wav")
self.iS=self.iS.set_audio(aclip)
self.iS.write_videofile("mixedVideo.webm",15,audio=True)
print("wrote "+"mixedVideo.webm")
|
Use pymovie to render (visual+audio)+text overlays.
|
train
|
https://github.com/ttm/socialLegacy/blob/c0930cfe6e84392729449bf7c92569e1556fd109/social/fsong.py#L240-L246
| null |
class FSong:
"""Create song from undirected (friendship) network
"""
def __init__(self, network,basedir="fsong/",clean=False,render_images=False,render_images2=False,make_video=False):
os.system("mkdir {}".format(basedir))
if clean:
os.system("rm {}*".format(basedir))
self.basedir=basedir
self.network=network
self.makePartitions()
if render_images:
self.makeImages()
self.make_video=make_video
self.makeSong()
if render_images2:
self.makeImages2()
self.makeSong2()
def makeSong2(self):
pass
def makePartitions(self):
"""Make partitions with gmane help.
"""
class NetworkMeasures:
pass
self.nm=nm=NetworkMeasures()
nm.degrees=self.network.degree()
nm.nodes_= sorted(self.network.nodes(), key=lambda x : nm.degrees[x])
nm.degrees_=[nm.degrees[i] for i in nm.nodes_]
nm.edges= self.network.edges(data=True)
nm.E=self.network.number_of_edges()
nm.N=self.network.number_of_nodes()
self.np=g.NetworkPartitioning(nm,10,metric="g")
def makeImages(self):
"""Make spiral images in sectors and steps.
Plain, reversed,
sectorialized, negative sectorialized
outline, outline reversed, lonely
only nodes, only edges, both
"""
# make layout
self.makeLayout()
self.setAgraph()
# make function that accepts a mode, a sector
# and nodes and edges True and False
self.plotGraph()
self.plotGraph("reversed",filename="tgraphR.png")
agents=n.concatenate(self.np.sectorialized_agents__)
for i, sector in enumerate(self.np.sectorialized_agents__):
self.plotGraph("plain", sector,"sector{:02}.png".format(i))
self.plotGraph("reversed",sector,"sector{:02}R.png".format(i))
self.plotGraph("plain", n.setdiff1d(agents,sector),"sector{:02}N.png".format(i))
self.plotGraph("reversed",n.setdiff1d(agents,sector),"sector{:02}RN.png".format(i))
self.plotGraph("plain", [],"BLANK.png")
def makeImages2(self):
for i, node in enumerate(self.nm.nodes_):
self.plotGraph("plain", [node],"lonely{:09}.png".format(i))
self.plotGraph("reversed",[node],"lonely{:09}R.png".format(i))
self.plotGraph("plain", self.nm.nodes_[:i],"stair{:09}.png".format(i))
self.plotGraph("reversed",self.nm.nodes_[:i],"stair{:09}R.png".format(i))
# plotar novamente usando somente vertices e depois somente arestas
def plotGraph(self,mode="plain",nodes=None,filename="tgraph.png"):
"""Plot graph with nodes (iterable) into filename
"""
if nodes==None:
nodes=self.nodes
else:
nodes=[i for i in self.nodes if i in nodes]
for node in self.nodes:
n_=self.A.get_node(node)
if mode=="plain":
nmode=1
else:
nmode=-1
pos="{},{}".format(self.xi[::nmode][self.nm.nodes_.index(node)],self.yi[::nmode][self.nm.nodes_.index(node)])
n_.attr["pos"]=pos
n_.attr["pin"]=True
color='#%02x%02x%02x' % tuple([255*i for i in self.cm[int(self.clustering[n_]*255)][:-1]])
n_.attr['fillcolor']= color
n_.attr['fixedsize']=True
n_.attr['width']= abs(.1*(self.nm.degrees[n_]+ .5))
n_.attr['height']= abs(.1*(self.nm.degrees[n_]+.5))
n_.attr["label"]=""
if node not in nodes:
n_.attr["style"]="invis"
else:
n_.attr["style"]="filled"
for e in self.edges:
e.attr['penwidth']=3.4
e.attr["arrowsize"]=1.5
e.attr["arrowhead"]="lteeoldiamond"
e.attr["style"]=""
if sum([i in nodes for i in (e[0],e[1])])==2:
e.attr["style"]=""
else:
e.attr["style"]="invis"
tname="{}{}".format(self.basedir,filename)
print(tname)
self.A.draw(tname,prog="neato")
def setAgraph(self):
self.A=x.to_agraph(self.network)
self.A.graph_attr["viewport"]="500,500,.03"
self.edges=self.A.edges()
self.nodes=self.A.nodes()
self.cm=p.cm.Reds(range(2**10)) # color table
self.clustering=x.clustering(self.network)
def makeLayout(self):
ri=4
rf=100
nturns=3
ii=n.linspace(0,nturns*2*n.pi,self.nm.N)
rr=n.linspace(ri,rf,self.nm.N)
self.xi=(rr*n.cos(ii))
self.yi=(rr*n.sin(ii))
def makeSong(self):
"""Render abstract animation
"""
self.makeVisualSong()
self.makeAudibleSong()
if self.make_video:
self.makeAnimation()
def makeVisualSong(self):
"""Return a sequence of images and durations.
"""
self.files=os.listdir(self.basedir)
self.stairs=[i for i in self.files if ("stair" in i) and ("R" in i)]
self.sectors=[i for i in self.files if "sector" in i]
self.stairs.sort()
self.sectors.sort()
filenames=[self.basedir+i for i in self.sectors[:4]]
self.iS0=mpy.ImageSequenceClip(filenames,durations=[1.5,2.5,.5,1.5])
self.iS1=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3]],
durations=[0.25]*8)
self.iS2=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[2],
self.basedir+self.sectors[3],
self.basedir+self.sectors[0]],
durations=[0.75,0.25,0.75,0.25,2.]) # cai para sensível
self.iS3=mpy.ImageSequenceClip(
[self.basedir+"BLANK.png",
self.basedir+self.sectors[0],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[1],
self.basedir+self.sectors[0],
self.basedir+self.sectors[0]],
durations=[1,0.5,2.,.25,.25,1.75, 0.25]) # [-1,8]
self.iS4=mpy.ImageSequenceClip(
[self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[3], # .5
self.basedir+self.sectors[5], # .5
self.basedir+self.sectors[2], # .75
self.basedir+self.sectors[0], #.25
self.basedir+self.sectors[2], # 1
self.basedir+self.sectors[0], # 2 8
self.basedir+self.sectors[3], # 2 7
self.basedir+self.sectors[0], # 2 -1
self.basedir+"BLANK.png",# 2
],
durations=[1,0.5,0.5,.75,
.25,1., 2.,2.,2.,2.]) # [0,7,11,0]
self.iS=mpy.concatenate_videoclips((
self.iS0,self.iS1,self.iS2,self.iS3,self.iS4))
# Clip with three first images3
# each sector a sound
# sweep from periphery to center
# all, all inverted
# sectors with inversions
def makeAudibleSong(self):
"""Use mass to render wav soundtrack.
"""
sound0=n.hstack((sy.render(220,d=1.5),
sy.render(220*(2**(7/12)),d=2.5),
sy.render(220*(2**(-5/12)),d=.5),
sy.render(220*(2**(0/12)),d=1.5),
))
sound1=n.hstack((sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.25),
))
sound2=n.hstack((sy.render(220*(2**(0/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(7/12)),d=.75),
sy.render(220*(2**(0/12)),d=.25),
sy.render(220*(2**(-1/12)),d=2.0),
))
sound3=n.hstack((n.zeros(44100),
sy.render(220*(2**(-1/12)),d=.5),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(8/12)),d=.25),
sy.render(220*(2**(-1/12)),d=1.75),
sy.render(220*(2**(-1/12)),d=.25),
))
sound4=n.hstack((
sy.render(220*(2**(0/12)),d=1.),
sy.render(220*(2**(7/12)),d=.5),
sy.render(220*(2**(11/12)),d=.5),
sy.render(220*(2**(12/12)),d=.75),
sy.render(220*(2**(11/12)),d=.25),
sy.render(220*(2**(12/12)),d=1.),
sy.render(220*(2**(8/12)),d=2.),
sy.render(220*(2**(7/12)),d=2.),
sy.render(220*(2**(-1/12)),d=2.),
n.zeros(2*44100)
))
sound=n.hstack((sound0,sound1,sound2,sound3,sound4))
UT.write(sound,"sound.wav")
|
harlowja/failure
|
failure/finders.py
|
match_modules
|
python
|
def match_modules(allowed_modules):
cleaned_allowed_modules = [
utils.mod_to_mod_name(tmp_mod)
for tmp_mod in allowed_modules
]
cleaned_split_allowed_modules = [
tmp_mod.split(".")
for tmp_mod in cleaned_allowed_modules
]
cleaned_allowed_modules = []
del cleaned_allowed_modules
def matcher(cause):
cause_cls = None
cause_type_name = cause.exception_type_names[0]
# Rip off the class name (usually at the end).
cause_type_name_pieces = cause_type_name.split(".")
cause_type_name_mod_pieces = cause_type_name_pieces[0:-1]
# Do any modules provided match the provided causes module?
mod_match = any(
utils.array_prefix_matches(mod_pieces,
cause_type_name_mod_pieces)
for mod_pieces in cleaned_split_allowed_modules)
if mod_match:
cause_cls = importutils.import_class(cause_type_name)
cause_cls = ensure_base_exception(cause_type_name, cause_cls)
return cause_cls
return matcher
|
Creates a matcher that matches a list/set/tuple of allowed modules.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/finders.py#L44-L73
| null |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 GoDaddy Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import itertools
from oslo_utils import importutils
from oslo_utils import reflection
from failure import _utils as utils
class InvalidTypeError(TypeError):
pass
def ensure_base_exception(cause_type_name, cls):
# Ensure source class is correct (ie that it has the right
# root that **all** python exceptions must have); if not right then
# it will be discarded.
if not issubclass(cls, BaseException):
raise InvalidTypeError(
"Cause with type '%s' was regenerated as a non-exception"
" base class '%s'" % (cause_type_name,
reflection.get_class_name(cls)))
else:
return cls
def match_classes(allowed_classes):
"""Creates a matcher that matches a list/tuple of allowed classes."""
cleaned_allowed_classes = [
utils.cls_to_cls_name(tmp_cls)
for tmp_cls in allowed_classes
]
def matcher(cause):
cause_cls = None
cause_type_name = cause.exception_type_names[0]
try:
cause_cls_idx = cleaned_allowed_classes.index(cause_type_name)
except ValueError:
pass
else:
cause_cls = allowed_classes[cause_cls_idx]
if not isinstance(cause_cls, type):
cause_cls = importutils.import_class(cause_cls)
cause_cls = ensure_base_exception(cause_type_name, cause_cls)
return cause_cls
return matcher
def combine_or(matcher, *more_matchers):
"""Combines more than one matcher together (first that matches wins)."""
def matcher(cause):
for sub_matcher in itertools.chain([matcher], more_matchers):
cause_cls = sub_matcher(cause)
if cause_cls is not None:
return cause_cls
return None
return matcher
|
harlowja/failure
|
failure/finders.py
|
match_classes
|
python
|
def match_classes(allowed_classes):
cleaned_allowed_classes = [
utils.cls_to_cls_name(tmp_cls)
for tmp_cls in allowed_classes
]
def matcher(cause):
cause_cls = None
cause_type_name = cause.exception_type_names[0]
try:
cause_cls_idx = cleaned_allowed_classes.index(cause_type_name)
except ValueError:
pass
else:
cause_cls = allowed_classes[cause_cls_idx]
if not isinstance(cause_cls, type):
cause_cls = importutils.import_class(cause_cls)
cause_cls = ensure_base_exception(cause_type_name, cause_cls)
return cause_cls
return matcher
|
Creates a matcher that matches a list/tuple of allowed classes.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/finders.py#L76-L97
| null |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 GoDaddy Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import itertools
from oslo_utils import importutils
from oslo_utils import reflection
from failure import _utils as utils
class InvalidTypeError(TypeError):
pass
def ensure_base_exception(cause_type_name, cls):
# Ensure source class is correct (ie that it has the right
# root that **all** python exceptions must have); if not right then
# it will be discarded.
if not issubclass(cls, BaseException):
raise InvalidTypeError(
"Cause with type '%s' was regenerated as a non-exception"
" base class '%s'" % (cause_type_name,
reflection.get_class_name(cls)))
else:
return cls
def match_modules(allowed_modules):
"""Creates a matcher that matches a list/set/tuple of allowed modules."""
cleaned_allowed_modules = [
utils.mod_to_mod_name(tmp_mod)
for tmp_mod in allowed_modules
]
cleaned_split_allowed_modules = [
tmp_mod.split(".")
for tmp_mod in cleaned_allowed_modules
]
cleaned_allowed_modules = []
del cleaned_allowed_modules
def matcher(cause):
cause_cls = None
cause_type_name = cause.exception_type_names[0]
# Rip off the class name (usually at the end).
cause_type_name_pieces = cause_type_name.split(".")
cause_type_name_mod_pieces = cause_type_name_pieces[0:-1]
# Do any modules provided match the provided causes module?
mod_match = any(
utils.array_prefix_matches(mod_pieces,
cause_type_name_mod_pieces)
for mod_pieces in cleaned_split_allowed_modules)
if mod_match:
cause_cls = importutils.import_class(cause_type_name)
cause_cls = ensure_base_exception(cause_type_name, cause_cls)
return cause_cls
return matcher
def combine_or(matcher, *more_matchers):
"""Combines more than one matcher together (first that matches wins)."""
def matcher(cause):
for sub_matcher in itertools.chain([matcher], more_matchers):
cause_cls = sub_matcher(cause)
if cause_cls is not None:
return cause_cls
return None
return matcher
|
harlowja/failure
|
failure/finders.py
|
combine_or
|
python
|
def combine_or(matcher, *more_matchers):
def matcher(cause):
for sub_matcher in itertools.chain([matcher], more_matchers):
cause_cls = sub_matcher(cause)
if cause_cls is not None:
return cause_cls
return None
return matcher
|
Combines more than one matcher together (first that matches wins).
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/finders.py#L100-L110
| null |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 GoDaddy Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import itertools
from oslo_utils import importutils
from oslo_utils import reflection
from failure import _utils as utils
class InvalidTypeError(TypeError):
pass
def ensure_base_exception(cause_type_name, cls):
# Ensure source class is correct (ie that it has the right
# root that **all** python exceptions must have); if not right then
# it will be discarded.
if not issubclass(cls, BaseException):
raise InvalidTypeError(
"Cause with type '%s' was regenerated as a non-exception"
" base class '%s'" % (cause_type_name,
reflection.get_class_name(cls)))
else:
return cls
def match_modules(allowed_modules):
"""Creates a matcher that matches a list/set/tuple of allowed modules."""
cleaned_allowed_modules = [
utils.mod_to_mod_name(tmp_mod)
for tmp_mod in allowed_modules
]
cleaned_split_allowed_modules = [
tmp_mod.split(".")
for tmp_mod in cleaned_allowed_modules
]
cleaned_allowed_modules = []
del cleaned_allowed_modules
def matcher(cause):
cause_cls = None
cause_type_name = cause.exception_type_names[0]
# Rip off the class name (usually at the end).
cause_type_name_pieces = cause_type_name.split(".")
cause_type_name_mod_pieces = cause_type_name_pieces[0:-1]
# Do any modules provided match the provided causes module?
mod_match = any(
utils.array_prefix_matches(mod_pieces,
cause_type_name_mod_pieces)
for mod_pieces in cleaned_split_allowed_modules)
if mod_match:
cause_cls = importutils.import_class(cause_type_name)
cause_cls = ensure_base_exception(cause_type_name, cause_cls)
return cause_cls
return matcher
def match_classes(allowed_classes):
"""Creates a matcher that matches a list/tuple of allowed classes."""
cleaned_allowed_classes = [
utils.cls_to_cls_name(tmp_cls)
for tmp_cls in allowed_classes
]
def matcher(cause):
cause_cls = None
cause_type_name = cause.exception_type_names[0]
try:
cause_cls_idx = cleaned_allowed_classes.index(cause_type_name)
except ValueError:
pass
else:
cause_cls = allowed_classes[cause_cls_idx]
if not isinstance(cause_cls, type):
cause_cls = importutils.import_class(cause_cls)
cause_cls = ensure_base_exception(cause_type_name, cause_cls)
return cause_cls
return matcher
|
harlowja/failure
|
failure/failure.py
|
WrappedFailure.check
|
python
|
def check(self, *exc_classes):
if not exc_classes:
return None
for cause in self:
result = cause.check(*exc_classes)
if result is not None:
return result
return None
|
Check if any of exception classes caused the failure/s.
:param exc_classes: exception types/exception type names to
search for.
If any of the contained failures were caused by an exception of a
given type, the corresponding argument that matched is returned. If
not then ``None`` is returned.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L77-L93
| null |
class WrappedFailure(utils.StrMixin, Exception):
"""Wraps one or several failure objects.
When exception/s cannot be re-raised (for example, because the value and
traceback are lost in serialization) or there are several exceptions active
at the same time (due to more than one thread raising exceptions), we will
wrap the corresponding failure objects into this exception class and
*may* reraise this exception type to allow users to handle the contained
failures/causes as they see fit...
See the failure class documentation for a more comprehensive set of reasons
why this object *may* be reraised instead of the original exception.
:param causes: the :py:class:`~failure.Failure` objects
that caused this this exception to be raised.
"""
def __init__(self, causes):
super(WrappedFailure, self).__init__()
self._causes = []
for cause in causes:
if cause.check(type(self)) and cause.exception is not None:
# NOTE(imelnikov): flatten wrapped failures.
self._causes.extend(cause.exception)
else:
self._causes.append(cause)
def __iter__(self):
"""Iterate over failures that caused the exception."""
return iter(self._causes)
def __len__(self):
"""Return number of wrapped failures."""
return len(self._causes)
def __bytes__(self):
buf = six.BytesIO()
buf.write(b'WrappedFailure: [')
causes_gen = (six.binary_type(cause) for cause in self._causes)
buf.write(b", ".join(causes_gen))
buf.write(b']')
return buf.getvalue()
def __unicode__(self):
buf = six.StringIO()
buf.write(u'WrappedFailure: [')
causes_gen = (six.text_type(cause) for cause in self._causes)
buf.write(u", ".join(causes_gen))
buf.write(u']')
return buf.getvalue()
|
harlowja/failure
|
failure/failure.py
|
Failure.from_exc_info
|
python
|
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
|
Creates a failure object from a ``sys.exc_info()`` tuple.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L241-L291
|
[
"def extract_roots(exc_type):\n return to_tuple(\n reflection.get_all_class_names(exc_type, up_to=BaseException,\n truncate_builtins=False))\n",
"def _extract_cause(cls, exc_val):\n \"\"\"Helper routine to extract nested cause (if any).\"\"\"\n # See: https://www.python.org/dev/peps/pep-3134/ for why/what\n # these are...\n #\n # '__cause__' attribute for explicitly chained exceptions\n # '__context__' attribute for implicitly chained exceptions\n # '__traceback__' attribute for the traceback\n #\n # See: https://www.python.org/dev/peps/pep-0415/ for why/what\n # the '__suppress_context__' is/means/implies...\n nested_exc_vals = []\n seen = [exc_val]\n while True:\n suppress_context = getattr(\n exc_val, '__suppress_context__', False)\n if suppress_context:\n attr_lookups = ['__cause__']\n else:\n attr_lookups = ['__cause__', '__context__']\n nested_exc_val = None\n for attr_name in attr_lookups:\n attr_val = getattr(exc_val, attr_name, None)\n if attr_val is None:\n continue\n nested_exc_val = attr_val\n if nested_exc_val is None or nested_exc_val in seen:\n break\n seen.append(nested_exc_val)\n nested_exc_vals.append(nested_exc_val)\n exc_val = nested_exc_val\n last_cause = None\n for exc_val in reversed(nested_exc_vals):\n f = cls.from_exception(exc_val, cause=last_cause,\n find_cause=False)\n last_cause = f\n return last_cause\n"
] |
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
For those who are curious, here are a few reasons why the original
exception itself *may* not be reraised and instead a reraised wrapped
failure exception object will be instead. These explanations are *only*
applicable when a failure object is serialized and deserialized (when it is
retained inside the python process that the exception was created in the
the original exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
def __init__(self, exc_info=None, exc_args=None,
exc_kwargs=None, exception_str='',
exc_type_names=None, cause=None,
traceback_str='', generated_on=None):
exc_type_names = utils.to_tuple(exc_type_names)
if not exc_type_names:
raise ValueError("Invalid exception type (no type names"
" provided)")
self._exc_type_names = exc_type_names
self._exc_info = utils.to_tuple(exc_info, on_none=None)
self._exc_args = utils.to_tuple(exc_args)
if exc_kwargs:
self._exc_kwargs = dict(exc_kwargs)
else:
self._exc_kwargs = {}
self._exception_str = exception_str
self._cause = cause
self._traceback_str = traceback_str
self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def validate(cls, data):
"""Validate input data matches expected failure ``dict`` format."""
try:
jsonschema.validate(
data, cls.SCHEMA,
# See: https://github.com/Julian/jsonschema/issues/148
types={'array': (list, tuple)})
except jsonschema.ValidationError as e:
raise InvalidFormat("Failure data not of the"
" expected format: %s" % (e.message))
else:
# Ensure that all 'exc_type_names' originate from one of
# base exceptions, because those are the root exceptions that
# python mandates/provides and anything else is invalid...
causes = collections.deque([data])
while causes:
cause = causes.popleft()
try:
generated_on = cause['generated_on']
ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
except (KeyError, IndexError):
ok_bases = []
root_exc_type = cause['exc_type_names'][-1]
if root_exc_type not in ok_bases:
raise InvalidFormat(
"Failure data 'exc_type_names' must"
" have an initial exception type that is one"
" of %s types: '%s' is not one of those"
" types" % (ok_bases, root_exc_type))
sub_cause = cause.get('cause')
if sub_cause is not None:
causes.append(sub_cause)
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise_if_any(failures, cause_cls_finder=None):
"""Re-raise exceptions if argument is not empty.
If argument is empty list/tuple/iterator, this method returns
None. If argument is converted into a list with a
single ``Failure`` object in it, that failure is reraised. Else, a
:class:`~.WrappedFailure` exception is raised with the failure
list as causes.
"""
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
def check(self, *exc_classes):
"""Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
"""
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def pformat(self, traceback=False):
"""Pretty formats the failure object into a string."""
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
@classmethod
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
@classmethod
def from_dict(cls, data):
"""Converts this from a dictionary to a object."""
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
def to_dict(self, include_args=True, include_kwargs=True):
"""Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
"""
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
def copy(self, deep=False):
"""Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
"""
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.from_exception
|
python
|
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
|
Creates a failure object from a exception instance.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L294-L304
|
[
"def from_exc_info(cls, exc_info=None,\n retain_exc_info=True,\n cause=None, find_cause=True):\n \"\"\"Creates a failure object from a ``sys.exc_info()`` tuple.\"\"\"\n if exc_info is None:\n exc_info = sys.exc_info()\n if not any(exc_info):\n raise NoActiveException(\"No exception currently\"\n \" being handled\")\n # This should always be the (type, value, traceback) tuple,\n # either from a prior sys.exc_info() call or from some other\n # creation...\n if len(exc_info) != 3:\n raise ValueError(\"Provided 'exc_info' must contain three\"\n \" elements\")\n exc_type, exc_val, exc_tb = exc_info\n try:\n if exc_type is None or exc_val is None:\n raise ValueError(\"Invalid exception tuple (exception\"\n \" type and exception value must\"\n \" be provided)\")\n exc_args = tuple(getattr(exc_val, 'args', []))\n exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))\n exc_type_names = utils.extract_roots(exc_type)\n if not exc_type_names:\n exc_type_name = reflection.get_class_name(\n exc_val, truncate_builtins=False)\n # This should only be possible if the exception provided\n # was not really an exception...\n raise TypeError(\"Invalid exception type '%s' (not an\"\n \" exception)\" % (exc_type_name))\n exception_str = utils.exception_message(exc_val)\n if hasattr(exc_val, '__traceback_str__'):\n traceback_str = exc_val.__traceback_str__\n else:\n if exc_tb is not None:\n traceback_str = '\\n'.join(\n traceback.format_exception(*exc_info))\n else:\n traceback_str = ''\n if not retain_exc_info:\n exc_info = None\n if find_cause and cause is None:\n cause = cls._extract_cause(exc_val)\n return cls(exc_info=exc_info, exc_args=exc_args,\n exc_kwargs=exc_kwargs, exception_str=exception_str,\n exc_type_names=exc_type_names, cause=cause,\n traceback_str=traceback_str,\n generated_on=sys.version_info[0:2])\n finally:\n del exc_type, exc_val, exc_tb\n"
] |
class Failure(utils.StrMixin):
    """An immutable object that represents failure.

    Failure objects encapsulate exception information so that they can be
    re-used later to re-raise, inspect, examine, log, print, serialize,
    deserialize...

    For those who are curious, here are a few reasons why the original
    exception itself *may* not be reraised and instead a reraised wrapped
    failure exception object will be instead. These explanations are *only*
    applicable when a failure object is serialized and deserialized (when it is
    retained inside the python process that the exception was created in the
    the original exception can be reraised correctly without issue).

    * Traceback objects are not serializable/recreatable, since they contain
      references to stack frames at the location where the exception was
      raised. When a failure object is serialized and sent across a channel
      and recreated it is *not* possible to restore the original traceback and
      originating stack frames.
    * The original exception *type* can not *always* be guaranteed to be
      found, certain nodes can run code that is not accessible/available
      when the failure is being deserialized. Even if it was possible to use
      pickle safely (which it is not) it would not *always*
      be possible to find the originating exception or associated code in this
      situation.
    * The original exception *type* can not be guaranteed to be constructed in
      a *correct* manner. At the time of failure object creation the exception
      has already been created and the failure object can not assume it has
      knowledge (or the ability) to recreate the original type of the captured
      exception (this is especially hard if the original exception was created
      via a complex process via some custom exception ``__init__`` method).
    * The original exception *type* can not *always* be guaranteed to be
      constructed and/or imported in a *safe* manner. Importing *foreign*
      exception types dynamically can be problematic when not done
      correctly and in a safe manner; since failure objects can
      capture *any* exception it would be *unsafe* to try to import
      those exception types namespaces and modules on the receiver side
      dynamically (this would create similar issues as the ``pickle`` module
      has).

    TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
    backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
    simplify the methods and contents of this object...
    """

    BASE_EXCEPTIONS = {
        # py2.x old/legacy names...
        2: ('exceptions.BaseException', 'exceptions.Exception'),
        # py3.x new names...
        3: ('builtins.BaseException', 'builtins.Exception'),
    }
    """
    Root exceptions of all other python exceptions (as a string).

    See: https://docs.python.org/2/library/exceptions.html
    """

    #: Expected failure schema (in json schema format).
    SCHEMA = {
        "$ref": "#/definitions/cause",
        "definitions": {
            "cause": {
                "type": "object",
                'properties': {
                    'exc_args': {
                        "type": "array",
                        "minItems": 0,
                    },
                    'exc_kwargs': {
                        "type": "object",
                        "additionalProperties": True,
                    },
                    'exception_str': {
                        "type": "string",
                    },
                    'traceback_str': {
                        "type": "string",
                    },
                    'exc_type_names': {
                        "type": "array",
                        "items": {
                            "type": "string",
                        },
                        "minItems": 1,
                    },
                    'generated_on': {
                        "type": "array",
                        "items": {
                            "type": "number",
                        },
                        "minItems": 1,
                    },
                    'cause': {
                        "type": "object",
                        "$ref": "#/definitions/cause",
                    },
                },
                "required": [
                    "exception_str",
                    'traceback_str',
                    'exc_type_names',
                    'generated_on',
                ],
                "additionalProperties": True,
            },
        },
    }

    def __init__(self, exc_info=None, exc_args=None,
                 exc_kwargs=None, exception_str='',
                 exc_type_names=None, cause=None,
                 traceback_str='', generated_on=None):
        exc_type_names = utils.to_tuple(exc_type_names)
        if not exc_type_names:
            raise ValueError("Invalid exception type (no type names"
                             " provided)")
        self._exc_type_names = exc_type_names
        self._exc_info = utils.to_tuple(exc_info, on_none=None)
        self._exc_args = utils.to_tuple(exc_args)
        if exc_kwargs:
            self._exc_kwargs = dict(exc_kwargs)
        else:
            self._exc_kwargs = {}
        self._exception_str = exception_str
        self._cause = cause
        self._traceback_str = traceback_str
        self._generated_on = utils.to_tuple(generated_on, on_none=None)

    @classmethod
    def from_exc_info(cls, exc_info=None,
                      retain_exc_info=True,
                      cause=None, find_cause=True):
        """Creates a failure object from a ``sys.exc_info()`` tuple."""
        if exc_info is None:
            exc_info = sys.exc_info()
        if not any(exc_info):
            raise NoActiveException("No exception currently"
                                    " being handled")
        # This should always be the (type, value, traceback) tuple,
        # either from a prior sys.exc_info() call or from some other
        # creation...
        if len(exc_info) != 3:
            raise ValueError("Provided 'exc_info' must contain three"
                             " elements")
        exc_type, exc_val, exc_tb = exc_info
        try:
            if exc_type is None or exc_val is None:
                raise ValueError("Invalid exception tuple (exception"
                                 " type and exception value must"
                                 " be provided)")
            exc_args = tuple(getattr(exc_val, 'args', []))
            exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
            exc_type_names = utils.extract_roots(exc_type)
            if not exc_type_names:
                exc_type_name = reflection.get_class_name(
                    exc_val, truncate_builtins=False)
                # This should only be possible if the exception provided
                # was not really an exception...
                raise TypeError("Invalid exception type '%s' (not an"
                                " exception)" % (exc_type_name))
            exception_str = utils.exception_message(exc_val)
            if hasattr(exc_val, '__traceback_str__'):
                traceback_str = exc_val.__traceback_str__
            else:
                if exc_tb is not None:
                    traceback_str = '\n'.join(
                        traceback.format_exception(*exc_info))
                else:
                    traceback_str = ''
            if not retain_exc_info:
                exc_info = None
            if find_cause and cause is None:
                cause = cls._extract_cause(exc_val)
            return cls(exc_info=exc_info, exc_args=exc_args,
                       exc_kwargs=exc_kwargs, exception_str=exception_str,
                       exc_type_names=exc_type_names, cause=cause,
                       traceback_str=traceback_str,
                       generated_on=sys.version_info[0:2])
        finally:
            del exc_type, exc_val, exc_tb

    # NOTE(review): this method was missing here (only its stranded
    # '@classmethod' decorator remained, which double-decorated 'validate'
    # below); it is required by '_extract_cause' which calls it.
    @classmethod
    def from_exception(cls, exception, retain_exc_info=True,
                       cause=None, find_cause=True):
        """Creates a failure object from an exception instance."""
        exc_info = (
            type(exception),
            exception,
            getattr(exception, '__traceback__', None)
        )
        return cls.from_exc_info(exc_info=exc_info,
                                 retain_exc_info=retain_exc_info,
                                 cause=cause, find_cause=find_cause)

    @classmethod
    def validate(cls, data):
        """Validate input data matches expected failure ``dict`` format."""
        try:
            jsonschema.validate(
                data, cls.SCHEMA,
                # See: https://github.com/Julian/jsonschema/issues/148
                types={'array': (list, tuple)})
        except jsonschema.ValidationError as e:
            raise InvalidFormat("Failure data not of the"
                                " expected format: %s" % (e.message))
        else:
            # Ensure that all 'exc_type_names' originate from one of
            # base exceptions, because those are the root exceptions that
            # python mandates/provides and anything else is invalid...
            causes = collections.deque([data])
            while causes:
                cause = causes.popleft()
                try:
                    generated_on = cause['generated_on']
                    ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
                except (KeyError, IndexError):
                    ok_bases = []
                root_exc_type = cause['exc_type_names'][-1]
                if root_exc_type not in ok_bases:
                    raise InvalidFormat(
                        "Failure data 'exc_type_names' must"
                        " have an initial exception type that is one"
                        " of %s types: '%s' is not one of those"
                        " types" % (ok_bases, root_exc_type))
                sub_cause = cause.get('cause')
                if sub_cause is not None:
                    causes.append(sub_cause)

    def _matches(self, other):
        # Field-by-field equivalence check (used when one side lost
        # its 'exc_info' during serialization).
        if self is other:
            return True
        return (self.exception_type_names == other.exception_type_names and
                self.exception_args == other.exception_args and
                self.exception_kwargs == other.exception_kwargs and
                self.exception_str == other.exception_str and
                self.traceback_str == other.traceback_str and
                self.cause == other.cause and
                self.generated_on == other.generated_on)

    def matches(self, other):
        """Checks if another object is equivalent to this object.

        :returns: checks if another object is equivalent to this object
        :rtype: boolean
        """
        if not isinstance(other, Failure):
            return False
        if self.exc_info is None or other.exc_info is None:
            return self._matches(other)
        else:
            return self == other

    def __eq__(self, other):
        if not isinstance(other, Failure):
            return NotImplemented
        return (self._matches(other) and
                utils.are_equal_exc_info_tuples(self.exc_info,
                                                other.exc_info))

    def __ne__(self, other):
        return not (self == other)

    # NOTE(imelnikov): obj.__hash__() should return same values for equal
    # objects, so we should redefine __hash__. Failure equality semantics
    # is a bit complicated, so for now we just mark Failure objects as
    # unhashable. See python docs on object.__hash__ for more info:
    # http://docs.python.org/2/reference/datamodel.html#object.__hash__
    __hash__ = None

    @property
    def exception(self):
        """Exception value, or ``None`` if exception value is not present.

        Exception value *may* be lost during serialization.
        """
        if self._exc_info:
            return self._exc_info[1]
        else:
            return None

    @property
    def generated_on(self):
        """Python major & minor version tuple this failure was generated on.

        May be ``None`` if not provided during creation (or after if lost).
        """
        return self._generated_on

    @property
    def exception_str(self):
        """String representation of exception."""
        return self._exception_str

    @property
    def exception_args(self):
        """Tuple of arguments given to the exception constructor."""
        return self._exc_args

    @property
    def exception_kwargs(self):
        """Dict of keyword arguments given to the exception constructor."""
        return self._exc_kwargs

    @property
    def exception_type_names(self):
        """Tuple of current exception type **names** (in MRO order)."""
        return self._exc_type_names

    @property
    def exc_info(self):
        """Exception info tuple or ``None``.

        See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
        the contents of this tuple are (if none, then no contents can
        be examined).
        """
        return self._exc_info

    @property
    def traceback_str(self):
        """Exception traceback as string."""
        return self._traceback_str

    @staticmethod
    def reraise_if_any(failures, cause_cls_finder=None):
        """Re-raise exceptions if argument is not empty.

        If argument is empty list/tuple/iterator, this method returns
        None. If argument is converted into a list with a
        single ``Failure`` object in it, that failure is reraised. Else, a
        :class:`~.WrappedFailure` exception is raised with the failure
        list as causes.
        """
        if not isinstance(failures, (list, tuple)):
            # Convert generators/other into a list...
            failures = list(failures)
        if len(failures) == 1:
            failures[0].reraise(cause_cls_finder=cause_cls_finder)
        elif len(failures) > 1:
            raise WrappedFailure(failures)

    def reraise(self, cause_cls_finder=None):
        """Re-raise captured exception (possibly trying to recreate)."""
        if self._exc_info:
            six.reraise(*self._exc_info)
        else:
            # Attempt to regenerate the full chain (and then raise
            # from the root); without a traceback, oh well...
            root = None
            parent = None
            for cause in itertools.chain([self], self.iter_causes()):
                if cause_cls_finder is not None:
                    cause_cls = cause_cls_finder(cause)
                else:
                    cause_cls = None
                if cause_cls is None:
                    # Unable to find where this cause came from, give up...
                    raise WrappedFailure([self])
                exc = cause_cls(
                    *cause.exception_args, **cause.exception_kwargs)
                # Saving this will ensure that if this same exception
                # is serialized again that we will extract the traceback
                # from it directly (thus proxying along the original
                # traceback as much as we can).
                exc.__traceback_str__ = cause.traceback_str
                if root is None:
                    root = exc
                if parent is not None:
                    parent.__cause__ = exc
                parent = exc
            six.reraise(type(root), root, tb=None)

    def check(self, *exc_classes):
        """Check if any of ``exc_classes`` caused the failure.

        Arguments of this method can be exception types or type
        names (strings **fully qualified**). If captured exception is
        an instance of exception of given type, the corresponding argument
        is returned, otherwise ``None`` is returned.
        """
        for cls in exc_classes:
            cls_name = utils.cls_to_cls_name(cls)
            if cls_name in self._exc_type_names:
                return cls
        return None

    @property
    def cause(self):
        """Nested failure *cause* of this failure.

        This property is typically only useful on 3.x or newer versions
        of python as older versions do **not** have associated causes.

        Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
        this is examining to find failure causes.
        """
        return self._cause

    def __unicode__(self):
        return self.pformat()

    def pformat(self, traceback=False):
        """Pretty formats the failure object into a string."""
        buf = six.StringIO()
        if not self._exc_type_names:
            buf.write('Failure: %s' % (self._exception_str))
        else:
            buf.write('Failure: %s: %s' % (self._exc_type_names[0],
                                           self._exception_str))
        if traceback:
            if self._traceback_str is not None:
                traceback_str = self._traceback_str.rstrip()
            else:
                traceback_str = None
            if traceback_str:
                buf.write(os.linesep)
                buf.write(traceback_str)
            else:
                buf.write(os.linesep)
                buf.write('Traceback not available.')
        return buf.getvalue()

    def iter_causes(self):
        """Iterate over all causes."""
        curr = self._cause
        while curr is not None:
            yield curr
            curr = curr._cause

    def __getstate__(self):
        dct = self.to_dict()
        if self._exc_info:
            # Avoids 'TypeError: can't pickle traceback objects'
            dct['exc_info'] = self._exc_info[0:2]
        return dct

    def __setstate__(self, dct):
        self._exception_str = dct['exception_str']
        if 'exc_args' in dct:
            self._exc_args = tuple(dct['exc_args'])
        else:
            # Guess we got an older version somehow, before this
            # was added, so at that point just set to an empty tuple...
            self._exc_args = ()
        if 'exc_kwargs' in dct:
            self._exc_kwargs = dict(dct['exc_kwargs'])
        else:
            self._exc_kwargs = {}
        self._traceback_str = dct['traceback_str']
        self._exc_type_names = dct['exc_type_names']
        self._generated_on = dct['generated_on']
        if 'exc_info' in dct:
            # Tracebacks can't be serialized/deserialized, but since we
            # provide a traceback string (and more) this should be
            # acceptable...
            #
            # TODO(harlowja): in the future we could do something like
            # what the twisted people have done, see for example
            # twisted-13.0.0/twisted/python/failure.py#L89 for how they
            # created a fake traceback object...
            exc_info = list(dct['exc_info'])
            while len(exc_info) < 3:
                exc_info.append(None)
            self._exc_info = tuple(exc_info[0:3])
        else:
            self._exc_info = None
        cause = dct.get('cause')
        if cause is not None:
            cause = self.from_dict(cause)
        self._cause = cause

    @classmethod
    def _extract_cause(cls, exc_val):
        """Helper routine to extract nested cause (if any)."""
        # See: https://www.python.org/dev/peps/pep-3134/ for why/what
        # these are...
        #
        # '__cause__' attribute for explicitly chained exceptions
        # '__context__' attribute for implicitly chained exceptions
        # '__traceback__' attribute for the traceback
        #
        # See: https://www.python.org/dev/peps/pep-0415/ for why/what
        # the '__suppress_context__' is/means/implies...
        nested_exc_vals = []
        seen = [exc_val]
        while True:
            suppress_context = getattr(
                exc_val, '__suppress_context__', False)
            if suppress_context:
                attr_lookups = ['__cause__']
            else:
                attr_lookups = ['__cause__', '__context__']
            nested_exc_val = None
            for attr_name in attr_lookups:
                attr_val = getattr(exc_val, attr_name, None)
                if attr_val is None:
                    continue
                nested_exc_val = attr_val
            if nested_exc_val is None or nested_exc_val in seen:
                break
            seen.append(nested_exc_val)
            nested_exc_vals.append(nested_exc_val)
            exc_val = nested_exc_val
        last_cause = None
        for exc_val in reversed(nested_exc_vals):
            f = cls.from_exception(exc_val, cause=last_cause,
                                   find_cause=False)
            last_cause = f
        return last_cause

    @classmethod
    def from_dict(cls, data):
        """Converts this from a dictionary to a object."""
        data = dict(data)
        cause = data.get('cause')
        if cause is not None:
            data['cause'] = cls.from_dict(cause)
        return cls(**data)

    def to_dict(self, include_args=True, include_kwargs=True):
        """Converts this object to a dictionary.

        :param include_args: boolean indicating whether to include the
                             exception args in the output.
        :param include_kwargs: boolean indicating whether to include the
                               exception kwargs in the output.
        """
        data = {
            'exception_str': self.exception_str,
            'traceback_str': self.traceback_str,
            'exc_type_names': self.exception_type_names,
            'exc_args': self.exception_args if include_args else tuple(),
            'exc_kwargs': self.exception_kwargs if include_kwargs else {},
            'generated_on': self.generated_on,
        }
        if self._cause is not None:
            data['cause'] = self._cause.to_dict(include_args=include_args,
                                                include_kwargs=include_kwargs)
        return data

    def copy(self, deep=False):
        """Copies this object (shallow or deep).

        :param deep: boolean indicating whether to do a deep copy (or a
                     shallow copy).
        """
        cause = self._cause
        if cause is not None:
            cause = cause.copy(deep=deep)
        exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
        exc_args = self.exception_args
        exc_kwargs = self.exception_kwargs
        if deep:
            exc_args = copy.deepcopy(exc_args)
            exc_kwargs = copy.deepcopy(exc_kwargs)
        else:
            exc_args = tuple(exc_args)
            exc_kwargs = exc_kwargs.copy()
        # These are just simple int/strings, so deep copy doesn't really
        # matter/apply here (as they are immutable anyway).
        exc_type_names = tuple(self._exc_type_names)
        generated_on = self._generated_on
        if generated_on:
            generated_on = tuple(generated_on)
        # NOTE(harlowja): use `self.__class__` here so that we can work
        # with subclasses (assuming anyone makes one).
        return self.__class__(exc_info=exc_info,
                              exception_str=self.exception_str,
                              traceback_str=self.traceback_str,
                              exc_args=exc_args,
                              exc_kwargs=exc_kwargs,
                              exc_type_names=exc_type_names,
                              cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.validate
|
python
|
def validate(cls, data):
    """Check that ``data`` matches the expected failure ``dict`` layout.

    First the payload is checked against ``cls.SCHEMA``; then every entry
    in the (possibly nested) cause chain is verified to be rooted at one
    of python's mandated base exception types.

    :raises InvalidFormat: if the schema check fails or any cause entry
                           has an unexpected root exception type.
    """
    try:
        jsonschema.validate(
            data, cls.SCHEMA,
            # See: https://github.com/Julian/jsonschema/issues/148
            types={'array': (list, tuple)})
    except jsonschema.ValidationError as e:
        raise InvalidFormat("Failure data not of the"
                            " expected format: %s" % (e.message))
    # Breadth-first walk over the cause chain.
    pending = collections.deque([data])
    while pending:
        node = pending.popleft()
        try:
            allowed_roots = cls.BASE_EXCEPTIONS[node['generated_on'][0]]
        except (KeyError, IndexError):
            allowed_roots = []
        root_name = node['exc_type_names'][-1]
        if root_name not in allowed_roots:
            raise InvalidFormat(
                "Failure data 'exc_type_names' must"
                " have an initial exception type that is one"
                " of %s types: '%s' is not one of those"
                " types" % (allowed_roots, root_name))
        nested = node.get('cause')
        if nested is not None:
            pending.append(nested)
|
Validate input data matches expected failure ``dict`` format.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L307-L338
| null |
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
For those who are curious, here are a few reasons why the original
exception itself *may* not be reraised and instead a reraised wrapped
failure exception object will be instead. These explanations are *only*
applicable when a failure object is serialized and deserialized (when it is
retained inside the python process that the exception was created in the
the original exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
def __init__(self, exc_info=None, exc_args=None,
exc_kwargs=None, exception_str='',
exc_type_names=None, cause=None,
traceback_str='', generated_on=None):
exc_type_names = utils.to_tuple(exc_type_names)
if not exc_type_names:
raise ValueError("Invalid exception type (no type names"
" provided)")
self._exc_type_names = exc_type_names
self._exc_info = utils.to_tuple(exc_info, on_none=None)
self._exc_args = utils.to_tuple(exc_args)
if exc_kwargs:
self._exc_kwargs = dict(exc_kwargs)
else:
self._exc_kwargs = {}
self._exception_str = exception_str
self._cause = cause
self._traceback_str = traceback_str
self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a ``sys.exc_info()`` tuple."""
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise_if_any(failures, cause_cls_finder=None):
"""Re-raise exceptions if argument is not empty.
If argument is empty list/tuple/iterator, this method returns
None. If argument is converted into a list with a
single ``Failure`` object in it, that failure is reraised. Else, a
:class:`~.WrappedFailure` exception is raised with the failure
list as causes.
"""
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
def check(self, *exc_classes):
"""Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
"""
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def pformat(self, traceback=False):
"""Pretty formats the failure object into a string."""
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
    @classmethod
    def _extract_cause(cls, exc_val):
        """Helper routine to extract nested cause (if any).

        Walks the ``__cause__``/``__context__`` chain of *exc_val*, turns
        each linked exception into a failure object, and returns the failure
        for the immediate cause (with the rest of the chain attached), or
        ``None`` when the exception has no chain.
        """
        # See: https://www.python.org/dev/peps/pep-3134/ for why/what
        # these are...
        #
        # '__cause__' attribute for explicitly chained exceptions
        # '__context__' attribute for implicitly chained exceptions
        # '__traceback__' attribute for the traceback
        #
        # See: https://www.python.org/dev/peps/pep-0415/ for why/what
        # the '__suppress_context__' is/means/implies...
        nested_exc_vals = []
        # Everything already visited; guards against cyclic
        # cause/context chains looping forever.
        seen = [exc_val]
        while True:
            suppress_context = getattr(
                exc_val, '__suppress_context__', False)
            if suppress_context:
                # Context was explicitly suppressed (PEP 415), so only the
                # explicit '__cause__' link is followed.
                attr_lookups = ['__cause__']
            else:
                attr_lookups = ['__cause__', '__context__']
            nested_exc_val = None
            for attr_name in attr_lookups:
                attr_val = getattr(exc_val, attr_name, None)
                if attr_val is None:
                    continue
                nested_exc_val = attr_val
            if nested_exc_val is None or nested_exc_val in seen:
                break
            seen.append(nested_exc_val)
            nested_exc_vals.append(nested_exc_val)
            exc_val = nested_exc_val
        last_cause = None
        # Convert innermost-first so each failure links to the failure built
        # from the exception it in turn caused.
        for exc_val in reversed(nested_exc_vals):
            f = cls.from_exception(exc_val, cause=last_cause,
                                   find_cause=False)
            last_cause = f
        return last_cause
@classmethod
def from_dict(cls, data):
"""Converts this from a dictionary to a object."""
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
def to_dict(self, include_args=True, include_kwargs=True):
"""Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
"""
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
def copy(self, deep=False):
"""Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
"""
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.matches
|
python
|
def matches(self, other):
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
|
Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L351-L362
|
[
"def _matches(self, other):\n if self is other:\n return True\n return (self.exception_type_names == other.exception_type_names and\n self.exception_args == other.exception_args and\n self.exception_kwargs == other.exception_kwargs and\n self.exception_str == other.exception_str and\n self.traceback_str == other.traceback_str and\n self.cause == other.cause and\n self.generated_on == other.generated_on)\n"
] |
class Failure(utils.StrMixin):
    """An immutable object that represents failure.
    Failure objects encapsulate exception information so that they can be
    re-used later to re-raise, inspect, examine, log, print, serialize,
    deserialize...
    For those who are curious, here are a few reasons why the original
    exception itself *may* not be reraised and instead a reraised wrapped
    failure exception object will be instead. These explanations are *only*
    applicable when a failure object is serialized and deserialized (when it is
    retained inside the python process that the exception was created in the
    the original exception can be reraised correctly without issue).
    * Traceback objects are not serializable/recreatable, since they contain
      references to stack frames at the location where the exception was
      raised. When a failure object is serialized and sent across a channel
      and recreated it is *not* possible to restore the original traceback and
      originating stack frames.
    * The original exception *type* can not *always* be guaranteed to be
      found, certain nodes can run code that is not accessible/available
      when the failure is being deserialized. Even if it was possible to use
      pickle safely (which it is not) it would not *always*
      be possible to find the originating exception or associated code in this
      situation.
    * The original exception *type* can not be guaranteed to be constructed in
      a *correct* manner. At the time of failure object creation the exception
      has already been created and the failure object can not assume it has
      knowledge (or the ability) to recreate the original type of the captured
      exception (this is especially hard if the original exception was created
      via a complex process via some custom exception ``__init__`` method).
    * The original exception *type* can not *always* be guaranteed to be
      constructed and/or imported in a *safe* manner. Importing *foreign*
      exception types dynamically can be problematic when not done
      correctly and in a safe manner; since failure objects can
      capture *any* exception it would be *unsafe* to try to import
      those exception types namespaces and modules on the receiver side
      dynamically (this would create similar issues as the ``pickle`` module
      has).
    TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
    backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
    simplify the methods and contents of this object...
    """
    BASE_EXCEPTIONS = {
        # py2.x old/legacy names...
        2: ('exceptions.BaseException', 'exceptions.Exception'),
        # py3.x new names...
        3: ('builtins.BaseException', 'builtins.Exception'),
    }
    """
    Root exceptions of all other python exceptions (as a string).
    See: https://docs.python.org/2/library/exceptions.html
    """
    #: Expected failure schema (in json schema format).
    SCHEMA = {
        "$ref": "#/definitions/cause",
        "definitions": {
            "cause": {
                "type": "object",
                'properties': {
                    'exc_args': {
                        "type": "array",
                        "minItems": 0,
                    },
                    'exc_kwargs': {
                        "type": "object",
                        "additionalProperties": True,
                    },
                    'exception_str': {
                        "type": "string",
                    },
                    'traceback_str': {
                        "type": "string",
                    },
                    'exc_type_names': {
                        "type": "array",
                        "items": {
                            "type": "string",
                        },
                        "minItems": 1,
                    },
                    'generated_on': {
                        "type": "array",
                        "items": {
                            "type": "number",
                        },
                        "minItems": 1,
                    },
                    'cause': {
                        "type": "object",
                        "$ref": "#/definitions/cause",
                    },
                },
                "required": [
                    "exception_str",
                    'traceback_str',
                    'exc_type_names',
                    'generated_on',
                ],
                "additionalProperties": True,
            },
        },
    }
    def __init__(self, exc_info=None, exc_args=None,
                 exc_kwargs=None, exception_str='',
                 exc_type_names=None, cause=None,
                 traceback_str='', generated_on=None):
        """Create a failure from pre-extracted exception data (callers
        normally use :meth:`from_exc_info`, :meth:`from_exception` or
        :meth:`from_dict` rather than calling this directly).
        """
        exc_type_names = utils.to_tuple(exc_type_names)
        if not exc_type_names:
            raise ValueError("Invalid exception type (no type names"
                             " provided)")
        self._exc_type_names = exc_type_names
        self._exc_info = utils.to_tuple(exc_info, on_none=None)
        self._exc_args = utils.to_tuple(exc_args)
        if exc_kwargs:
            self._exc_kwargs = dict(exc_kwargs)
        else:
            self._exc_kwargs = {}
        self._exception_str = exception_str
        self._cause = cause
        self._traceback_str = traceback_str
        self._generated_on = utils.to_tuple(generated_on, on_none=None)
    @classmethod
    def from_exc_info(cls, exc_info=None,
                      retain_exc_info=True,
                      cause=None, find_cause=True):
        """Creates a failure object from a ``sys.exc_info()`` tuple."""
        if exc_info is None:
            exc_info = sys.exc_info()
            if not any(exc_info):
                raise NoActiveException("No exception currently"
                                        " being handled")
        # This should always be the (type, value, traceback) tuple,
        # either from a prior sys.exc_info() call or from some other
        # creation...
        if len(exc_info) != 3:
            raise ValueError("Provided 'exc_info' must contain three"
                             " elements")
        exc_type, exc_val, exc_tb = exc_info
        try:
            if exc_type is None or exc_val is None:
                raise ValueError("Invalid exception tuple (exception"
                                 " type and exception value must"
                                 " be provided)")
            exc_args = tuple(getattr(exc_val, 'args', []))
            exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
            exc_type_names = utils.extract_roots(exc_type)
            if not exc_type_names:
                exc_type_name = reflection.get_class_name(
                    exc_val, truncate_builtins=False)
                # This should only be possible if the exception provided
                # was not really an exception...
                raise TypeError("Invalid exception type '%s' (not an"
                                " exception)" % (exc_type_name))
            exception_str = utils.exception_message(exc_val)
            if hasattr(exc_val, '__traceback_str__'):
                traceback_str = exc_val.__traceback_str__
            else:
                if exc_tb is not None:
                    traceback_str = '\n'.join(
                        traceback.format_exception(*exc_info))
                else:
                    traceback_str = ''
            if not retain_exc_info:
                exc_info = None
            if find_cause and cause is None:
                cause = cls._extract_cause(exc_val)
            return cls(exc_info=exc_info, exc_args=exc_args,
                       exc_kwargs=exc_kwargs, exception_str=exception_str,
                       exc_type_names=exc_type_names, cause=cause,
                       traceback_str=traceback_str,
                       generated_on=sys.version_info[0:2])
        finally:
            # Drop the local references to the exception/traceback so this
            # frame does not keep a reference cycle alive.
            del exc_type, exc_val, exc_tb
    @classmethod
    def from_exception(cls, exception, retain_exc_info=True,
                       cause=None, find_cause=True):
        """Creates a failure object from a exception instance."""
        exc_info = (
            type(exception),
            exception,
            getattr(exception, '__traceback__', None)
        )
        return cls.from_exc_info(exc_info=exc_info,
                                 retain_exc_info=retain_exc_info,
                                 cause=cause, find_cause=find_cause)
    @classmethod
    def validate(cls, data):
        """Validate input data matches expected failure ``dict`` format."""
        try:
            jsonschema.validate(
                data, cls.SCHEMA,
                # See: https://github.com/Julian/jsonschema/issues/148
                # NOTE(review): the ``types=`` keyword was removed in
                # jsonschema>=4.0 -- confirm the pinned jsonschema version.
                types={'array': (list, tuple)})
        except jsonschema.ValidationError as e:
            raise InvalidFormat("Failure data not of the"
                                " expected format: %s" % (e.message))
        else:
            # Ensure that all 'exc_type_names' originate from one of
            # base exceptions, because those are the root exceptions that
            # python mandates/provides and anything else is invalid...
            causes = collections.deque([data])
            while causes:
                cause = causes.popleft()
                try:
                    generated_on = cause['generated_on']
                    ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
                except (KeyError, IndexError):
                    ok_bases = []
                root_exc_type = cause['exc_type_names'][-1]
                if root_exc_type not in ok_bases:
                    raise InvalidFormat(
                        "Failure data 'exc_type_names' must"
                        " have an initial exception type that is one"
                        " of %s types: '%s' is not one of those"
                        " types" % (ok_bases, root_exc_type))
                sub_cause = cause.get('cause')
                if sub_cause is not None:
                    causes.append(sub_cause)
    def _matches(self, other):
        # Field-by-field comparison; deliberately ignores the (possibly
        # unpicklable) live ``exc_info`` tuple, which ``__eq__`` compares
        # separately.
        if self is other:
            return True
        return (self.exception_type_names == other.exception_type_names and
                self.exception_args == other.exception_args and
                self.exception_kwargs == other.exception_kwargs and
                self.exception_str == other.exception_str and
                self.traceback_str == other.traceback_str and
                self.cause == other.cause and
                self.generated_on == other.generated_on)
    def __eq__(self, other):
        if not isinstance(other, Failure):
            return NotImplemented
        return (self._matches(other) and
                utils.are_equal_exc_info_tuples(self.exc_info,
                                                other.exc_info))
    def __ne__(self, other):
        return not (self == other)
    # NOTE(imelnikov): obj.__hash__() should return same values for equal
    # objects, so we should redefine __hash__. Failure equality semantics
    # is a bit complicated, so for now we just mark Failure objects as
    # unhashable. See python docs on object.__hash__ for more info:
    # http://docs.python.org/2/reference/datamodel.html#object.__hash__
    __hash__ = None
    @property
    def exception(self):
        """Exception value, or ``None`` if exception value is not present.
        Exception value *may* be lost during serialization.
        """
        if self._exc_info:
            return self._exc_info[1]
        else:
            return None
    @property
    def generated_on(self):
        """Python major & minor version tuple this failure was generated on.
        May be ``None`` if not provided during creation (or after if lost).
        """
        return self._generated_on
    @property
    def exception_str(self):
        """String representation of exception."""
        return self._exception_str
    @property
    def exception_args(self):
        """Tuple of arguments given to the exception constructor."""
        return self._exc_args
    @property
    def exception_kwargs(self):
        """Dict of keyword arguments given to the exception constructor."""
        return self._exc_kwargs
    @property
    def exception_type_names(self):
        """Tuple of current exception type **names** (in MRO order)."""
        return self._exc_type_names
    @property
    def exc_info(self):
        """Exception info tuple or ``None``.
        See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
        the contents of this tuple are (if none, then no contents can
        be examined).
        """
        return self._exc_info
    @property
    def traceback_str(self):
        """Exception traceback as string."""
        return self._traceback_str
    @staticmethod
    def reraise_if_any(failures, cause_cls_finder=None):
        """Re-raise exceptions if argument is not empty.
        If argument is empty list/tuple/iterator, this method returns
        None. If argument is converted into a list with a
        single ``Failure`` object in it, that failure is reraised. Else, a
        :class:`~.WrappedFailure` exception is raised with the failure
        list as causes.
        """
        if not isinstance(failures, (list, tuple)):
            # Convert generators/other into a list...
            failures = list(failures)
        if len(failures) == 1:
            failures[0].reraise(cause_cls_finder=cause_cls_finder)
        elif len(failures) > 1:
            raise WrappedFailure(failures)
    def reraise(self, cause_cls_finder=None):
        """Re-raise captured exception (possibly trying to recreate)."""
        if self._exc_info:
            six.reraise(*self._exc_info)
        else:
            # Attempt to regenerate the full chain (and then raise
            # from the root); without a traceback, oh well...
            root = None
            parent = None
            for cause in itertools.chain([self], self.iter_causes()):
                if cause_cls_finder is not None:
                    cause_cls = cause_cls_finder(cause)
                else:
                    cause_cls = None
                if cause_cls is None:
                    # Unable to find where this cause came from, give up...
                    raise WrappedFailure([self])
                exc = cause_cls(
                    *cause.exception_args, **cause.exception_kwargs)
                # Saving this will ensure that if this same exception
                # is serialized again that we will extract the traceback
                # from it directly (thus proxying along the original
                # traceback as much as we can).
                exc.__traceback_str__ = cause.traceback_str
                if root is None:
                    root = exc
                if parent is not None:
                    parent.__cause__ = exc
                parent = exc
            six.reraise(type(root), root, tb=None)
    def check(self, *exc_classes):
        """Check if any of ``exc_classes`` caused the failure.
        Arguments of this method can be exception types or type
        names (strings **fully qualified**). If captured exception is
        an instance of exception of given type, the corresponding argument
        is returned, otherwise ``None`` is returned.
        """
        for cls in exc_classes:
            cls_name = utils.cls_to_cls_name(cls)
            if cls_name in self._exc_type_names:
                return cls
        return None
    @property
    def cause(self):
        """Nested failure *cause* of this failure.
        This property is typically only useful on 3.x or newer versions
        of python as older versions do **not** have associated causes.
        Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
        this is examining to find failure causes.
        """
        return self._cause
    def __unicode__(self):
        """Return the pretty-formatted failure text."""
        return self.pformat()
    def pformat(self, traceback=False):
        """Pretty formats the failure object into a string."""
        buf = six.StringIO()
        if not self._exc_type_names:
            buf.write('Failure: %s' % (self._exception_str))
        else:
            buf.write('Failure: %s: %s' % (self._exc_type_names[0],
                                           self._exception_str))
        if traceback:
            if self._traceback_str is not None:
                traceback_str = self._traceback_str.rstrip()
            else:
                traceback_str = None
            if traceback_str:
                buf.write(os.linesep)
                buf.write(traceback_str)
            else:
                buf.write(os.linesep)
                buf.write('Traceback not available.')
        return buf.getvalue()
    def iter_causes(self):
        """Iterate over all causes."""
        curr = self._cause
        while curr is not None:
            yield curr
            curr = curr._cause
    def __getstate__(self):
        """Pickle support: serialize via :meth:`to_dict`."""
        dct = self.to_dict()
        if self._exc_info:
            # Avoids 'TypeError: can't pickle traceback objects'
            dct['exc_info'] = self._exc_info[0:2]
        return dct
    def __setstate__(self, dct):
        """Pickle support: restore state from a :meth:`__getstate__` dict."""
        self._exception_str = dct['exception_str']
        if 'exc_args' in dct:
            self._exc_args = tuple(dct['exc_args'])
        else:
            # Guess we got an older version somehow, before this
            # was added, so at that point just set to an empty tuple...
            self._exc_args = ()
        if 'exc_kwargs' in dct:
            self._exc_kwargs = dict(dct['exc_kwargs'])
        else:
            self._exc_kwargs = {}
        self._traceback_str = dct['traceback_str']
        self._exc_type_names = dct['exc_type_names']
        self._generated_on = dct['generated_on']
        if 'exc_info' in dct:
            # Tracebacks can't be serialized/deserialized, but since we
            # provide a traceback string (and more) this should be
            # acceptable...
            #
            # TODO(harlowja): in the future we could do something like
            # what the twisted people have done, see for example
            # twisted-13.0.0/twisted/python/failure.py#L89 for how they
            # created a fake traceback object...
            exc_info = list(dct['exc_info'])
            while len(exc_info) < 3:
                exc_info.append(None)
            self._exc_info = tuple(exc_info[0:3])
        else:
            self._exc_info = None
        cause = dct.get('cause')
        if cause is not None:
            cause = self.from_dict(cause)
        self._cause = cause
    @classmethod
    def _extract_cause(cls, exc_val):
        """Helper routine to extract nested cause (if any)."""
        # See: https://www.python.org/dev/peps/pep-3134/ for why/what
        # these are...
        #
        # '__cause__' attribute for explicitly chained exceptions
        # '__context__' attribute for implicitly chained exceptions
        # '__traceback__' attribute for the traceback
        #
        # See: https://www.python.org/dev/peps/pep-0415/ for why/what
        # the '__suppress_context__' is/means/implies...
        nested_exc_vals = []
        # Guards against cyclic cause/context chains looping forever.
        seen = [exc_val]
        while True:
            suppress_context = getattr(
                exc_val, '__suppress_context__', False)
            if suppress_context:
                attr_lookups = ['__cause__']
            else:
                attr_lookups = ['__cause__', '__context__']
            nested_exc_val = None
            for attr_name in attr_lookups:
                attr_val = getattr(exc_val, attr_name, None)
                if attr_val is None:
                    continue
                nested_exc_val = attr_val
            if nested_exc_val is None or nested_exc_val in seen:
                break
            seen.append(nested_exc_val)
            nested_exc_vals.append(nested_exc_val)
            exc_val = nested_exc_val
        last_cause = None
        for exc_val in reversed(nested_exc_vals):
            f = cls.from_exception(exc_val, cause=last_cause,
                                   find_cause=False)
            last_cause = f
        return last_cause
    @classmethod
    def from_dict(cls, data):
        """Converts a dictionary (see :meth:`to_dict`) back into an object."""
        data = dict(data)
        cause = data.get('cause')
        if cause is not None:
            data['cause'] = cls.from_dict(cause)
        return cls(**data)
    def to_dict(self, include_args=True, include_kwargs=True):
        """Converts this object to a dictionary.
        :param include_args: boolean indicating whether to include the
        exception args in the output.
        :param include_kwargs: boolean indicating whether to include the
        exception kwargs in the output.
        """
        data = {
            'exception_str': self.exception_str,
            'traceback_str': self.traceback_str,
            'exc_type_names': self.exception_type_names,
            'exc_args': self.exception_args if include_args else tuple(),
            'exc_kwargs': self.exception_kwargs if include_kwargs else {},
            'generated_on': self.generated_on,
        }
        if self._cause is not None:
            data['cause'] = self._cause.to_dict(include_args=include_args,
                                                include_kwargs=include_kwargs)
        return data
    def copy(self, deep=False):
        """Copies this object (shallow or deep).
        :param deep: boolean indicating whether to do a deep copy (or a
        shallow copy).
        """
        cause = self._cause
        if cause is not None:
            cause = cause.copy(deep=deep)
        exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
        exc_args = self.exception_args
        exc_kwargs = self.exception_kwargs
        if deep:
            exc_args = copy.deepcopy(exc_args)
            exc_kwargs = copy.deepcopy(exc_kwargs)
        else:
            exc_args = tuple(exc_args)
            exc_kwargs = exc_kwargs.copy()
        # These are just simple int/strings, so deep copy doesn't really
        # matter/apply here (as they are immutable anyway).
        exc_type_names = tuple(self._exc_type_names)
        generated_on = self._generated_on
        if generated_on:
            generated_on = tuple(generated_on)
        # NOTE(harlowja): use `self.__class__` here so that we can work
        # with subclasses (assuming anyone makes one).
        return self.__class__(exc_info=exc_info,
                              exception_str=self.exception_str,
                              traceback_str=self.traceback_str,
                              exc_args=exc_args,
                              exc_kwargs=exc_kwargs,
                              exc_type_names=exc_type_names,
                              cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.reraise_if_any
|
python
|
def reraise_if_any(failures, cause_cls_finder=None):
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
|
Re-raise exceptions if argument is not empty.
If argument is empty list/tuple/iterator, this method returns
None. If argument is converted into a list with a
single ``Failure`` object in it, that failure is reraised. Else, a
:class:`~.WrappedFailure` exception is raised with the failure
list as causes.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L436-L451
| null |
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
For those who are curious, here are a few reasons why the original
exception itself *may* not be reraised and instead a reraised wrapped
failure exception object will be instead. These explanations are *only*
applicable when a failure object is serialized and deserialized (when it is
retained inside the python process that the exception was created in the
the original exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
def __init__(self, exc_info=None, exc_args=None,
exc_kwargs=None, exception_str='',
exc_type_names=None, cause=None,
traceback_str='', generated_on=None):
exc_type_names = utils.to_tuple(exc_type_names)
if not exc_type_names:
raise ValueError("Invalid exception type (no type names"
" provided)")
self._exc_type_names = exc_type_names
self._exc_info = utils.to_tuple(exc_info, on_none=None)
self._exc_args = utils.to_tuple(exc_args)
if exc_kwargs:
self._exc_kwargs = dict(exc_kwargs)
else:
self._exc_kwargs = {}
self._exception_str = exception_str
self._cause = cause
self._traceback_str = traceback_str
self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a ``sys.exc_info()`` tuple."""
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def validate(cls, data):
"""Validate input data matches expected failure ``dict`` format."""
try:
jsonschema.validate(
data, cls.SCHEMA,
# See: https://github.com/Julian/jsonschema/issues/148
types={'array': (list, tuple)})
except jsonschema.ValidationError as e:
raise InvalidFormat("Failure data not of the"
" expected format: %s" % (e.message))
else:
# Ensure that all 'exc_type_names' originate from one of
# base exceptions, because those are the root exceptions that
# python mandates/provides and anything else is invalid...
causes = collections.deque([data])
while causes:
cause = causes.popleft()
try:
generated_on = cause['generated_on']
ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
except (KeyError, IndexError):
ok_bases = []
root_exc_type = cause['exc_type_names'][-1]
if root_exc_type not in ok_bases:
raise InvalidFormat(
"Failure data 'exc_type_names' must"
" have an initial exception type that is one"
" of %s types: '%s' is not one of those"
" types" % (ok_bases, root_exc_type))
sub_cause = cause.get('cause')
if sub_cause is not None:
causes.append(sub_cause)
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
def check(self, *exc_classes):
"""Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
"""
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def pformat(self, traceback=False):
"""Pretty formats the failure object into a string."""
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
@classmethod
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
@classmethod
def from_dict(cls, data):
"""Converts this from a dictionary to a object."""
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
def to_dict(self, include_args=True, include_kwargs=True):
"""Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
"""
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
def copy(self, deep=False):
"""Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
"""
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.reraise
|
python
|
def reraise(self, cause_cls_finder=None):
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
|
Re-raise captured exception (possibly trying to recreate).
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L453-L482
|
[
"def iter_causes(self):\n \"\"\"Iterate over all causes.\"\"\"\n curr = self._cause\n while curr is not None:\n yield curr\n curr = curr._cause\n"
] |
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
For those who are curious, here are a few reasons why the original
exception itself *may* not be reraised and instead a reraised wrapped
failure exception object will be instead. These explanations are *only*
applicable when a failure object is serialized and deserialized (when it is
retained inside the python process that the exception was created in the
the original exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
def __init__(self, exc_info=None, exc_args=None,
exc_kwargs=None, exception_str='',
exc_type_names=None, cause=None,
traceback_str='', generated_on=None):
exc_type_names = utils.to_tuple(exc_type_names)
if not exc_type_names:
raise ValueError("Invalid exception type (no type names"
" provided)")
self._exc_type_names = exc_type_names
self._exc_info = utils.to_tuple(exc_info, on_none=None)
self._exc_args = utils.to_tuple(exc_args)
if exc_kwargs:
self._exc_kwargs = dict(exc_kwargs)
else:
self._exc_kwargs = {}
self._exception_str = exception_str
self._cause = cause
self._traceback_str = traceback_str
self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a ``sys.exc_info()`` tuple."""
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def validate(cls, data):
"""Validate input data matches expected failure ``dict`` format."""
try:
jsonschema.validate(
data, cls.SCHEMA,
# See: https://github.com/Julian/jsonschema/issues/148
types={'array': (list, tuple)})
except jsonschema.ValidationError as e:
raise InvalidFormat("Failure data not of the"
" expected format: %s" % (e.message))
else:
# Ensure that all 'exc_type_names' originate from one of
# base exceptions, because those are the root exceptions that
# python mandates/provides and anything else is invalid...
causes = collections.deque([data])
while causes:
cause = causes.popleft()
try:
generated_on = cause['generated_on']
ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
except (KeyError, IndexError):
ok_bases = []
root_exc_type = cause['exc_type_names'][-1]
if root_exc_type not in ok_bases:
raise InvalidFormat(
"Failure data 'exc_type_names' must"
" have an initial exception type that is one"
" of %s types: '%s' is not one of those"
" types" % (ok_bases, root_exc_type))
sub_cause = cause.get('cause')
if sub_cause is not None:
causes.append(sub_cause)
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise_if_any(failures, cause_cls_finder=None):
"""Re-raise exceptions if argument is not empty.
If argument is empty list/tuple/iterator, this method returns
None. If argument is converted into a list with a
single ``Failure`` object in it, that failure is reraised. Else, a
:class:`~.WrappedFailure` exception is raised with the failure
list as causes.
"""
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
def check(self, *exc_classes):
"""Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
"""
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def pformat(self, traceback=False):
"""Pretty formats the failure object into a string."""
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
@classmethod
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
@classmethod
def from_dict(cls, data):
"""Converts this from a dictionary to a object."""
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
def to_dict(self, include_args=True, include_kwargs=True):
"""Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
"""
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
def copy(self, deep=False):
"""Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
"""
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.check
|
python
|
def check(self, *exc_classes):
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
|
Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L484-L496
|
[
"def cls_to_cls_name(cls):\n if isinstance(cls, type):\n cls_name = reflection.get_class_name(cls, truncate_builtins=False)\n else:\n cls_name = str(cls)\n return cls_name\n"
] |
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
For those who are curious, here are a few reasons why the original
exception itself *may* not be reraised and instead a reraised wrapped
failure exception object will be instead. These explanations are *only*
applicable when a failure object is serialized and deserialized (when it is
retained inside the python process that the exception was created in the
the original exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
def __init__(self, exc_info=None, exc_args=None,
exc_kwargs=None, exception_str='',
exc_type_names=None, cause=None,
traceback_str='', generated_on=None):
exc_type_names = utils.to_tuple(exc_type_names)
if not exc_type_names:
raise ValueError("Invalid exception type (no type names"
" provided)")
self._exc_type_names = exc_type_names
self._exc_info = utils.to_tuple(exc_info, on_none=None)
self._exc_args = utils.to_tuple(exc_args)
if exc_kwargs:
self._exc_kwargs = dict(exc_kwargs)
else:
self._exc_kwargs = {}
self._exception_str = exception_str
self._cause = cause
self._traceback_str = traceback_str
self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a ``sys.exc_info()`` tuple."""
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def validate(cls, data):
"""Validate input data matches expected failure ``dict`` format."""
try:
jsonschema.validate(
data, cls.SCHEMA,
# See: https://github.com/Julian/jsonschema/issues/148
types={'array': (list, tuple)})
except jsonschema.ValidationError as e:
raise InvalidFormat("Failure data not of the"
" expected format: %s" % (e.message))
else:
# Ensure that all 'exc_type_names' originate from one of
# base exceptions, because those are the root exceptions that
# python mandates/provides and anything else is invalid...
causes = collections.deque([data])
while causes:
cause = causes.popleft()
try:
generated_on = cause['generated_on']
ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
except (KeyError, IndexError):
ok_bases = []
root_exc_type = cause['exc_type_names'][-1]
if root_exc_type not in ok_bases:
raise InvalidFormat(
"Failure data 'exc_type_names' must"
" have an initial exception type that is one"
" of %s types: '%s' is not one of those"
" types" % (ok_bases, root_exc_type))
sub_cause = cause.get('cause')
if sub_cause is not None:
causes.append(sub_cause)
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise_if_any(failures, cause_cls_finder=None):
"""Re-raise exceptions if argument is not empty.
If argument is empty list/tuple/iterator, this method returns
None. If argument is converted into a list with a
single ``Failure`` object in it, that failure is reraised. Else, a
:class:`~.WrappedFailure` exception is raised with the failure
list as causes.
"""
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def pformat(self, traceback=False):
"""Pretty formats the failure object into a string."""
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
@classmethod
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
@classmethod
def from_dict(cls, data):
"""Converts this from a dictionary to a object."""
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
def to_dict(self, include_args=True, include_kwargs=True):
"""Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
"""
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
def copy(self, deep=False):
"""Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
"""
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.pformat
|
python
|
def pformat(self, traceback=False):
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
|
Pretty formats the failure object into a string.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L513-L532
| null |
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
For those who are curious, here are a few reasons why the original
exception itself *may* not be reraised and instead a reraised wrapped
failure exception object will be instead. These explanations are *only*
applicable when a failure object is serialized and deserialized (when it is
retained inside the python process that the exception was created in the
the original exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
def __init__(self, exc_info=None, exc_args=None,
exc_kwargs=None, exception_str='',
exc_type_names=None, cause=None,
traceback_str='', generated_on=None):
exc_type_names = utils.to_tuple(exc_type_names)
if not exc_type_names:
raise ValueError("Invalid exception type (no type names"
" provided)")
self._exc_type_names = exc_type_names
self._exc_info = utils.to_tuple(exc_info, on_none=None)
self._exc_args = utils.to_tuple(exc_args)
if exc_kwargs:
self._exc_kwargs = dict(exc_kwargs)
else:
self._exc_kwargs = {}
self._exception_str = exception_str
self._cause = cause
self._traceback_str = traceback_str
self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a ``sys.exc_info()`` tuple."""
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def validate(cls, data):
"""Validate input data matches expected failure ``dict`` format."""
try:
jsonschema.validate(
data, cls.SCHEMA,
# See: https://github.com/Julian/jsonschema/issues/148
types={'array': (list, tuple)})
except jsonschema.ValidationError as e:
raise InvalidFormat("Failure data not of the"
" expected format: %s" % (e.message))
else:
# Ensure that all 'exc_type_names' originate from one of
# base exceptions, because those are the root exceptions that
# python mandates/provides and anything else is invalid...
causes = collections.deque([data])
while causes:
cause = causes.popleft()
try:
generated_on = cause['generated_on']
ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
except (KeyError, IndexError):
ok_bases = []
root_exc_type = cause['exc_type_names'][-1]
if root_exc_type not in ok_bases:
raise InvalidFormat(
"Failure data 'exc_type_names' must"
" have an initial exception type that is one"
" of %s types: '%s' is not one of those"
" types" % (ok_bases, root_exc_type))
sub_cause = cause.get('cause')
if sub_cause is not None:
causes.append(sub_cause)
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise_if_any(failures, cause_cls_finder=None):
"""Re-raise exceptions if argument is not empty.
If argument is empty list/tuple/iterator, this method returns
None. If argument is converted into a list with a
single ``Failure`` object in it, that failure is reraised. Else, a
:class:`~.WrappedFailure` exception is raised with the failure
list as causes.
"""
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
def check(self, *exc_classes):
"""Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
"""
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
@classmethod
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
@classmethod
def from_dict(cls, data):
"""Converts this from a dictionary to a object."""
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
def to_dict(self, include_args=True, include_kwargs=True):
"""Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
"""
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
def copy(self, deep=False):
"""Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
"""
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.iter_causes
|
python
|
def iter_causes(self):
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
|
Iterate over all causes.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L534-L539
| null |
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
For those who are curious, here are a few reasons why the original
exception itself *may* not be reraised and instead a reraised wrapped
failure exception object will be instead. These explanations are *only*
applicable when a failure object is serialized and deserialized (when it is
retained inside the python process that the exception was created in the
the original exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
    def __init__(self, exc_info=None, exc_args=None,
                 exc_kwargs=None, exception_str='',
                 exc_type_names=None, cause=None,
                 traceback_str='', generated_on=None):
        """Capture the state of a failure.

        :param exc_info: optional ``sys.exc_info()``-style tuple
        :param exc_args: positional args the exception was created with
        :param exc_kwargs: keyword args the exception was created with
        :param exception_str: stringified exception message
        :param exc_type_names: exception type names in MRO order (required;
            a falsy value raises ``ValueError``)
        :param cause: optional nested failure that caused this one
        :param traceback_str: formatted traceback text (may be empty)
        :param generated_on: python (major, minor) version this failure
            was generated on (or ``None`` if unknown)
        :raises ValueError: if no exception type names are provided
        """
        # Normalize to a tuple first so both lists and single names work.
        exc_type_names = utils.to_tuple(exc_type_names)
        if not exc_type_names:
            raise ValueError("Invalid exception type (no type names"
                             " provided)")
        self._exc_type_names = exc_type_names
        # on_none=None keeps "no exc_info" distinct from an empty tuple.
        self._exc_info = utils.to_tuple(exc_info, on_none=None)
        self._exc_args = utils.to_tuple(exc_args)
        if exc_kwargs:
            # Defensive copy so later caller mutation can't leak in.
            self._exc_kwargs = dict(exc_kwargs)
        else:
            self._exc_kwargs = {}
        self._exception_str = exception_str
        self._cause = cause
        self._traceback_str = traceback_str
        self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a ``sys.exc_info()`` tuple."""
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def validate(cls, data):
"""Validate input data matches expected failure ``dict`` format."""
try:
jsonschema.validate(
data, cls.SCHEMA,
# See: https://github.com/Julian/jsonschema/issues/148
types={'array': (list, tuple)})
except jsonschema.ValidationError as e:
raise InvalidFormat("Failure data not of the"
" expected format: %s" % (e.message))
else:
# Ensure that all 'exc_type_names' originate from one of
# base exceptions, because those are the root exceptions that
# python mandates/provides and anything else is invalid...
causes = collections.deque([data])
while causes:
cause = causes.popleft()
try:
generated_on = cause['generated_on']
ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
except (KeyError, IndexError):
ok_bases = []
root_exc_type = cause['exc_type_names'][-1]
if root_exc_type not in ok_bases:
raise InvalidFormat(
"Failure data 'exc_type_names' must"
" have an initial exception type that is one"
" of %s types: '%s' is not one of those"
" types" % (ok_bases, root_exc_type))
sub_cause = cause.get('cause')
if sub_cause is not None:
causes.append(sub_cause)
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise_if_any(failures, cause_cls_finder=None):
"""Re-raise exceptions if argument is not empty.
If argument is empty list/tuple/iterator, this method returns
None. If argument is converted into a list with a
single ``Failure`` object in it, that failure is reraised. Else, a
:class:`~.WrappedFailure` exception is raised with the failure
list as causes.
"""
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
def check(self, *exc_classes):
"""Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
"""
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def pformat(self, traceback=False):
"""Pretty formats the failure object into a string."""
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
@classmethod
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
@classmethod
def from_dict(cls, data):
"""Converts this from a dictionary to a object."""
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
def to_dict(self, include_args=True, include_kwargs=True):
"""Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
"""
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
def copy(self, deep=False):
"""Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
"""
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure._extract_cause
|
python
|
def _extract_cause(cls, exc_val):
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
|
Helper routine to extract nested cause (if any).
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L584-L620
| null |
class Failure(utils.StrMixin):
    """An immutable object that represents failure.

    Failure objects encapsulate exception information so that they can be
    re-used later to re-raise, inspect, examine, log, print, serialize,
    deserialize...

    For those who are curious, here are a few reasons why the original
    exception itself *may* not be reraised and instead a reraised wrapped
    failure exception object will be instead. These explanations are *only*
    applicable when a failure object is serialized and deserialized (when it
    is retained inside the python process that the exception was created in
    the original exception can be reraised correctly without issue).

    * Traceback objects are not serializable/recreatable, since they contain
      references to stack frames at the location where the exception was
      raised. When a failure object is serialized and sent across a channel
      and recreated it is *not* possible to restore the original traceback
      and originating stack frames.
    * The original exception *type* can not *always* be guaranteed to be
      found, certain nodes can run code that is not accessible/available
      when the failure is being deserialized. Even if it was possible to use
      pickle safely (which it is not) it would not *always* be possible to
      find the originating exception or associated code in this situation.
    * The original exception *type* can not be guaranteed to be constructed
      in a *correct* manner. At the time of failure object creation the
      exception has already been created and the failure object can not
      assume it has knowledge (or the ability) to recreate the original type
      of the captured exception (this is especially hard if the original
      exception was created via a complex process via some custom exception
      ``__init__`` method).
    * The original exception *type* can not *always* be guaranteed to be
      constructed and/or imported in a *safe* manner. Importing *foreign*
      exception types dynamically can be problematic when not done correctly
      and in a safe manner; since failure objects can capture *any*
      exception it would be *unsafe* to try to import those exception types
      namespaces and modules on the receiver side dynamically (this would
      create similar issues as the ``pickle`` module has).

    TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
    backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
    simplify the methods and contents of this object...
    """

    BASE_EXCEPTIONS = {
        # py2.x old/legacy names...
        2: ('exceptions.BaseException', 'exceptions.Exception'),
        # py3.x new names...
        3: ('builtins.BaseException', 'builtins.Exception'),
    }
    """
    Root exceptions of all other python exceptions (as a string).

    See: https://docs.python.org/2/library/exceptions.html
    """

    #: Expected failure schema (in json schema format).
    SCHEMA = {
        "$ref": "#/definitions/cause",
        "definitions": {
            "cause": {
                "type": "object",
                'properties': {
                    'exc_args': {
                        "type": "array",
                        "minItems": 0,
                    },
                    'exc_kwargs': {
                        "type": "object",
                        "additionalProperties": True,
                    },
                    'exception_str': {
                        "type": "string",
                    },
                    'traceback_str': {
                        "type": "string",
                    },
                    'exc_type_names': {
                        "type": "array",
                        "items": {
                            "type": "string",
                        },
                        "minItems": 1,
                    },
                    'generated_on': {
                        "type": "array",
                        "items": {
                            "type": "number",
                        },
                        "minItems": 1,
                    },
                    'cause': {
                        "type": "object",
                        "$ref": "#/definitions/cause",
                    },
                },
                "required": [
                    "exception_str",
                    'traceback_str',
                    'exc_type_names',
                    'generated_on',
                ],
                "additionalProperties": True,
            },
        },
    }

    def __init__(self, exc_info=None, exc_args=None,
                 exc_kwargs=None, exception_str='',
                 exc_type_names=None, cause=None,
                 traceback_str='', generated_on=None):
        """Capture the state of a failure.

        :param exc_info: optional ``sys.exc_info()``-style tuple
        :param exc_args: positional args the exception was created with
        :param exc_kwargs: keyword args the exception was created with
        :param exception_str: stringified exception message
        :param exc_type_names: exception type names in MRO order (required)
        :param cause: optional nested failure that caused this one
        :param traceback_str: formatted traceback text (may be empty)
        :param generated_on: python (major, minor) version tuple of origin
        :raises ValueError: if no exception type names are provided
        """
        exc_type_names = utils.to_tuple(exc_type_names)
        if not exc_type_names:
            raise ValueError("Invalid exception type (no type names"
                             " provided)")
        self._exc_type_names = exc_type_names
        self._exc_info = utils.to_tuple(exc_info, on_none=None)
        self._exc_args = utils.to_tuple(exc_args)
        if exc_kwargs:
            self._exc_kwargs = dict(exc_kwargs)
        else:
            self._exc_kwargs = {}
        self._exception_str = exception_str
        self._cause = cause
        self._traceback_str = traceback_str
        self._generated_on = utils.to_tuple(generated_on, on_none=None)

    @classmethod
    def from_exc_info(cls, exc_info=None,
                      retain_exc_info=True,
                      cause=None, find_cause=True):
        """Creates a failure object from a ``sys.exc_info()`` tuple."""
        if exc_info is None:
            exc_info = sys.exc_info()
            if not any(exc_info):
                raise NoActiveException("No exception currently"
                                        " being handled")
        # This should always be the (type, value, traceback) tuple,
        # either from a prior sys.exc_info() call or from some other
        # creation...
        if len(exc_info) != 3:
            raise ValueError("Provided 'exc_info' must contain three"
                             " elements")
        exc_type, exc_val, exc_tb = exc_info
        try:
            if exc_type is None or exc_val is None:
                raise ValueError("Invalid exception tuple (exception"
                                 " type and exception value must"
                                 " be provided)")
            exc_args = tuple(getattr(exc_val, 'args', []))
            exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
            exc_type_names = utils.extract_roots(exc_type)
            if not exc_type_names:
                exc_type_name = reflection.get_class_name(
                    exc_val, truncate_builtins=False)
                # This should only be possible if the exception provided
                # was not really an exception...
                raise TypeError("Invalid exception type '%s' (not an"
                                " exception)" % (exc_type_name))
            exception_str = utils.exception_message(exc_val)
            if hasattr(exc_val, '__traceback_str__'):
                traceback_str = exc_val.__traceback_str__
            else:
                if exc_tb is not None:
                    traceback_str = '\n'.join(
                        traceback.format_exception(*exc_info))
                else:
                    traceback_str = ''
            if not retain_exc_info:
                exc_info = None
            if find_cause and cause is None:
                cause = cls._extract_cause(exc_val)
            return cls(exc_info=exc_info, exc_args=exc_args,
                       exc_kwargs=exc_kwargs, exception_str=exception_str,
                       exc_type_names=exc_type_names, cause=cause,
                       traceback_str=traceback_str,
                       generated_on=sys.version_info[0:2])
        finally:
            # Break potential reference cycles with the traceback.
            del exc_type, exc_val, exc_tb

    @classmethod
    def from_exception(cls, exception, retain_exc_info=True,
                       cause=None, find_cause=True):
        """Creates a failure object from an exception instance."""
        exc_info = (
            type(exception),
            exception,
            getattr(exception, '__traceback__', None)
        )
        return cls.from_exc_info(exc_info=exc_info,
                                 retain_exc_info=retain_exc_info,
                                 cause=cause, find_cause=find_cause)

    @classmethod
    def validate(cls, data):
        """Validate input data matches expected failure ``dict`` format."""
        try:
            jsonschema.validate(
                data, cls.SCHEMA,
                # See: https://github.com/Julian/jsonschema/issues/148
                types={'array': (list, tuple)})
        except jsonschema.ValidationError as e:
            raise InvalidFormat("Failure data not of the"
                                " expected format: %s" % (e.message))
        else:
            # Ensure that all 'exc_type_names' originate from one of
            # base exceptions, because those are the root exceptions that
            # python mandates/provides and anything else is invalid...
            causes = collections.deque([data])
            while causes:
                cause = causes.popleft()
                try:
                    generated_on = cause['generated_on']
                    ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
                except (KeyError, IndexError):
                    ok_bases = []
                root_exc_type = cause['exc_type_names'][-1]
                if root_exc_type not in ok_bases:
                    raise InvalidFormat(
                        "Failure data 'exc_type_names' must"
                        " have an initial exception type that is one"
                        " of %s types: '%s' is not one of those"
                        " types" % (ok_bases, root_exc_type))
                sub_cause = cause.get('cause')
                if sub_cause is not None:
                    causes.append(sub_cause)

    def _matches(self, other):
        # Field-level equality (everything that survives serialization).
        if self is other:
            return True
        return (self.exception_type_names == other.exception_type_names and
                self.exception_args == other.exception_args and
                self.exception_kwargs == other.exception_kwargs and
                self.exception_str == other.exception_str and
                self.traceback_str == other.traceback_str and
                self.cause == other.cause and
                self.generated_on == other.generated_on)

    def matches(self, other):
        """Checks if another object is equivalent to this object.

        :returns: checks if another object is equivalent to this object
        :rtype: boolean
        """
        if not isinstance(other, Failure):
            return False
        if self.exc_info is None or other.exc_info is None:
            return self._matches(other)
        else:
            return self == other

    def __eq__(self, other):
        if not isinstance(other, Failure):
            return NotImplemented
        return (self._matches(other) and
                utils.are_equal_exc_info_tuples(self.exc_info,
                                                other.exc_info))

    def __ne__(self, other):
        return not (self == other)

    # NOTE(imelnikov): obj.__hash__() should return same values for equal
    # objects, so we should redefine __hash__. Failure equality semantics
    # is a bit complicated, so for now we just mark Failure objects as
    # unhashable. See python docs on object.__hash__ for more info:
    # http://docs.python.org/2/reference/datamodel.html#object.__hash__
    __hash__ = None

    @property
    def exception(self):
        """Exception value, or ``None`` if exception value is not present.

        Exception value *may* be lost during serialization.
        """
        if self._exc_info:
            return self._exc_info[1]
        else:
            return None

    @property
    def generated_on(self):
        """Python major & minor version tuple this failure was generated on.

        May be ``None`` if not provided during creation (or after if lost).
        """
        return self._generated_on

    @property
    def exception_str(self):
        """String representation of exception."""
        return self._exception_str

    @property
    def exception_args(self):
        """Tuple of arguments given to the exception constructor."""
        return self._exc_args

    @property
    def exception_kwargs(self):
        """Dict of keyword arguments given to the exception constructor."""
        return self._exc_kwargs

    @property
    def exception_type_names(self):
        """Tuple of current exception type **names** (in MRO order)."""
        return self._exc_type_names

    @property
    def exc_info(self):
        """Exception info tuple or ``None``.

        See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
        the contents of this tuple are (if none, then no contents can
        be examined).
        """
        return self._exc_info

    @property
    def traceback_str(self):
        """Exception traceback as string."""
        return self._traceback_str

    @staticmethod
    def reraise_if_any(failures, cause_cls_finder=None):
        """Re-raise exceptions if argument is not empty.

        If argument is empty list/tuple/iterator, this method returns
        None. If argument is converted into a list with a
        single ``Failure`` object in it, that failure is reraised. Else, a
        :class:`~.WrappedFailure` exception is raised with the failure
        list as causes.
        """
        if not isinstance(failures, (list, tuple)):
            # Convert generators/other into a list...
            failures = list(failures)
        if len(failures) == 1:
            failures[0].reraise(cause_cls_finder=cause_cls_finder)
        elif len(failures) > 1:
            raise WrappedFailure(failures)

    def reraise(self, cause_cls_finder=None):
        """Re-raise captured exception (possibly trying to recreate)."""
        if self._exc_info:
            six.reraise(*self._exc_info)
        else:
            # Attempt to regenerate the full chain (and then raise
            # from the root); without a traceback, oh well...
            root = None
            parent = None
            for cause in itertools.chain([self], self.iter_causes()):
                if cause_cls_finder is not None:
                    cause_cls = cause_cls_finder(cause)
                else:
                    cause_cls = None
                if cause_cls is None:
                    # Unable to find where this cause came from, give up...
                    raise WrappedFailure([self])
                exc = cause_cls(
                    *cause.exception_args, **cause.exception_kwargs)
                # Saving this will ensure that if this same exception
                # is serialized again that we will extract the traceback
                # from it directly (thus proxying along the original
                # traceback as much as we can).
                exc.__traceback_str__ = cause.traceback_str
                if root is None:
                    root = exc
                if parent is not None:
                    parent.__cause__ = exc
                parent = exc
            six.reraise(type(root), root, tb=None)

    def check(self, *exc_classes):
        """Check if any of ``exc_classes`` caused the failure.

        Arguments of this method can be exception types or type
        names (strings **fully qualified**). If captured exception is
        an instance of exception of given type, the corresponding argument
        is returned, otherwise ``None`` is returned.
        """
        for cls in exc_classes:
            cls_name = utils.cls_to_cls_name(cls)
            if cls_name in self._exc_type_names:
                return cls
        return None

    @property
    def cause(self):
        """Nested failure *cause* of this failure.

        This property is typically only useful on 3.x or newer versions
        of python as older versions do **not** have associated causes.

        Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
        this is examining to find failure causes.
        """
        return self._cause

    def __unicode__(self):
        return self.pformat()

    def pformat(self, traceback=False):
        """Pretty formats the failure object into a string."""
        buf = six.StringIO()
        if not self._exc_type_names:
            buf.write('Failure: %s' % (self._exception_str))
        else:
            buf.write('Failure: %s: %s' % (self._exc_type_names[0],
                                           self._exception_str))
        if traceback:
            if self._traceback_str is not None:
                traceback_str = self._traceback_str.rstrip()
            else:
                traceback_str = None
            if traceback_str:
                buf.write(os.linesep)
                buf.write(traceback_str)
            else:
                buf.write(os.linesep)
                buf.write('Traceback not available.')
        return buf.getvalue()

    def iter_causes(self):
        """Iterate over all causes."""
        curr = self._cause
        while curr is not None:
            yield curr
            curr = curr._cause

    def __getstate__(self):
        dct = self.to_dict()
        if self._exc_info:
            # Avoids 'TypeError: can't pickle traceback objects'
            dct['exc_info'] = self._exc_info[0:2]
        return dct

    def __setstate__(self, dct):
        self._exception_str = dct['exception_str']
        if 'exc_args' in dct:
            self._exc_args = tuple(dct['exc_args'])
        else:
            # Guess we got an older version somehow, before this
            # was added, so at that point just set to an empty tuple...
            self._exc_args = ()
        if 'exc_kwargs' in dct:
            self._exc_kwargs = dict(dct['exc_kwargs'])
        else:
            self._exc_kwargs = {}
        self._traceback_str = dct['traceback_str']
        self._exc_type_names = dct['exc_type_names']
        self._generated_on = dct['generated_on']
        if 'exc_info' in dct:
            # Tracebacks can't be serialized/deserialized, but since we
            # provide a traceback string (and more) this should be
            # acceptable...
            #
            # TODO(harlowja): in the future we could do something like
            # what the twisted people have done, see for example
            # twisted-13.0.0/twisted/python/failure.py#L89 for how they
            # created a fake traceback object...
            exc_info = list(dct['exc_info'])
            while len(exc_info) < 3:
                exc_info.append(None)
            self._exc_info = tuple(exc_info[0:3])
        else:
            self._exc_info = None
        cause = dct.get('cause')
        if cause is not None:
            cause = self.from_dict(cause)
        self._cause = cause

    # NOTE(review): this helper was missing from this copy of the class even
    # though ``from_exc_info`` calls ``cls._extract_cause``; restored here so
    # cause extraction works (and so the stray ``@classmethod`` it left
    # behind no longer stacks onto ``from_dict``, which made that method
    # uncallable).
    @classmethod
    def _extract_cause(cls, exc_val):
        """Helper routine to extract nested cause (if any)."""
        # See: https://www.python.org/dev/peps/pep-3134/ for why/what
        # these are...
        #
        # '__cause__' attribute for explicitly chained exceptions
        # '__context__' attribute for implicitly chained exceptions
        # '__traceback__' attribute for the traceback
        #
        # See: https://www.python.org/dev/peps/pep-0415/ for why/what
        # the '__suppress_context__' is/means/implies...
        nested_exc_vals = []
        seen = [exc_val]
        while True:
            suppress_context = getattr(
                exc_val, '__suppress_context__', False)
            if suppress_context:
                attr_lookups = ['__cause__']
            else:
                attr_lookups = ['__cause__', '__context__']
            nested_exc_val = None
            for attr_name in attr_lookups:
                attr_val = getattr(exc_val, attr_name, None)
                if attr_val is None:
                    continue
                nested_exc_val = attr_val
            if nested_exc_val is None or nested_exc_val in seen:
                break
            seen.append(nested_exc_val)
            nested_exc_vals.append(nested_exc_val)
            exc_val = nested_exc_val
        last_cause = None
        for exc_val in reversed(nested_exc_vals):
            f = cls.from_exception(exc_val, cause=last_cause,
                                   find_cause=False)
            last_cause = f
        return last_cause

    @classmethod
    def from_dict(cls, data):
        """Converts this from a dictionary to a object."""
        data = dict(data)
        cause = data.get('cause')
        if cause is not None:
            data['cause'] = cls.from_dict(cause)
        return cls(**data)

    def to_dict(self, include_args=True, include_kwargs=True):
        """Converts this object to a dictionary.

        :param include_args: boolean indicating whether to include the
                             exception args in the output.
        :param include_kwargs: boolean indicating whether to include the
                               exception kwargs in the output.
        """
        data = {
            'exception_str': self.exception_str,
            'traceback_str': self.traceback_str,
            'exc_type_names': self.exception_type_names,
            'exc_args': self.exception_args if include_args else tuple(),
            'exc_kwargs': self.exception_kwargs if include_kwargs else {},
            'generated_on': self.generated_on,
        }
        if self._cause is not None:
            data['cause'] = self._cause.to_dict(include_args=include_args,
                                                include_kwargs=include_kwargs)
        return data

    def copy(self, deep=False):
        """Copies this object (shallow or deep).

        :param deep: boolean indicating whether to do a deep copy (or a
                     shallow copy).
        """
        cause = self._cause
        if cause is not None:
            cause = cause.copy(deep=deep)
        exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
        exc_args = self.exception_args
        exc_kwargs = self.exception_kwargs
        if deep:
            exc_args = copy.deepcopy(exc_args)
            exc_kwargs = copy.deepcopy(exc_kwargs)
        else:
            exc_args = tuple(exc_args)
            exc_kwargs = exc_kwargs.copy()
        # These are just simple int/strings, so deep copy doesn't really
        # matter/apply here (as they are immutable anyway).
        exc_type_names = tuple(self._exc_type_names)
        generated_on = self._generated_on
        if generated_on:
            generated_on = tuple(generated_on)
        # NOTE(harlowja): use `self.__class__` here so that we can work
        # with subclasses (assuming anyone makes one).
        return self.__class__(exc_info=exc_info,
                              exception_str=self.exception_str,
                              traceback_str=self.traceback_str,
                              exc_args=exc_args,
                              exc_kwargs=exc_kwargs,
                              exc_type_names=exc_type_names,
                              cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.from_dict
|
python
|
def from_dict(cls, data):
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
|
Converts this from a dictionary to a object.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L623-L629
|
[
"def from_dict(cls, data):\n \"\"\"Converts this from a dictionary to a object.\"\"\"\n data = dict(data)\n cause = data.get('cause')\n if cause is not None:\n data['cause'] = cls.from_dict(cause)\n return cls(**data)\n"
] |
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
For those who are curious, here are a few reasons why the original
exception itself *may* not be reraised and instead a reraised wrapped
failure exception object will be instead. These explanations are *only*
applicable when a failure object is serialized and deserialized (when it is
retained inside the python process that the exception was created in the
the original exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
def __init__(self, exc_info=None, exc_args=None,
exc_kwargs=None, exception_str='',
exc_type_names=None, cause=None,
traceback_str='', generated_on=None):
exc_type_names = utils.to_tuple(exc_type_names)
if not exc_type_names:
raise ValueError("Invalid exception type (no type names"
" provided)")
self._exc_type_names = exc_type_names
self._exc_info = utils.to_tuple(exc_info, on_none=None)
self._exc_args = utils.to_tuple(exc_args)
if exc_kwargs:
self._exc_kwargs = dict(exc_kwargs)
else:
self._exc_kwargs = {}
self._exception_str = exception_str
self._cause = cause
self._traceback_str = traceback_str
self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a ``sys.exc_info()`` tuple."""
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def validate(cls, data):
"""Validate input data matches expected failure ``dict`` format."""
try:
jsonschema.validate(
data, cls.SCHEMA,
# See: https://github.com/Julian/jsonschema/issues/148
types={'array': (list, tuple)})
except jsonschema.ValidationError as e:
raise InvalidFormat("Failure data not of the"
" expected format: %s" % (e.message))
else:
# Ensure that all 'exc_type_names' originate from one of
# base exceptions, because those are the root exceptions that
# python mandates/provides and anything else is invalid...
causes = collections.deque([data])
while causes:
cause = causes.popleft()
try:
generated_on = cause['generated_on']
ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
except (KeyError, IndexError):
ok_bases = []
root_exc_type = cause['exc_type_names'][-1]
if root_exc_type not in ok_bases:
raise InvalidFormat(
"Failure data 'exc_type_names' must"
" have an initial exception type that is one"
" of %s types: '%s' is not one of those"
" types" % (ok_bases, root_exc_type))
sub_cause = cause.get('cause')
if sub_cause is not None:
causes.append(sub_cause)
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise_if_any(failures, cause_cls_finder=None):
"""Re-raise exceptions if argument is not empty.
If argument is empty list/tuple/iterator, this method returns
None. If argument is converted into a list with a
single ``Failure`` object in it, that failure is reraised. Else, a
:class:`~.WrappedFailure` exception is raised with the failure
list as causes.
"""
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
def check(self, *exc_classes):
"""Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
"""
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def pformat(self, traceback=False):
"""Pretty formats the failure object into a string."""
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
@classmethod
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
@classmethod
def to_dict(self, include_args=True, include_kwargs=True):
"""Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
"""
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
def copy(self, deep=False):
"""Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
"""
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.to_dict
|
python
|
def to_dict(self, include_args=True, include_kwargs=True):
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
|
Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L631-L650
| null |
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
For those who are curious, here are a few reasons why the original
exception itself *may* not be reraised and instead a reraised wrapped
failure exception object will be instead. These explanations are *only*
applicable when a failure object is serialized and deserialized (when it is
retained inside the python process that the exception was created in the
the original exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
def __init__(self, exc_info=None, exc_args=None,
exc_kwargs=None, exception_str='',
exc_type_names=None, cause=None,
traceback_str='', generated_on=None):
exc_type_names = utils.to_tuple(exc_type_names)
if not exc_type_names:
raise ValueError("Invalid exception type (no type names"
" provided)")
self._exc_type_names = exc_type_names
self._exc_info = utils.to_tuple(exc_info, on_none=None)
self._exc_args = utils.to_tuple(exc_args)
if exc_kwargs:
self._exc_kwargs = dict(exc_kwargs)
else:
self._exc_kwargs = {}
self._exception_str = exception_str
self._cause = cause
self._traceback_str = traceback_str
self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a ``sys.exc_info()`` tuple."""
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def validate(cls, data):
"""Validate input data matches expected failure ``dict`` format."""
try:
jsonschema.validate(
data, cls.SCHEMA,
# See: https://github.com/Julian/jsonschema/issues/148
types={'array': (list, tuple)})
except jsonschema.ValidationError as e:
raise InvalidFormat("Failure data not of the"
" expected format: %s" % (e.message))
else:
# Ensure that all 'exc_type_names' originate from one of
# base exceptions, because those are the root exceptions that
# python mandates/provides and anything else is invalid...
causes = collections.deque([data])
while causes:
cause = causes.popleft()
try:
generated_on = cause['generated_on']
ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
except (KeyError, IndexError):
ok_bases = []
root_exc_type = cause['exc_type_names'][-1]
if root_exc_type not in ok_bases:
raise InvalidFormat(
"Failure data 'exc_type_names' must"
" have an initial exception type that is one"
" of %s types: '%s' is not one of those"
" types" % (ok_bases, root_exc_type))
sub_cause = cause.get('cause')
if sub_cause is not None:
causes.append(sub_cause)
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise_if_any(failures, cause_cls_finder=None):
"""Re-raise exceptions if argument is not empty.
If argument is empty list/tuple/iterator, this method returns
None. If argument is converted into a list with a
single ``Failure`` object in it, that failure is reraised. Else, a
:class:`~.WrappedFailure` exception is raised with the failure
list as causes.
"""
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
def check(self, *exc_classes):
"""Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
"""
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def pformat(self, traceback=False):
"""Pretty formats the failure object into a string."""
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
@classmethod
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
@classmethod
def from_dict(cls, data):
"""Converts this from a dictionary to a object."""
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
def copy(self, deep=False):
"""Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
"""
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
|
harlowja/failure
|
failure/failure.py
|
Failure.copy
|
python
|
def copy(self, deep=False):
cause = self._cause
if cause is not None:
cause = cause.copy(deep=deep)
exc_info = utils.copy_exc_info(self.exc_info, deep=deep)
exc_args = self.exception_args
exc_kwargs = self.exception_kwargs
if deep:
exc_args = copy.deepcopy(exc_args)
exc_kwargs = copy.deepcopy(exc_kwargs)
else:
exc_args = tuple(exc_args)
exc_kwargs = exc_kwargs.copy()
# These are just simple int/strings, so deep copy doesn't really
# matter/apply here (as they are immutable anyway).
exc_type_names = tuple(self._exc_type_names)
generated_on = self._generated_on
if generated_on:
generated_on = tuple(generated_on)
# NOTE(harlowja): use `self.__class__` here so that we can work
# with subclasses (assuming anyone makes one).
return self.__class__(exc_info=exc_info,
exception_str=self.exception_str,
traceback_str=self.traceback_str,
exc_args=exc_args,
exc_kwargs=exc_kwargs,
exc_type_names=exc_type_names,
cause=cause, generated_on=generated_on)
|
Copies this object (shallow or deep).
:param deep: boolean indicating whether to do a deep copy (or a
shallow copy).
|
train
|
https://github.com/harlowja/failure/blob/9ea9a46ebb26c6d7da2553c80e36892f3997bd6f/failure/failure.py#L652-L684
|
[
"def copy_exc_info(exc_info, deep=False):\n if exc_info is None:\n return None\n exc_type, exc_value, exc_tb = exc_info\n # NOTE(imelnikov): there is no need to copy the exception type, and\n # a shallow copy of the value is fine and we can't copy the traceback since\n # it contains reference to the internal stack frames...\n if deep:\n return (exc_type, copy.deepcopy(exc_value), exc_tb)\n else:\n return (exc_type, copy.copy(exc_value), exc_tb)\n"
] |
class Failure(utils.StrMixin):
"""An immutable object that represents failure.
Failure objects encapsulate exception information so that they can be
re-used later to re-raise, inspect, examine, log, print, serialize,
deserialize...
For those who are curious, here are a few reasons why the original
exception itself *may* not be reraised and instead a reraised wrapped
failure exception object will be instead. These explanations are *only*
applicable when a failure object is serialized and deserialized (when it is
retained inside the python process that the exception was created in the
the original exception can be reraised correctly without issue).
* Traceback objects are not serializable/recreatable, since they contain
references to stack frames at the location where the exception was
raised. When a failure object is serialized and sent across a channel
and recreated it is *not* possible to restore the original traceback and
originating stack frames.
* The original exception *type* can not *always* be guaranteed to be
found, certain nodes can run code that is not accessible/available
when the failure is being deserialized. Even if it was possible to use
pickle safely (which it is not) it would not *always*
be possible to find the originating exception or associated code in this
situation.
* The original exception *type* can not be guaranteed to be constructed in
a *correct* manner. At the time of failure object creation the exception
has already been created and the failure object can not assume it has
knowledge (or the ability) to recreate the original type of the captured
exception (this is especially hard if the original exception was created
via a complex process via some custom exception ``__init__`` method).
* The original exception *type* can not *always* be guaranteed to be
constructed and/or imported in a *safe* manner. Importing *foreign*
exception types dynamically can be problematic when not done
correctly and in a safe manner; since failure objects can
capture *any* exception it would be *unsafe* to try to import
those exception types namespaces and modules on the receiver side
dynamically (this would create similar issues as the ``pickle`` module
has).
TODO(harlowja): use parts of http://bugs.python.org/issue17911 and the
backport at https://pypi.python.org/pypi/traceback2/ to (hopefully)
simplify the methods and contents of this object...
"""
BASE_EXCEPTIONS = {
# py2.x old/legacy names...
2: ('exceptions.BaseException', 'exceptions.Exception'),
# py3.x new names...
3: ('builtins.BaseException', 'builtins.Exception'),
}
"""
Root exceptions of all other python exceptions (as a string).
See: https://docs.python.org/2/library/exceptions.html
"""
#: Expected failure schema (in json schema format).
SCHEMA = {
"$ref": "#/definitions/cause",
"definitions": {
"cause": {
"type": "object",
'properties': {
'exc_args': {
"type": "array",
"minItems": 0,
},
'exc_kwargs': {
"type": "object",
"additionalProperties": True,
},
'exception_str': {
"type": "string",
},
'traceback_str': {
"type": "string",
},
'exc_type_names': {
"type": "array",
"items": {
"type": "string",
},
"minItems": 1,
},
'generated_on': {
"type": "array",
"items": {
"type": "number",
},
"minItems": 1,
},
'cause': {
"type": "object",
"$ref": "#/definitions/cause",
},
},
"required": [
"exception_str",
'traceback_str',
'exc_type_names',
'generated_on',
],
"additionalProperties": True,
},
},
}
def __init__(self, exc_info=None, exc_args=None,
exc_kwargs=None, exception_str='',
exc_type_names=None, cause=None,
traceback_str='', generated_on=None):
exc_type_names = utils.to_tuple(exc_type_names)
if not exc_type_names:
raise ValueError("Invalid exception type (no type names"
" provided)")
self._exc_type_names = exc_type_names
self._exc_info = utils.to_tuple(exc_info, on_none=None)
self._exc_args = utils.to_tuple(exc_args)
if exc_kwargs:
self._exc_kwargs = dict(exc_kwargs)
else:
self._exc_kwargs = {}
self._exception_str = exception_str
self._cause = cause
self._traceback_str = traceback_str
self._generated_on = utils.to_tuple(generated_on, on_none=None)
@classmethod
def from_exc_info(cls, exc_info=None,
retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a ``sys.exc_info()`` tuple."""
if exc_info is None:
exc_info = sys.exc_info()
if not any(exc_info):
raise NoActiveException("No exception currently"
" being handled")
# This should always be the (type, value, traceback) tuple,
# either from a prior sys.exc_info() call or from some other
# creation...
if len(exc_info) != 3:
raise ValueError("Provided 'exc_info' must contain three"
" elements")
exc_type, exc_val, exc_tb = exc_info
try:
if exc_type is None or exc_val is None:
raise ValueError("Invalid exception tuple (exception"
" type and exception value must"
" be provided)")
exc_args = tuple(getattr(exc_val, 'args', []))
exc_kwargs = dict(getattr(exc_val, 'kwargs', {}))
exc_type_names = utils.extract_roots(exc_type)
if not exc_type_names:
exc_type_name = reflection.get_class_name(
exc_val, truncate_builtins=False)
# This should only be possible if the exception provided
# was not really an exception...
raise TypeError("Invalid exception type '%s' (not an"
" exception)" % (exc_type_name))
exception_str = utils.exception_message(exc_val)
if hasattr(exc_val, '__traceback_str__'):
traceback_str = exc_val.__traceback_str__
else:
if exc_tb is not None:
traceback_str = '\n'.join(
traceback.format_exception(*exc_info))
else:
traceback_str = ''
if not retain_exc_info:
exc_info = None
if find_cause and cause is None:
cause = cls._extract_cause(exc_val)
return cls(exc_info=exc_info, exc_args=exc_args,
exc_kwargs=exc_kwargs, exception_str=exception_str,
exc_type_names=exc_type_names, cause=cause,
traceback_str=traceback_str,
generated_on=sys.version_info[0:2])
finally:
del exc_type, exc_val, exc_tb
@classmethod
def from_exception(cls, exception, retain_exc_info=True,
cause=None, find_cause=True):
"""Creates a failure object from a exception instance."""
exc_info = (
type(exception),
exception,
getattr(exception, '__traceback__', None)
)
return cls.from_exc_info(exc_info=exc_info,
retain_exc_info=retain_exc_info,
cause=cause, find_cause=find_cause)
@classmethod
def validate(cls, data):
"""Validate input data matches expected failure ``dict`` format."""
try:
jsonschema.validate(
data, cls.SCHEMA,
# See: https://github.com/Julian/jsonschema/issues/148
types={'array': (list, tuple)})
except jsonschema.ValidationError as e:
raise InvalidFormat("Failure data not of the"
" expected format: %s" % (e.message))
else:
# Ensure that all 'exc_type_names' originate from one of
# base exceptions, because those are the root exceptions that
# python mandates/provides and anything else is invalid...
causes = collections.deque([data])
while causes:
cause = causes.popleft()
try:
generated_on = cause['generated_on']
ok_bases = cls.BASE_EXCEPTIONS[generated_on[0]]
except (KeyError, IndexError):
ok_bases = []
root_exc_type = cause['exc_type_names'][-1]
if root_exc_type not in ok_bases:
raise InvalidFormat(
"Failure data 'exc_type_names' must"
" have an initial exception type that is one"
" of %s types: '%s' is not one of those"
" types" % (ok_bases, root_exc_type))
sub_cause = cause.get('cause')
if sub_cause is not None:
causes.append(sub_cause)
def _matches(self, other):
if self is other:
return True
return (self.exception_type_names == other.exception_type_names and
self.exception_args == other.exception_args and
self.exception_kwargs == other.exception_kwargs and
self.exception_str == other.exception_str and
self.traceback_str == other.traceback_str and
self.cause == other.cause and
self.generated_on == other.generated_on)
def matches(self, other):
"""Checks if another object is equivalent to this object.
:returns: checks if another object is equivalent to this object
:rtype: boolean
"""
if not isinstance(other, Failure):
return False
if self.exc_info is None or other.exc_info is None:
return self._matches(other)
else:
return self == other
def __eq__(self, other):
if not isinstance(other, Failure):
return NotImplemented
return (self._matches(other) and
utils.are_equal_exc_info_tuples(self.exc_info,
other.exc_info))
def __ne__(self, other):
return not (self == other)
# NOTE(imelnikov): obj.__hash__() should return same values for equal
# objects, so we should redefine __hash__. Failure equality semantics
# is a bit complicated, so for now we just mark Failure objects as
# unhashable. See python docs on object.__hash__ for more info:
# http://docs.python.org/2/reference/datamodel.html#object.__hash__
__hash__ = None
@property
def exception(self):
"""Exception value, or ``None`` if exception value is not present.
Exception value *may* be lost during serialization.
"""
if self._exc_info:
return self._exc_info[1]
else:
return None
@property
def generated_on(self):
"""Python major & minor version tuple this failure was generated on.
May be ``None`` if not provided during creation (or after if lost).
"""
return self._generated_on
@property
def exception_str(self):
"""String representation of exception."""
return self._exception_str
@property
def exception_args(self):
"""Tuple of arguments given to the exception constructor."""
return self._exc_args
@property
def exception_kwargs(self):
"""Dict of keyword arguments given to the exception constructor."""
return self._exc_kwargs
@property
def exception_type_names(self):
"""Tuple of current exception type **names** (in MRO order)."""
return self._exc_type_names
@property
def exc_info(self):
"""Exception info tuple or ``None``.
See: https://docs.python.org/2/library/sys.html#sys.exc_info for what
the contents of this tuple are (if none, then no contents can
be examined).
"""
return self._exc_info
@property
def traceback_str(self):
"""Exception traceback as string."""
return self._traceback_str
@staticmethod
def reraise_if_any(failures, cause_cls_finder=None):
"""Re-raise exceptions if argument is not empty.
If argument is empty list/tuple/iterator, this method returns
None. If argument is converted into a list with a
single ``Failure`` object in it, that failure is reraised. Else, a
:class:`~.WrappedFailure` exception is raised with the failure
list as causes.
"""
if not isinstance(failures, (list, tuple)):
# Convert generators/other into a list...
failures = list(failures)
if len(failures) == 1:
failures[0].reraise(cause_cls_finder=cause_cls_finder)
elif len(failures) > 1:
raise WrappedFailure(failures)
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
def check(self, *exc_classes):
"""Check if any of ``exc_classes`` caused the failure.
Arguments of this method can be exception types or type
names (strings **fully qualified**). If captured exception is
an instance of exception of given type, the corresponding argument
is returned, otherwise ``None`` is returned.
"""
for cls in exc_classes:
cls_name = utils.cls_to_cls_name(cls)
if cls_name in self._exc_type_names:
return cls
return None
@property
def cause(self):
"""Nested failure *cause* of this failure.
This property is typically only useful on 3.x or newer versions
of python as older versions do **not** have associated causes.
Refer to :pep:`3134` and :pep:`409` and :pep:`415` for what
this is examining to find failure causes.
"""
return self._cause
def __unicode__(self):
return self.pformat()
def pformat(self, traceback=False):
"""Pretty formats the failure object into a string."""
buf = six.StringIO()
if not self._exc_type_names:
buf.write('Failure: %s' % (self._exception_str))
else:
buf.write('Failure: %s: %s' % (self._exc_type_names[0],
self._exception_str))
if traceback:
if self._traceback_str is not None:
traceback_str = self._traceback_str.rstrip()
else:
traceback_str = None
if traceback_str:
buf.write(os.linesep)
buf.write(traceback_str)
else:
buf.write(os.linesep)
buf.write('Traceback not available.')
return buf.getvalue()
def iter_causes(self):
"""Iterate over all causes."""
curr = self._cause
while curr is not None:
yield curr
curr = curr._cause
def __getstate__(self):
dct = self.to_dict()
if self._exc_info:
# Avoids 'TypeError: can't pickle traceback objects'
dct['exc_info'] = self._exc_info[0:2]
return dct
def __setstate__(self, dct):
self._exception_str = dct['exception_str']
if 'exc_args' in dct:
self._exc_args = tuple(dct['exc_args'])
else:
# Guess we got an older version somehow, before this
# was added, so at that point just set to an empty tuple...
self._exc_args = ()
if 'exc_kwargs' in dct:
self._exc_kwargs = dict(dct['exc_kwargs'])
else:
self._exc_kwargs = {}
self._traceback_str = dct['traceback_str']
self._exc_type_names = dct['exc_type_names']
self._generated_on = dct['generated_on']
if 'exc_info' in dct:
# Tracebacks can't be serialized/deserialized, but since we
# provide a traceback string (and more) this should be
# acceptable...
#
# TODO(harlowja): in the future we could do something like
# what the twisted people have done, see for example
# twisted-13.0.0/twisted/python/failure.py#L89 for how they
# created a fake traceback object...
exc_info = list(dct['exc_info'])
while len(exc_info) < 3:
exc_info.append(None)
self._exc_info = tuple(exc_info[0:3])
else:
self._exc_info = None
cause = dct.get('cause')
if cause is not None:
cause = self.from_dict(cause)
self._cause = cause
@classmethod
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause
@classmethod
def from_dict(cls, data):
"""Converts this from a dictionary to a object."""
data = dict(data)
cause = data.get('cause')
if cause is not None:
data['cause'] = cls.from_dict(cause)
return cls(**data)
def to_dict(self, include_args=True, include_kwargs=True):
"""Converts this object to a dictionary.
:param include_args: boolean indicating whether to include the
exception args in the output.
:param include_kwargs: boolean indicating whether to include the
exception kwargs in the output.
"""
data = {
'exception_str': self.exception_str,
'traceback_str': self.traceback_str,
'exc_type_names': self.exception_type_names,
'exc_args': self.exception_args if include_args else tuple(),
'exc_kwargs': self.exception_kwargs if include_kwargs else {},
'generated_on': self.generated_on,
}
if self._cause is not None:
data['cause'] = self._cause.to_dict(include_args=include_args,
include_kwargs=include_kwargs)
return data
|
ambitioninc/django-entity-event
|
entity_event/context_loader.py
|
get_context_hints_per_source
|
python
|
def get_context_hints_per_source(context_renderers):
# Merge the context render hints for each source as there can be multiple context hints for
# sources depending on the render target. Merging them together involves combining select
# and prefetch related hints for each context renderer
context_hints_per_source = defaultdict(lambda: defaultdict(lambda: {
'app_name': None,
'model_name': None,
'select_related': set(),
'prefetch_related': set(),
}))
for cr in context_renderers:
for key, hints in cr.context_hints.items() if cr.context_hints else []:
for source in cr.get_sources():
context_hints_per_source[source][key]['app_name'] = hints['app_name']
context_hints_per_source[source][key]['model_name'] = hints['model_name']
context_hints_per_source[source][key]['select_related'].update(hints.get('select_related', []))
context_hints_per_source[source][key]['prefetch_related'].update(hints.get('prefetch_related', []))
return context_hints_per_source
|
Given a list of context renderers, return a dictionary of context hints per source.
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_loader.py#L21-L42
| null |
"""
A module for loading contexts using context hints.
"""
from collections import defaultdict
import six
from django.conf import settings
from django.db.models import Q
try:
# Django 1.9
from django.apps import apps
get_model = apps.get_model
except ImportError: # pragma: no cover
# Django < 1.9
from django.db.models import get_model
from manager_utils import id_dict
from entity_event.models import ContextRenderer
def get_querysets_for_context_hints(context_hints_per_source):
"""
Given a list of context hint dictionaries, return a dictionary
of querysets for efficient context loading. The return value
is structured as follows:
{
model: queryset,
...
}
"""
model_select_relateds = defaultdict(set)
model_prefetch_relateds = defaultdict(set)
model_querysets = {}
for context_hints in context_hints_per_source.values():
for hints in context_hints.values():
model = get_model(hints['app_name'], hints['model_name'])
model_querysets[model] = model.objects
model_select_relateds[model].update(hints.get('select_related', []))
model_prefetch_relateds[model].update(hints.get('prefetch_related', []))
# Attach select and prefetch related parameters to the querysets if needed
for model, queryset in model_querysets.items():
if model_select_relateds[model]:
queryset = queryset.select_related(*model_select_relateds[model])
if model_prefetch_relateds[model]:
queryset = queryset.prefetch_related(*model_prefetch_relateds[model])
model_querysets[model] = queryset
return model_querysets
def dict_find(d, which_key):
"""
Finds key values in a nested dictionary. Returns a tuple of the dictionary in which
the key was found along with the value
"""
# If the starting point is a list, iterate recursively over all values
if isinstance(d, (list, tuple)):
for i in d:
for result in dict_find(i, which_key):
yield result
# Else, iterate over all key values of the dictionary
elif isinstance(d, dict):
for k, v in d.items():
if k == which_key:
yield d, v
for result in dict_find(v, which_key):
yield result
def get_model_ids_to_fetch(events, context_hints_per_source):
"""
Obtains the ids of all models that need to be fetched. Returns a dictionary of models that
point to sets of ids that need to be fetched. Return output is as follows:
{
model: [id1, id2, ...],
...
}
"""
number_types = (complex, float) + six.integer_types
model_ids_to_fetch = defaultdict(set)
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
for d, value in dict_find(event.context, context_key):
values = value if isinstance(value, list) else [value]
model_ids_to_fetch[get_model(hints['app_name'], hints['model_name'])].update(
v for v in values if isinstance(v, number_types)
)
return model_ids_to_fetch
def fetch_model_data(model_querysets, model_ids_to_fetch):
"""
Given a dictionary of models to querysets and model IDs to models, fetch the IDs
for every model and return the objects in the following structure.
{
model: {
id: obj,
...
},
...
}
"""
return {
model: id_dict(model_querysets[model].filter(id__in=ids_to_fetch))
for model, ids_to_fetch in model_ids_to_fetch.items()
}
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
"""
Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information.
"""
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
model = get_model(hints['app_name'], hints['model_name'])
for d, value in dict_find(event.context, context_key):
if isinstance(value, list):
for i, model_id in enumerate(d[context_key]):
d[context_key][i] = model_data[model].get(model_id)
else:
d[context_key] = model_data[model].get(value)
def load_renderers_into_events(events, mediums, context_renderers, default_rendering_style):
"""
Given the events and the context renderers, load the renderers into the event objects
so that they may be able to call the 'render' method later on.
"""
# Make a mapping of source groups and rendering styles to context renderers. Do
# the same for sources and rendering styles to context renderers
source_group_style_to_renderer = {
(cr.source_group_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_group_id
}
source_style_to_renderer = {
(cr.source_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_id
}
for e in events:
for m in mediums:
# Try the following when loading a context renderer for a medium in an event.
# 1. Try to look up the renderer based on the source group and medium rendering style
# 2. If step 1 doesn't work, look up based on the source and medium rendering style
# 3. If step 2 doesn't work, look up based on the source group and default rendering style
# 4. if step 3 doesn't work, look up based on the source and default rendering style
# If none of those steps work, this event will not be able to be rendered for the mediun
cr = source_group_style_to_renderer.get((e.source.group_id, m.rendering_style_id))
if not cr:
cr = source_style_to_renderer.get((e.source_id, m.rendering_style_id))
if not cr and default_rendering_style:
cr = source_group_style_to_renderer.get((e.source.group_id, default_rendering_style.id))
if not cr and default_rendering_style:
cr = source_style_to_renderer.get((e.source_id, default_rendering_style.id))
if cr:
e._context_renderers[m] = cr
def get_default_rendering_style():
default_rendering_style = getattr(settings, 'DEFAULT_ENTITY_EVENT_RENDERING_STYLE', None)
if default_rendering_style:
default_rendering_style = get_model('entity_event', 'RenderingStyle').objects.get(name=default_rendering_style)
return default_rendering_style
def load_contexts_and_renderers(events, mediums):
"""
Given a list of events and mediums, load the context model data into the contexts of the events.
"""
sources = {event.source for event in events}
rendering_styles = {medium.rendering_style for medium in mediums if medium.rendering_style}
# Fetch the default rendering style and add it to the set of rendering styles
default_rendering_style = get_default_rendering_style()
if default_rendering_style:
rendering_styles.add(default_rendering_style)
context_renderers = ContextRenderer.objects.filter(
Q(source__in=sources, rendering_style__in=rendering_styles) |
Q(source_group_id__in=[s.group_id for s in sources], rendering_style__in=rendering_styles)).select_related(
'source', 'rendering_style').prefetch_related('source_group__source_set')
context_hints_per_source = get_context_hints_per_source(context_renderers)
model_querysets = get_querysets_for_context_hints(context_hints_per_source)
model_ids_to_fetch = get_model_ids_to_fetch(events, context_hints_per_source)
model_data = fetch_model_data(model_querysets, model_ids_to_fetch)
load_fetched_objects_into_contexts(events, model_data, context_hints_per_source)
load_renderers_into_events(events, mediums, context_renderers, default_rendering_style)
return events
|
ambitioninc/django-entity-event
|
entity_event/context_loader.py
|
get_querysets_for_context_hints
|
python
|
def get_querysets_for_context_hints(context_hints_per_source):
model_select_relateds = defaultdict(set)
model_prefetch_relateds = defaultdict(set)
model_querysets = {}
for context_hints in context_hints_per_source.values():
for hints in context_hints.values():
model = get_model(hints['app_name'], hints['model_name'])
model_querysets[model] = model.objects
model_select_relateds[model].update(hints.get('select_related', []))
model_prefetch_relateds[model].update(hints.get('prefetch_related', []))
# Attach select and prefetch related parameters to the querysets if needed
for model, queryset in model_querysets.items():
if model_select_relateds[model]:
queryset = queryset.select_related(*model_select_relateds[model])
if model_prefetch_relateds[model]:
queryset = queryset.prefetch_related(*model_prefetch_relateds[model])
model_querysets[model] = queryset
return model_querysets
|
Given a list of context hint dictionaries, return a dictionary
of querysets for efficient context loading. The return value
is structured as follows:
{
model: queryset,
...
}
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_loader.py#L45-L74
| null |
"""
A module for loading contexts using context hints.
"""
from collections import defaultdict
import six
from django.conf import settings
from django.db.models import Q
try:
# Django 1.9
from django.apps import apps
get_model = apps.get_model
except ImportError: # pragma: no cover
# Django < 1.9
from django.db.models import get_model
from manager_utils import id_dict
from entity_event.models import ContextRenderer
def get_context_hints_per_source(context_renderers):
"""
Given a list of context renderers, return a dictionary of context hints per source.
"""
# Merge the context render hints for each source as there can be multiple context hints for
# sources depending on the render target. Merging them together involves combining select
# and prefetch related hints for each context renderer
context_hints_per_source = defaultdict(lambda: defaultdict(lambda: {
'app_name': None,
'model_name': None,
'select_related': set(),
'prefetch_related': set(),
}))
for cr in context_renderers:
for key, hints in cr.context_hints.items() if cr.context_hints else []:
for source in cr.get_sources():
context_hints_per_source[source][key]['app_name'] = hints['app_name']
context_hints_per_source[source][key]['model_name'] = hints['model_name']
context_hints_per_source[source][key]['select_related'].update(hints.get('select_related', []))
context_hints_per_source[source][key]['prefetch_related'].update(hints.get('prefetch_related', []))
return context_hints_per_source
def dict_find(d, which_key):
"""
Finds key values in a nested dictionary. Returns a tuple of the dictionary in which
the key was found along with the value
"""
# If the starting point is a list, iterate recursively over all values
if isinstance(d, (list, tuple)):
for i in d:
for result in dict_find(i, which_key):
yield result
# Else, iterate over all key values of the dictionary
elif isinstance(d, dict):
for k, v in d.items():
if k == which_key:
yield d, v
for result in dict_find(v, which_key):
yield result
def get_model_ids_to_fetch(events, context_hints_per_source):
"""
Obtains the ids of all models that need to be fetched. Returns a dictionary of models that
point to sets of ids that need to be fetched. Return output is as follows:
{
model: [id1, id2, ...],
...
}
"""
number_types = (complex, float) + six.integer_types
model_ids_to_fetch = defaultdict(set)
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
for d, value in dict_find(event.context, context_key):
values = value if isinstance(value, list) else [value]
model_ids_to_fetch[get_model(hints['app_name'], hints['model_name'])].update(
v for v in values if isinstance(v, number_types)
)
return model_ids_to_fetch
def fetch_model_data(model_querysets, model_ids_to_fetch):
"""
Given a dictionary of models to querysets and model IDs to models, fetch the IDs
for every model and return the objects in the following structure.
{
model: {
id: obj,
...
},
...
}
"""
return {
model: id_dict(model_querysets[model].filter(id__in=ids_to_fetch))
for model, ids_to_fetch in model_ids_to_fetch.items()
}
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
"""
Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information.
"""
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
model = get_model(hints['app_name'], hints['model_name'])
for d, value in dict_find(event.context, context_key):
if isinstance(value, list):
for i, model_id in enumerate(d[context_key]):
d[context_key][i] = model_data[model].get(model_id)
else:
d[context_key] = model_data[model].get(value)
def load_renderers_into_events(events, mediums, context_renderers, default_rendering_style):
"""
Given the events and the context renderers, load the renderers into the event objects
so that they may be able to call the 'render' method later on.
"""
# Make a mapping of source groups and rendering styles to context renderers. Do
# the same for sources and rendering styles to context renderers
source_group_style_to_renderer = {
(cr.source_group_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_group_id
}
source_style_to_renderer = {
(cr.source_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_id
}
for e in events:
for m in mediums:
# Try the following when loading a context renderer for a medium in an event.
# 1. Try to look up the renderer based on the source group and medium rendering style
# 2. If step 1 doesn't work, look up based on the source and medium rendering style
# 3. If step 2 doesn't work, look up based on the source group and default rendering style
# 4. if step 3 doesn't work, look up based on the source and default rendering style
# If none of those steps work, this event will not be able to be rendered for the mediun
cr = source_group_style_to_renderer.get((e.source.group_id, m.rendering_style_id))
if not cr:
cr = source_style_to_renderer.get((e.source_id, m.rendering_style_id))
if not cr and default_rendering_style:
cr = source_group_style_to_renderer.get((e.source.group_id, default_rendering_style.id))
if not cr and default_rendering_style:
cr = source_style_to_renderer.get((e.source_id, default_rendering_style.id))
if cr:
e._context_renderers[m] = cr
def get_default_rendering_style():
default_rendering_style = getattr(settings, 'DEFAULT_ENTITY_EVENT_RENDERING_STYLE', None)
if default_rendering_style:
default_rendering_style = get_model('entity_event', 'RenderingStyle').objects.get(name=default_rendering_style)
return default_rendering_style
def load_contexts_and_renderers(events, mediums):
"""
Given a list of events and mediums, load the context model data into the contexts of the events.
"""
sources = {event.source for event in events}
rendering_styles = {medium.rendering_style for medium in mediums if medium.rendering_style}
# Fetch the default rendering style and add it to the set of rendering styles
default_rendering_style = get_default_rendering_style()
if default_rendering_style:
rendering_styles.add(default_rendering_style)
context_renderers = ContextRenderer.objects.filter(
Q(source__in=sources, rendering_style__in=rendering_styles) |
Q(source_group_id__in=[s.group_id for s in sources], rendering_style__in=rendering_styles)).select_related(
'source', 'rendering_style').prefetch_related('source_group__source_set')
context_hints_per_source = get_context_hints_per_source(context_renderers)
model_querysets = get_querysets_for_context_hints(context_hints_per_source)
model_ids_to_fetch = get_model_ids_to_fetch(events, context_hints_per_source)
model_data = fetch_model_data(model_querysets, model_ids_to_fetch)
load_fetched_objects_into_contexts(events, model_data, context_hints_per_source)
load_renderers_into_events(events, mediums, context_renderers, default_rendering_style)
return events
|
ambitioninc/django-entity-event
|
entity_event/context_loader.py
|
dict_find
|
python
|
def dict_find(d, which_key):
# If the starting point is a list, iterate recursively over all values
if isinstance(d, (list, tuple)):
for i in d:
for result in dict_find(i, which_key):
yield result
# Else, iterate over all key values of the dictionary
elif isinstance(d, dict):
for k, v in d.items():
if k == which_key:
yield d, v
for result in dict_find(v, which_key):
yield result
|
Finds key values in a nested dictionary. Returns a tuple of the dictionary in which
the key was found along with the value
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_loader.py#L77-L94
|
[
"def dict_find(d, which_key):\n \"\"\"\n Finds key values in a nested dictionary. Returns a tuple of the dictionary in which\n the key was found along with the value\n \"\"\"\n # If the starting point is a list, iterate recursively over all values\n if isinstance(d, (list, tuple)):\n for i in d:\n for result in dict_find(i, which_key):\n yield result\n\n # Else, iterate over all key values of the dictionary\n elif isinstance(d, dict):\n for k, v in d.items():\n if k == which_key:\n yield d, v\n for result in dict_find(v, which_key):\n yield result\n"
] |
"""
A module for loading contexts using context hints.
"""
from collections import defaultdict
import six
from django.conf import settings
from django.db.models import Q
try:
# Django 1.9
from django.apps import apps
get_model = apps.get_model
except ImportError: # pragma: no cover
# Django < 1.9
from django.db.models import get_model
from manager_utils import id_dict
from entity_event.models import ContextRenderer
def get_context_hints_per_source(context_renderers):
"""
Given a list of context renderers, return a dictionary of context hints per source.
"""
# Merge the context render hints for each source as there can be multiple context hints for
# sources depending on the render target. Merging them together involves combining select
# and prefetch related hints for each context renderer
context_hints_per_source = defaultdict(lambda: defaultdict(lambda: {
'app_name': None,
'model_name': None,
'select_related': set(),
'prefetch_related': set(),
}))
for cr in context_renderers:
for key, hints in cr.context_hints.items() if cr.context_hints else []:
for source in cr.get_sources():
context_hints_per_source[source][key]['app_name'] = hints['app_name']
context_hints_per_source[source][key]['model_name'] = hints['model_name']
context_hints_per_source[source][key]['select_related'].update(hints.get('select_related', []))
context_hints_per_source[source][key]['prefetch_related'].update(hints.get('prefetch_related', []))
return context_hints_per_source
def get_querysets_for_context_hints(context_hints_per_source):
"""
Given a list of context hint dictionaries, return a dictionary
of querysets for efficient context loading. The return value
is structured as follows:
{
model: queryset,
...
}
"""
model_select_relateds = defaultdict(set)
model_prefetch_relateds = defaultdict(set)
model_querysets = {}
for context_hints in context_hints_per_source.values():
for hints in context_hints.values():
model = get_model(hints['app_name'], hints['model_name'])
model_querysets[model] = model.objects
model_select_relateds[model].update(hints.get('select_related', []))
model_prefetch_relateds[model].update(hints.get('prefetch_related', []))
# Attach select and prefetch related parameters to the querysets if needed
for model, queryset in model_querysets.items():
if model_select_relateds[model]:
queryset = queryset.select_related(*model_select_relateds[model])
if model_prefetch_relateds[model]:
queryset = queryset.prefetch_related(*model_prefetch_relateds[model])
model_querysets[model] = queryset
return model_querysets
def get_model_ids_to_fetch(events, context_hints_per_source):
"""
Obtains the ids of all models that need to be fetched. Returns a dictionary of models that
point to sets of ids that need to be fetched. Return output is as follows:
{
model: [id1, id2, ...],
...
}
"""
number_types = (complex, float) + six.integer_types
model_ids_to_fetch = defaultdict(set)
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
for d, value in dict_find(event.context, context_key):
values = value if isinstance(value, list) else [value]
model_ids_to_fetch[get_model(hints['app_name'], hints['model_name'])].update(
v for v in values if isinstance(v, number_types)
)
return model_ids_to_fetch
def fetch_model_data(model_querysets, model_ids_to_fetch):
"""
Given a dictionary of models to querysets and model IDs to models, fetch the IDs
for every model and return the objects in the following structure.
{
model: {
id: obj,
...
},
...
}
"""
return {
model: id_dict(model_querysets[model].filter(id__in=ids_to_fetch))
for model, ids_to_fetch in model_ids_to_fetch.items()
}
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
"""
Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information.
"""
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
model = get_model(hints['app_name'], hints['model_name'])
for d, value in dict_find(event.context, context_key):
if isinstance(value, list):
for i, model_id in enumerate(d[context_key]):
d[context_key][i] = model_data[model].get(model_id)
else:
d[context_key] = model_data[model].get(value)
def load_renderers_into_events(events, mediums, context_renderers, default_rendering_style):
"""
Given the events and the context renderers, load the renderers into the event objects
so that they may be able to call the 'render' method later on.
"""
# Make a mapping of source groups and rendering styles to context renderers. Do
# the same for sources and rendering styles to context renderers
source_group_style_to_renderer = {
(cr.source_group_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_group_id
}
source_style_to_renderer = {
(cr.source_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_id
}
for e in events:
for m in mediums:
# Try the following when loading a context renderer for a medium in an event.
# 1. Try to look up the renderer based on the source group and medium rendering style
# 2. If step 1 doesn't work, look up based on the source and medium rendering style
# 3. If step 2 doesn't work, look up based on the source group and default rendering style
# 4. if step 3 doesn't work, look up based on the source and default rendering style
# If none of those steps work, this event will not be able to be rendered for the mediun
cr = source_group_style_to_renderer.get((e.source.group_id, m.rendering_style_id))
if not cr:
cr = source_style_to_renderer.get((e.source_id, m.rendering_style_id))
if not cr and default_rendering_style:
cr = source_group_style_to_renderer.get((e.source.group_id, default_rendering_style.id))
if not cr and default_rendering_style:
cr = source_style_to_renderer.get((e.source_id, default_rendering_style.id))
if cr:
e._context_renderers[m] = cr
def get_default_rendering_style():
default_rendering_style = getattr(settings, 'DEFAULT_ENTITY_EVENT_RENDERING_STYLE', None)
if default_rendering_style:
default_rendering_style = get_model('entity_event', 'RenderingStyle').objects.get(name=default_rendering_style)
return default_rendering_style
def load_contexts_and_renderers(events, mediums):
"""
Given a list of events and mediums, load the context model data into the contexts of the events.
"""
sources = {event.source for event in events}
rendering_styles = {medium.rendering_style for medium in mediums if medium.rendering_style}
# Fetch the default rendering style and add it to the set of rendering styles
default_rendering_style = get_default_rendering_style()
if default_rendering_style:
rendering_styles.add(default_rendering_style)
context_renderers = ContextRenderer.objects.filter(
Q(source__in=sources, rendering_style__in=rendering_styles) |
Q(source_group_id__in=[s.group_id for s in sources], rendering_style__in=rendering_styles)).select_related(
'source', 'rendering_style').prefetch_related('source_group__source_set')
context_hints_per_source = get_context_hints_per_source(context_renderers)
model_querysets = get_querysets_for_context_hints(context_hints_per_source)
model_ids_to_fetch = get_model_ids_to_fetch(events, context_hints_per_source)
model_data = fetch_model_data(model_querysets, model_ids_to_fetch)
load_fetched_objects_into_contexts(events, model_data, context_hints_per_source)
load_renderers_into_events(events, mediums, context_renderers, default_rendering_style)
return events
|
ambitioninc/django-entity-event
|
entity_event/context_loader.py
|
get_model_ids_to_fetch
|
python
|
def get_model_ids_to_fetch(events, context_hints_per_source):
number_types = (complex, float) + six.integer_types
model_ids_to_fetch = defaultdict(set)
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
for d, value in dict_find(event.context, context_key):
values = value if isinstance(value, list) else [value]
model_ids_to_fetch[get_model(hints['app_name'], hints['model_name'])].update(
v for v in values if isinstance(v, number_types)
)
return model_ids_to_fetch
|
Obtains the ids of all models that need to be fetched. Returns a dictionary of models that
point to sets of ids that need to be fetched. Return output is as follows:
{
model: [id1, id2, ...],
...
}
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_loader.py#L97-L119
|
[
"def dict_find(d, which_key):\n \"\"\"\n Finds key values in a nested dictionary. Returns a tuple of the dictionary in which\n the key was found along with the value\n \"\"\"\n # If the starting point is a list, iterate recursively over all values\n if isinstance(d, (list, tuple)):\n for i in d:\n for result in dict_find(i, which_key):\n yield result\n\n # Else, iterate over all key values of the dictionary\n elif isinstance(d, dict):\n for k, v in d.items():\n if k == which_key:\n yield d, v\n for result in dict_find(v, which_key):\n yield result\n"
] |
"""
A module for loading contexts using context hints.
"""
from collections import defaultdict
import six
from django.conf import settings
from django.db.models import Q
try:
# Django 1.9
from django.apps import apps
get_model = apps.get_model
except ImportError: # pragma: no cover
# Django < 1.9
from django.db.models import get_model
from manager_utils import id_dict
from entity_event.models import ContextRenderer
def get_context_hints_per_source(context_renderers):
"""
Given a list of context renderers, return a dictionary of context hints per source.
"""
# Merge the context render hints for each source as there can be multiple context hints for
# sources depending on the render target. Merging them together involves combining select
# and prefetch related hints for each context renderer
context_hints_per_source = defaultdict(lambda: defaultdict(lambda: {
'app_name': None,
'model_name': None,
'select_related': set(),
'prefetch_related': set(),
}))
for cr in context_renderers:
for key, hints in cr.context_hints.items() if cr.context_hints else []:
for source in cr.get_sources():
context_hints_per_source[source][key]['app_name'] = hints['app_name']
context_hints_per_source[source][key]['model_name'] = hints['model_name']
context_hints_per_source[source][key]['select_related'].update(hints.get('select_related', []))
context_hints_per_source[source][key]['prefetch_related'].update(hints.get('prefetch_related', []))
return context_hints_per_source
def get_querysets_for_context_hints(context_hints_per_source):
"""
Given a list of context hint dictionaries, return a dictionary
of querysets for efficient context loading. The return value
is structured as follows:
{
model: queryset,
...
}
"""
model_select_relateds = defaultdict(set)
model_prefetch_relateds = defaultdict(set)
model_querysets = {}
for context_hints in context_hints_per_source.values():
for hints in context_hints.values():
model = get_model(hints['app_name'], hints['model_name'])
model_querysets[model] = model.objects
model_select_relateds[model].update(hints.get('select_related', []))
model_prefetch_relateds[model].update(hints.get('prefetch_related', []))
# Attach select and prefetch related parameters to the querysets if needed
for model, queryset in model_querysets.items():
if model_select_relateds[model]:
queryset = queryset.select_related(*model_select_relateds[model])
if model_prefetch_relateds[model]:
queryset = queryset.prefetch_related(*model_prefetch_relateds[model])
model_querysets[model] = queryset
return model_querysets
def dict_find(d, which_key):
"""
Finds key values in a nested dictionary. Returns a tuple of the dictionary in which
the key was found along with the value
"""
# If the starting point is a list, iterate recursively over all values
if isinstance(d, (list, tuple)):
for i in d:
for result in dict_find(i, which_key):
yield result
# Else, iterate over all key values of the dictionary
elif isinstance(d, dict):
for k, v in d.items():
if k == which_key:
yield d, v
for result in dict_find(v, which_key):
yield result
def fetch_model_data(model_querysets, model_ids_to_fetch):
"""
Given a dictionary of models to querysets and model IDs to models, fetch the IDs
for every model and return the objects in the following structure.
{
model: {
id: obj,
...
},
...
}
"""
return {
model: id_dict(model_querysets[model].filter(id__in=ids_to_fetch))
for model, ids_to_fetch in model_ids_to_fetch.items()
}
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
"""
Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information.
"""
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
model = get_model(hints['app_name'], hints['model_name'])
for d, value in dict_find(event.context, context_key):
if isinstance(value, list):
for i, model_id in enumerate(d[context_key]):
d[context_key][i] = model_data[model].get(model_id)
else:
d[context_key] = model_data[model].get(value)
def load_renderers_into_events(events, mediums, context_renderers, default_rendering_style):
"""
Given the events and the context renderers, load the renderers into the event objects
so that they may be able to call the 'render' method later on.
"""
# Make a mapping of source groups and rendering styles to context renderers. Do
# the same for sources and rendering styles to context renderers
source_group_style_to_renderer = {
(cr.source_group_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_group_id
}
source_style_to_renderer = {
(cr.source_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_id
}
for e in events:
for m in mediums:
# Try the following when loading a context renderer for a medium in an event.
# 1. Try to look up the renderer based on the source group and medium rendering style
# 2. If step 1 doesn't work, look up based on the source and medium rendering style
# 3. If step 2 doesn't work, look up based on the source group and default rendering style
# 4. if step 3 doesn't work, look up based on the source and default rendering style
# If none of those steps work, this event will not be able to be rendered for the mediun
cr = source_group_style_to_renderer.get((e.source.group_id, m.rendering_style_id))
if not cr:
cr = source_style_to_renderer.get((e.source_id, m.rendering_style_id))
if not cr and default_rendering_style:
cr = source_group_style_to_renderer.get((e.source.group_id, default_rendering_style.id))
if not cr and default_rendering_style:
cr = source_style_to_renderer.get((e.source_id, default_rendering_style.id))
if cr:
e._context_renderers[m] = cr
def get_default_rendering_style():
default_rendering_style = getattr(settings, 'DEFAULT_ENTITY_EVENT_RENDERING_STYLE', None)
if default_rendering_style:
default_rendering_style = get_model('entity_event', 'RenderingStyle').objects.get(name=default_rendering_style)
return default_rendering_style
def load_contexts_and_renderers(events, mediums):
"""
Given a list of events and mediums, load the context model data into the contexts of the events.
"""
sources = {event.source for event in events}
rendering_styles = {medium.rendering_style for medium in mediums if medium.rendering_style}
# Fetch the default rendering style and add it to the set of rendering styles
default_rendering_style = get_default_rendering_style()
if default_rendering_style:
rendering_styles.add(default_rendering_style)
context_renderers = ContextRenderer.objects.filter(
Q(source__in=sources, rendering_style__in=rendering_styles) |
Q(source_group_id__in=[s.group_id for s in sources], rendering_style__in=rendering_styles)).select_related(
'source', 'rendering_style').prefetch_related('source_group__source_set')
context_hints_per_source = get_context_hints_per_source(context_renderers)
model_querysets = get_querysets_for_context_hints(context_hints_per_source)
model_ids_to_fetch = get_model_ids_to_fetch(events, context_hints_per_source)
model_data = fetch_model_data(model_querysets, model_ids_to_fetch)
load_fetched_objects_into_contexts(events, model_data, context_hints_per_source)
load_renderers_into_events(events, mediums, context_renderers, default_rendering_style)
return events
|
ambitioninc/django-entity-event
|
entity_event/context_loader.py
|
fetch_model_data
|
python
|
def fetch_model_data(model_querysets, model_ids_to_fetch):
return {
model: id_dict(model_querysets[model].filter(id__in=ids_to_fetch))
for model, ids_to_fetch in model_ids_to_fetch.items()
}
|
Given a dictionary of models to querysets and model IDs to models, fetch the IDs
for every model and return the objects in the following structure.
{
model: {
id: obj,
...
},
...
}
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_loader.py#L122-L138
| null |
"""
A module for loading contexts using context hints.
"""
from collections import defaultdict
import six
from django.conf import settings
from django.db.models import Q
try:
# Django 1.9
from django.apps import apps
get_model = apps.get_model
except ImportError: # pragma: no cover
# Django < 1.9
from django.db.models import get_model
from manager_utils import id_dict
from entity_event.models import ContextRenderer
def get_context_hints_per_source(context_renderers):
"""
Given a list of context renderers, return a dictionary of context hints per source.
"""
# Merge the context render hints for each source as there can be multiple context hints for
# sources depending on the render target. Merging them together involves combining select
# and prefetch related hints for each context renderer
context_hints_per_source = defaultdict(lambda: defaultdict(lambda: {
'app_name': None,
'model_name': None,
'select_related': set(),
'prefetch_related': set(),
}))
for cr in context_renderers:
for key, hints in cr.context_hints.items() if cr.context_hints else []:
for source in cr.get_sources():
context_hints_per_source[source][key]['app_name'] = hints['app_name']
context_hints_per_source[source][key]['model_name'] = hints['model_name']
context_hints_per_source[source][key]['select_related'].update(hints.get('select_related', []))
context_hints_per_source[source][key]['prefetch_related'].update(hints.get('prefetch_related', []))
return context_hints_per_source
def get_querysets_for_context_hints(context_hints_per_source):
"""
Given a list of context hint dictionaries, return a dictionary
of querysets for efficient context loading. The return value
is structured as follows:
{
model: queryset,
...
}
"""
model_select_relateds = defaultdict(set)
model_prefetch_relateds = defaultdict(set)
model_querysets = {}
for context_hints in context_hints_per_source.values():
for hints in context_hints.values():
model = get_model(hints['app_name'], hints['model_name'])
model_querysets[model] = model.objects
model_select_relateds[model].update(hints.get('select_related', []))
model_prefetch_relateds[model].update(hints.get('prefetch_related', []))
# Attach select and prefetch related parameters to the querysets if needed
for model, queryset in model_querysets.items():
if model_select_relateds[model]:
queryset = queryset.select_related(*model_select_relateds[model])
if model_prefetch_relateds[model]:
queryset = queryset.prefetch_related(*model_prefetch_relateds[model])
model_querysets[model] = queryset
return model_querysets
def dict_find(d, which_key):
"""
Finds key values in a nested dictionary. Returns a tuple of the dictionary in which
the key was found along with the value
"""
# If the starting point is a list, iterate recursively over all values
if isinstance(d, (list, tuple)):
for i in d:
for result in dict_find(i, which_key):
yield result
# Else, iterate over all key values of the dictionary
elif isinstance(d, dict):
for k, v in d.items():
if k == which_key:
yield d, v
for result in dict_find(v, which_key):
yield result
def get_model_ids_to_fetch(events, context_hints_per_source):
"""
Obtains the ids of all models that need to be fetched. Returns a dictionary of models that
point to sets of ids that need to be fetched. Return output is as follows:
{
model: [id1, id2, ...],
...
}
"""
number_types = (complex, float) + six.integer_types
model_ids_to_fetch = defaultdict(set)
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
for d, value in dict_find(event.context, context_key):
values = value if isinstance(value, list) else [value]
model_ids_to_fetch[get_model(hints['app_name'], hints['model_name'])].update(
v for v in values if isinstance(v, number_types)
)
return model_ids_to_fetch
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
"""
Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information.
"""
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
model = get_model(hints['app_name'], hints['model_name'])
for d, value in dict_find(event.context, context_key):
if isinstance(value, list):
for i, model_id in enumerate(d[context_key]):
d[context_key][i] = model_data[model].get(model_id)
else:
d[context_key] = model_data[model].get(value)
def load_renderers_into_events(events, mediums, context_renderers, default_rendering_style):
"""
Given the events and the context renderers, load the renderers into the event objects
so that they may be able to call the 'render' method later on.
"""
# Make a mapping of source groups and rendering styles to context renderers. Do
# the same for sources and rendering styles to context renderers
source_group_style_to_renderer = {
(cr.source_group_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_group_id
}
source_style_to_renderer = {
(cr.source_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_id
}
for e in events:
for m in mediums:
# Try the following when loading a context renderer for a medium in an event.
# 1. Try to look up the renderer based on the source group and medium rendering style
# 2. If step 1 doesn't work, look up based on the source and medium rendering style
# 3. If step 2 doesn't work, look up based on the source group and default rendering style
# 4. if step 3 doesn't work, look up based on the source and default rendering style
# If none of those steps work, this event will not be able to be rendered for the mediun
cr = source_group_style_to_renderer.get((e.source.group_id, m.rendering_style_id))
if not cr:
cr = source_style_to_renderer.get((e.source_id, m.rendering_style_id))
if not cr and default_rendering_style:
cr = source_group_style_to_renderer.get((e.source.group_id, default_rendering_style.id))
if not cr and default_rendering_style:
cr = source_style_to_renderer.get((e.source_id, default_rendering_style.id))
if cr:
e._context_renderers[m] = cr
def get_default_rendering_style():
default_rendering_style = getattr(settings, 'DEFAULT_ENTITY_EVENT_RENDERING_STYLE', None)
if default_rendering_style:
default_rendering_style = get_model('entity_event', 'RenderingStyle').objects.get(name=default_rendering_style)
return default_rendering_style
def load_contexts_and_renderers(events, mediums):
"""
Given a list of events and mediums, load the context model data into the contexts of the events.
"""
sources = {event.source for event in events}
rendering_styles = {medium.rendering_style for medium in mediums if medium.rendering_style}
# Fetch the default rendering style and add it to the set of rendering styles
default_rendering_style = get_default_rendering_style()
if default_rendering_style:
rendering_styles.add(default_rendering_style)
context_renderers = ContextRenderer.objects.filter(
Q(source__in=sources, rendering_style__in=rendering_styles) |
Q(source_group_id__in=[s.group_id for s in sources], rendering_style__in=rendering_styles)).select_related(
'source', 'rendering_style').prefetch_related('source_group__source_set')
context_hints_per_source = get_context_hints_per_source(context_renderers)
model_querysets = get_querysets_for_context_hints(context_hints_per_source)
model_ids_to_fetch = get_model_ids_to_fetch(events, context_hints_per_source)
model_data = fetch_model_data(model_querysets, model_ids_to_fetch)
load_fetched_objects_into_contexts(events, model_data, context_hints_per_source)
load_renderers_into_events(events, mediums, context_renderers, default_rendering_style)
return events
|
ambitioninc/django-entity-event
|
entity_event/context_loader.py
|
load_fetched_objects_into_contexts
|
python
|
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
model = get_model(hints['app_name'], hints['model_name'])
for d, value in dict_find(event.context, context_key):
if isinstance(value, list):
for i, model_id in enumerate(d[context_key]):
d[context_key][i] = model_data[model].get(model_id)
else:
d[context_key] = model_data[model].get(value)
|
Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information.
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_loader.py#L141-L155
|
[
"def dict_find(d, which_key):\n \"\"\"\n Finds key values in a nested dictionary. Returns a tuple of the dictionary in which\n the key was found along with the value\n \"\"\"\n # If the starting point is a list, iterate recursively over all values\n if isinstance(d, (list, tuple)):\n for i in d:\n for result in dict_find(i, which_key):\n yield result\n\n # Else, iterate over all key values of the dictionary\n elif isinstance(d, dict):\n for k, v in d.items():\n if k == which_key:\n yield d, v\n for result in dict_find(v, which_key):\n yield result\n"
] |
"""
A module for loading contexts using context hints.
"""
from collections import defaultdict
import six
from django.conf import settings
from django.db.models import Q
try:
# Django 1.9
from django.apps import apps
get_model = apps.get_model
except ImportError: # pragma: no cover
# Django < 1.9
from django.db.models import get_model
from manager_utils import id_dict
from entity_event.models import ContextRenderer
def get_context_hints_per_source(context_renderers):
    """
    Build a per-source dictionary of context hints from a list of context renderers.

    Hints contributed by multiple renderers for the same source/key are merged:
    the ``select_related`` and ``prefetch_related`` collections are unioned,
    while ``app_name``/``model_name`` take the value from the last renderer seen.
    """
    def _empty_hints():
        # Default shape of a merged hint entry before any renderer fills it in.
        return {
            'app_name': None,
            'model_name': None,
            'select_related': set(),
            'prefetch_related': set(),
        }

    merged = defaultdict(lambda: defaultdict(_empty_hints))
    for renderer in context_renderers:
        hint_items = renderer.context_hints.items() if renderer.context_hints else []
        for key, hints in hint_items:
            for source in renderer.get_sources():
                entry = merged[source][key]
                entry['app_name'] = hints['app_name']
                entry['model_name'] = hints['model_name']
                entry['select_related'].update(hints.get('select_related', []))
                entry['prefetch_related'].update(hints.get('prefetch_related', []))

    return merged
def get_querysets_for_context_hints(context_hints_per_source):
    """
    Map each hinted model class to a queryset suitable for efficient context loading.

    The return value is structured as follows:

    {
        model: queryset,
        ...
    }

    ``select_related``/``prefetch_related`` hints gathered across all sources
    are unioned and applied once per model.
    """
    select_hints = defaultdict(set)
    prefetch_hints = defaultdict(set)
    querysets = {}

    # Gather the (possibly duplicated) hints for every model across all sources
    for hints_by_key in context_hints_per_source.values():
        for hints in hints_by_key.values():
            model = get_model(hints['app_name'], hints['model_name'])
            querysets[model] = model.objects
            select_hints[model].update(hints.get('select_related', []))
            prefetch_hints[model].update(hints.get('prefetch_related', []))

    # Apply the accumulated related-object hints to each model's queryset
    for model in querysets:
        queryset = querysets[model]
        if select_hints[model]:
            queryset = queryset.select_related(*select_hints[model])
        if prefetch_hints[model]:
            queryset = queryset.prefetch_related(*prefetch_hints[model])
        querysets[model] = queryset

    return querysets
def dict_find(d, which_key):
    """
    Recursively search a nested structure for ``which_key``.

    Yields ``(containing_dict, value)`` pairs for every dictionary in which the
    key is found, descending into nested dicts as well as lists and tuples.
    """
    if isinstance(d, (list, tuple)):
        # Lists/tuples are just containers - search each element in order
        for element in d:
            yield from dict_find(element, which_key)
    elif isinstance(d, dict):
        for key, value in d.items():
            if key == which_key:
                yield d, value
            # Keep descending; the same key may appear again at deeper levels
            yield from dict_find(value, which_key)
def get_model_ids_to_fetch(events, context_hints_per_source):
    """
    Collect the ids of every model object referenced by the event contexts.

    Returns a dictionary mapping each model class to the set of ids that must
    be fetched for it:

    {
        model: {id1, id2, ...},
        ...
    }
    """
    numeric_types = (complex, float) + six.integer_types
    ids_by_model = defaultdict(set)

    for event in events:
        hints_for_source = context_hints_per_source.get(event.source, {})
        for context_key, hints in hints_for_source.items():
            for _, value in dict_find(event.context, context_key):
                # A context value may be a single id or a list of ids; only
                # numeric values are treated as ids that need fetching.
                candidates = value if isinstance(value, list) else [value]
                ids_by_model[get_model(hints['app_name'], hints['model_name'])].update(
                    c for c in candidates if isinstance(c, numeric_types)
                )

    return ids_by_model
def fetch_model_data(model_querysets, model_ids_to_fetch):
    """
    Fetch the objects for every model id and index them by id.

    Returns:

    {
        model: {
            id: obj,
            ...
        },
        ...
    }
    """
    fetched = {}
    for model, ids in model_ids_to_fetch.items():
        fetched[model] = id_dict(model_querysets[model].filter(id__in=ids))
    return fetched
def load_renderers_into_events(events, mediums, context_renderers, default_rendering_style):
    """
    Attach the appropriate context renderer to each event/medium pair so that
    the event's 'render' method can be called later on.
    """
    # Index the renderers by (source group, rendering style) and by
    # (source, rendering style) for quick lookup below.
    by_group_and_style = {
        (renderer.source_group_id, renderer.rendering_style_id): renderer
        for renderer in context_renderers if renderer.source_group_id
    }
    by_source_and_style = {
        (renderer.source_id, renderer.rendering_style_id): renderer
        for renderer in context_renderers if renderer.source_id
    }

    for event in events:
        for medium in mediums:
            # Resolution order:
            #   1. source group + medium rendering style
            #   2. source + medium rendering style
            #   3. source group + default rendering style
            #   4. source + default rendering style
            # If none match, this event cannot be rendered for the medium.
            candidates = [
                by_group_and_style.get((event.source.group_id, medium.rendering_style_id)),
                by_source_and_style.get((event.source_id, medium.rendering_style_id)),
            ]
            if default_rendering_style:
                candidates.append(
                    by_group_and_style.get((event.source.group_id, default_rendering_style.id)))
                candidates.append(
                    by_source_and_style.get((event.source_id, default_rendering_style.id)))

            renderer = next((c for c in candidates if c), None)
            if renderer:
                event._context_renderers[medium] = renderer
def get_default_rendering_style():
    """
    Look up the RenderingStyle named by the DEFAULT_ENTITY_EVENT_RENDERING_STYLE
    setting. Returns the raw (falsy) setting value when no style is configured.
    """
    style = getattr(settings, 'DEFAULT_ENTITY_EVENT_RENDERING_STYLE', None)
    if not style:
        # Preserve the falsy value (None or empty) exactly as configured
        return style
    return get_model('entity_event', 'RenderingStyle').objects.get(name=style)
def load_contexts_and_renderers(events, mediums):
    """
    Populate the contexts of the given events with model data and attach the
    renderers needed to render the events for the given mediums.
    """
    event_sources = {e.source for e in events}
    styles = {m.rendering_style for m in mediums if m.rendering_style}

    # The default rendering style (if configured) also participates in
    # renderer lookup, so include it in the filter below.
    default_style = get_default_rendering_style()
    if default_style:
        styles.add(default_style)

    renderers = ContextRenderer.objects.filter(
        Q(source__in=event_sources, rendering_style__in=styles) |
        Q(source_group_id__in=[s.group_id for s in event_sources], rendering_style__in=styles)
    ).select_related('source', 'rendering_style').prefetch_related('source_group__source_set')

    hints_per_source = get_context_hints_per_source(renderers)
    querysets = get_querysets_for_context_hints(hints_per_source)
    ids_to_fetch = get_model_ids_to_fetch(events, hints_per_source)
    data = fetch_model_data(querysets, ids_to_fetch)

    load_fetched_objects_into_contexts(events, data, hints_per_source)
    load_renderers_into_events(events, mediums, renderers, default_style)
    return events
|
ambitioninc/django-entity-event
|
entity_event/context_loader.py
|
load_renderers_into_events
|
python
|
def load_renderers_into_events(events, mediums, context_renderers, default_rendering_style):
# Make a mapping of source groups and rendering styles to context renderers. Do
# the same for sources and rendering styles to context renderers
source_group_style_to_renderer = {
(cr.source_group_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_group_id
}
source_style_to_renderer = {
(cr.source_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_id
}
for e in events:
for m in mediums:
# Try the following when loading a context renderer for a medium in an event.
# 1. Try to look up the renderer based on the source group and medium rendering style
# 2. If step 1 doesn't work, look up based on the source and medium rendering style
# 3. If step 2 doesn't work, look up based on the source group and default rendering style
# 4. if step 3 doesn't work, look up based on the source and default rendering style
# If none of those steps work, this event will not be able to be rendered for the mediun
cr = source_group_style_to_renderer.get((e.source.group_id, m.rendering_style_id))
if not cr:
cr = source_style_to_renderer.get((e.source_id, m.rendering_style_id))
if not cr and default_rendering_style:
cr = source_group_style_to_renderer.get((e.source.group_id, default_rendering_style.id))
if not cr and default_rendering_style:
cr = source_style_to_renderer.get((e.source_id, default_rendering_style.id))
if cr:
e._context_renderers[m] = cr
|
Given the events and the context renderers, load the renderers into the event objects
so that they may be able to call the 'render' method later on.
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_loader.py#L158-L191
| null |
"""
A module for loading contexts using context hints.
"""
from collections import defaultdict
import six
from django.conf import settings
from django.db.models import Q
try:
# Django 1.9
from django.apps import apps
get_model = apps.get_model
except ImportError: # pragma: no cover
# Django < 1.9
from django.db.models import get_model
from manager_utils import id_dict
from entity_event.models import ContextRenderer
def get_context_hints_per_source(context_renderers):
"""
Given a list of context renderers, return a dictionary of context hints per source.
"""
# Merge the context render hints for each source as there can be multiple context hints for
# sources depending on the render target. Merging them together involves combining select
# and prefetch related hints for each context renderer
context_hints_per_source = defaultdict(lambda: defaultdict(lambda: {
'app_name': None,
'model_name': None,
'select_related': set(),
'prefetch_related': set(),
}))
for cr in context_renderers:
for key, hints in cr.context_hints.items() if cr.context_hints else []:
for source in cr.get_sources():
context_hints_per_source[source][key]['app_name'] = hints['app_name']
context_hints_per_source[source][key]['model_name'] = hints['model_name']
context_hints_per_source[source][key]['select_related'].update(hints.get('select_related', []))
context_hints_per_source[source][key]['prefetch_related'].update(hints.get('prefetch_related', []))
return context_hints_per_source
def get_querysets_for_context_hints(context_hints_per_source):
"""
Given a list of context hint dictionaries, return a dictionary
of querysets for efficient context loading. The return value
is structured as follows:
{
model: queryset,
...
}
"""
model_select_relateds = defaultdict(set)
model_prefetch_relateds = defaultdict(set)
model_querysets = {}
for context_hints in context_hints_per_source.values():
for hints in context_hints.values():
model = get_model(hints['app_name'], hints['model_name'])
model_querysets[model] = model.objects
model_select_relateds[model].update(hints.get('select_related', []))
model_prefetch_relateds[model].update(hints.get('prefetch_related', []))
# Attach select and prefetch related parameters to the querysets if needed
for model, queryset in model_querysets.items():
if model_select_relateds[model]:
queryset = queryset.select_related(*model_select_relateds[model])
if model_prefetch_relateds[model]:
queryset = queryset.prefetch_related(*model_prefetch_relateds[model])
model_querysets[model] = queryset
return model_querysets
def dict_find(d, which_key):
"""
Finds key values in a nested dictionary. Returns a tuple of the dictionary in which
the key was found along with the value
"""
# If the starting point is a list, iterate recursively over all values
if isinstance(d, (list, tuple)):
for i in d:
for result in dict_find(i, which_key):
yield result
# Else, iterate over all key values of the dictionary
elif isinstance(d, dict):
for k, v in d.items():
if k == which_key:
yield d, v
for result in dict_find(v, which_key):
yield result
def get_model_ids_to_fetch(events, context_hints_per_source):
"""
Obtains the ids of all models that need to be fetched. Returns a dictionary of models that
point to sets of ids that need to be fetched. Return output is as follows:
{
model: [id1, id2, ...],
...
}
"""
number_types = (complex, float) + six.integer_types
model_ids_to_fetch = defaultdict(set)
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
for d, value in dict_find(event.context, context_key):
values = value if isinstance(value, list) else [value]
model_ids_to_fetch[get_model(hints['app_name'], hints['model_name'])].update(
v for v in values if isinstance(v, number_types)
)
return model_ids_to_fetch
def fetch_model_data(model_querysets, model_ids_to_fetch):
"""
Given a dictionary of models to querysets and model IDs to models, fetch the IDs
for every model and return the objects in the following structure.
{
model: {
id: obj,
...
},
...
}
"""
return {
model: id_dict(model_querysets[model].filter(id__in=ids_to_fetch))
for model, ids_to_fetch in model_ids_to_fetch.items()
}
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
    """
    Replace the model ids stored in each event's context with the
    corresponding fetched model instances from model_data.
    """
    for event in events:
        hints_for_source = context_hints_per_source.get(event.source, {})
        for context_key, hints in hints_for_source.items():
            model = get_model(hints['app_name'], hints['model_name'])
            for container, value in dict_find(event.context, context_key):
                if isinstance(value, list):
                    # Swap each id for its loaded object, mutating the list in place
                    for idx, model_id in enumerate(container[context_key]):
                        container[context_key][idx] = model_data[model].get(model_id)
                else:
                    container[context_key] = model_data[model].get(value)
def get_default_rendering_style():
default_rendering_style = getattr(settings, 'DEFAULT_ENTITY_EVENT_RENDERING_STYLE', None)
if default_rendering_style:
default_rendering_style = get_model('entity_event', 'RenderingStyle').objects.get(name=default_rendering_style)
return default_rendering_style
def load_contexts_and_renderers(events, mediums):
"""
Given a list of events and mediums, load the context model data into the contexts of the events.
"""
sources = {event.source for event in events}
rendering_styles = {medium.rendering_style for medium in mediums if medium.rendering_style}
# Fetch the default rendering style and add it to the set of rendering styles
default_rendering_style = get_default_rendering_style()
if default_rendering_style:
rendering_styles.add(default_rendering_style)
context_renderers = ContextRenderer.objects.filter(
Q(source__in=sources, rendering_style__in=rendering_styles) |
Q(source_group_id__in=[s.group_id for s in sources], rendering_style__in=rendering_styles)).select_related(
'source', 'rendering_style').prefetch_related('source_group__source_set')
context_hints_per_source = get_context_hints_per_source(context_renderers)
model_querysets = get_querysets_for_context_hints(context_hints_per_source)
model_ids_to_fetch = get_model_ids_to_fetch(events, context_hints_per_source)
model_data = fetch_model_data(model_querysets, model_ids_to_fetch)
load_fetched_objects_into_contexts(events, model_data, context_hints_per_source)
load_renderers_into_events(events, mediums, context_renderers, default_rendering_style)
return events
|
ambitioninc/django-entity-event
|
entity_event/context_loader.py
|
load_contexts_and_renderers
|
python
|
def load_contexts_and_renderers(events, mediums):
sources = {event.source for event in events}
rendering_styles = {medium.rendering_style for medium in mediums if medium.rendering_style}
# Fetch the default rendering style and add it to the set of rendering styles
default_rendering_style = get_default_rendering_style()
if default_rendering_style:
rendering_styles.add(default_rendering_style)
context_renderers = ContextRenderer.objects.filter(
Q(source__in=sources, rendering_style__in=rendering_styles) |
Q(source_group_id__in=[s.group_id for s in sources], rendering_style__in=rendering_styles)).select_related(
'source', 'rendering_style').prefetch_related('source_group__source_set')
context_hints_per_source = get_context_hints_per_source(context_renderers)
model_querysets = get_querysets_for_context_hints(context_hints_per_source)
model_ids_to_fetch = get_model_ids_to_fetch(events, context_hints_per_source)
model_data = fetch_model_data(model_querysets, model_ids_to_fetch)
load_fetched_objects_into_contexts(events, model_data, context_hints_per_source)
load_renderers_into_events(events, mediums, context_renderers, default_rendering_style)
return events
|
Given a list of events and mediums, load the context model data into the contexts of the events.
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_loader.py#L202-L226
|
[
"def get_context_hints_per_source(context_renderers):\n \"\"\"\n Given a list of context renderers, return a dictionary of context hints per source.\n \"\"\"\n # Merge the context render hints for each source as there can be multiple context hints for\n # sources depending on the render target. Merging them together involves combining select\n # and prefetch related hints for each context renderer\n context_hints_per_source = defaultdict(lambda: defaultdict(lambda: {\n 'app_name': None,\n 'model_name': None,\n 'select_related': set(),\n 'prefetch_related': set(),\n }))\n for cr in context_renderers:\n for key, hints in cr.context_hints.items() if cr.context_hints else []:\n for source in cr.get_sources():\n context_hints_per_source[source][key]['app_name'] = hints['app_name']\n context_hints_per_source[source][key]['model_name'] = hints['model_name']\n context_hints_per_source[source][key]['select_related'].update(hints.get('select_related', []))\n context_hints_per_source[source][key]['prefetch_related'].update(hints.get('prefetch_related', []))\n\n return context_hints_per_source\n",
"def get_querysets_for_context_hints(context_hints_per_source):\n \"\"\"\n Given a list of context hint dictionaries, return a dictionary\n of querysets for efficient context loading. The return value\n is structured as follows:\n\n {\n model: queryset,\n ...\n }\n \"\"\"\n model_select_relateds = defaultdict(set)\n model_prefetch_relateds = defaultdict(set)\n model_querysets = {}\n for context_hints in context_hints_per_source.values():\n for hints in context_hints.values():\n model = get_model(hints['app_name'], hints['model_name'])\n model_querysets[model] = model.objects\n model_select_relateds[model].update(hints.get('select_related', []))\n model_prefetch_relateds[model].update(hints.get('prefetch_related', []))\n\n # Attach select and prefetch related parameters to the querysets if needed\n for model, queryset in model_querysets.items():\n if model_select_relateds[model]:\n queryset = queryset.select_related(*model_select_relateds[model])\n if model_prefetch_relateds[model]:\n queryset = queryset.prefetch_related(*model_prefetch_relateds[model])\n model_querysets[model] = queryset\n\n return model_querysets\n",
"def get_model_ids_to_fetch(events, context_hints_per_source):\n \"\"\"\n Obtains the ids of all models that need to be fetched. Returns a dictionary of models that\n point to sets of ids that need to be fetched. Return output is as follows:\n\n {\n model: [id1, id2, ...],\n ...\n }\n \"\"\"\n number_types = (complex, float) + six.integer_types\n model_ids_to_fetch = defaultdict(set)\n\n for event in events:\n context_hints = context_hints_per_source.get(event.source, {})\n for context_key, hints in context_hints.items():\n for d, value in dict_find(event.context, context_key):\n values = value if isinstance(value, list) else [value]\n model_ids_to_fetch[get_model(hints['app_name'], hints['model_name'])].update(\n v for v in values if isinstance(v, number_types)\n )\n\n return model_ids_to_fetch\n",
"def fetch_model_data(model_querysets, model_ids_to_fetch):\n \"\"\"\n Given a dictionary of models to querysets and model IDs to models, fetch the IDs\n for every model and return the objects in the following structure.\n\n {\n model: {\n id: obj,\n ...\n },\n ...\n }\n \"\"\"\n return {\n model: id_dict(model_querysets[model].filter(id__in=ids_to_fetch))\n for model, ids_to_fetch in model_ids_to_fetch.items()\n }\n",
"def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):\n \"\"\"\n Given the fetched model data and the context hints for each source, go through each\n event and populate the contexts with the loaded information.\n \"\"\"\n for event in events:\n context_hints = context_hints_per_source.get(event.source, {})\n for context_key, hints in context_hints.items():\n model = get_model(hints['app_name'], hints['model_name'])\n for d, value in dict_find(event.context, context_key):\n if isinstance(value, list):\n for i, model_id in enumerate(d[context_key]):\n d[context_key][i] = model_data[model].get(model_id)\n else:\n d[context_key] = model_data[model].get(value)\n",
"def load_renderers_into_events(events, mediums, context_renderers, default_rendering_style):\n \"\"\"\n Given the events and the context renderers, load the renderers into the event objects\n so that they may be able to call the 'render' method later on.\n \"\"\"\n # Make a mapping of source groups and rendering styles to context renderers. Do\n # the same for sources and rendering styles to context renderers\n source_group_style_to_renderer = {\n (cr.source_group_id, cr.rendering_style_id): cr\n for cr in context_renderers if cr.source_group_id\n }\n source_style_to_renderer = {\n (cr.source_id, cr.rendering_style_id): cr\n for cr in context_renderers if cr.source_id\n }\n\n for e in events:\n for m in mediums:\n # Try the following when loading a context renderer for a medium in an event.\n # 1. Try to look up the renderer based on the source group and medium rendering style\n # 2. If step 1 doesn't work, look up based on the source and medium rendering style\n # 3. If step 2 doesn't work, look up based on the source group and default rendering style\n # 4. if step 3 doesn't work, look up based on the source and default rendering style\n # If none of those steps work, this event will not be able to be rendered for the mediun\n cr = source_group_style_to_renderer.get((e.source.group_id, m.rendering_style_id))\n if not cr:\n cr = source_style_to_renderer.get((e.source_id, m.rendering_style_id))\n if not cr and default_rendering_style:\n cr = source_group_style_to_renderer.get((e.source.group_id, default_rendering_style.id))\n if not cr and default_rendering_style:\n cr = source_style_to_renderer.get((e.source_id, default_rendering_style.id))\n\n if cr:\n e._context_renderers[m] = cr\n",
"def get_default_rendering_style():\n default_rendering_style = getattr(settings, 'DEFAULT_ENTITY_EVENT_RENDERING_STYLE', None)\n if default_rendering_style:\n default_rendering_style = get_model('entity_event', 'RenderingStyle').objects.get(name=default_rendering_style)\n\n return default_rendering_style\n"
] |
"""
A module for loading contexts using context hints.
"""
from collections import defaultdict
import six
from django.conf import settings
from django.db.models import Q
try:
# Django 1.9
from django.apps import apps
get_model = apps.get_model
except ImportError: # pragma: no cover
# Django < 1.9
from django.db.models import get_model
from manager_utils import id_dict
from entity_event.models import ContextRenderer
def get_context_hints_per_source(context_renderers):
"""
Given a list of context renderers, return a dictionary of context hints per source.
"""
# Merge the context render hints for each source as there can be multiple context hints for
# sources depending on the render target. Merging them together involves combining select
# and prefetch related hints for each context renderer
context_hints_per_source = defaultdict(lambda: defaultdict(lambda: {
'app_name': None,
'model_name': None,
'select_related': set(),
'prefetch_related': set(),
}))
for cr in context_renderers:
for key, hints in cr.context_hints.items() if cr.context_hints else []:
for source in cr.get_sources():
context_hints_per_source[source][key]['app_name'] = hints['app_name']
context_hints_per_source[source][key]['model_name'] = hints['model_name']
context_hints_per_source[source][key]['select_related'].update(hints.get('select_related', []))
context_hints_per_source[source][key]['prefetch_related'].update(hints.get('prefetch_related', []))
return context_hints_per_source
def get_querysets_for_context_hints(context_hints_per_source):
"""
Given a list of context hint dictionaries, return a dictionary
of querysets for efficient context loading. The return value
is structured as follows:
{
model: queryset,
...
}
"""
model_select_relateds = defaultdict(set)
model_prefetch_relateds = defaultdict(set)
model_querysets = {}
for context_hints in context_hints_per_source.values():
for hints in context_hints.values():
model = get_model(hints['app_name'], hints['model_name'])
model_querysets[model] = model.objects
model_select_relateds[model].update(hints.get('select_related', []))
model_prefetch_relateds[model].update(hints.get('prefetch_related', []))
# Attach select and prefetch related parameters to the querysets if needed
for model, queryset in model_querysets.items():
if model_select_relateds[model]:
queryset = queryset.select_related(*model_select_relateds[model])
if model_prefetch_relateds[model]:
queryset = queryset.prefetch_related(*model_prefetch_relateds[model])
model_querysets[model] = queryset
return model_querysets
def dict_find(d, which_key):
"""
Finds key values in a nested dictionary. Returns a tuple of the dictionary in which
the key was found along with the value
"""
# If the starting point is a list, iterate recursively over all values
if isinstance(d, (list, tuple)):
for i in d:
for result in dict_find(i, which_key):
yield result
# Else, iterate over all key values of the dictionary
elif isinstance(d, dict):
for k, v in d.items():
if k == which_key:
yield d, v
for result in dict_find(v, which_key):
yield result
def get_model_ids_to_fetch(events, context_hints_per_source):
"""
Obtains the ids of all models that need to be fetched. Returns a dictionary of models that
point to sets of ids that need to be fetched. Return output is as follows:
{
model: [id1, id2, ...],
...
}
"""
number_types = (complex, float) + six.integer_types
model_ids_to_fetch = defaultdict(set)
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
for d, value in dict_find(event.context, context_key):
values = value if isinstance(value, list) else [value]
model_ids_to_fetch[get_model(hints['app_name'], hints['model_name'])].update(
v for v in values if isinstance(v, number_types)
)
return model_ids_to_fetch
def fetch_model_data(model_querysets, model_ids_to_fetch):
"""
Given a dictionary of models to querysets and model IDs to models, fetch the IDs
for every model and return the objects in the following structure.
{
model: {
id: obj,
...
},
...
}
"""
return {
model: id_dict(model_querysets[model].filter(id__in=ids_to_fetch))
for model, ids_to_fetch in model_ids_to_fetch.items()
}
def load_fetched_objects_into_contexts(events, model_data, context_hints_per_source):
"""
Given the fetched model data and the context hints for each source, go through each
event and populate the contexts with the loaded information.
"""
for event in events:
context_hints = context_hints_per_source.get(event.source, {})
for context_key, hints in context_hints.items():
model = get_model(hints['app_name'], hints['model_name'])
for d, value in dict_find(event.context, context_key):
if isinstance(value, list):
for i, model_id in enumerate(d[context_key]):
d[context_key][i] = model_data[model].get(model_id)
else:
d[context_key] = model_data[model].get(value)
def load_renderers_into_events(events, mediums, context_renderers, default_rendering_style):
"""
Given the events and the context renderers, load the renderers into the event objects
so that they may be able to call the 'render' method later on.
"""
# Make a mapping of source groups and rendering styles to context renderers. Do
# the same for sources and rendering styles to context renderers
source_group_style_to_renderer = {
(cr.source_group_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_group_id
}
source_style_to_renderer = {
(cr.source_id, cr.rendering_style_id): cr
for cr in context_renderers if cr.source_id
}
for e in events:
for m in mediums:
# Try the following when loading a context renderer for a medium in an event.
# 1. Try to look up the renderer based on the source group and medium rendering style
# 2. If step 1 doesn't work, look up based on the source and medium rendering style
# 3. If step 2 doesn't work, look up based on the source group and default rendering style
# 4. if step 3 doesn't work, look up based on the source and default rendering style
# If none of those steps work, this event will not be able to be rendered for the mediun
cr = source_group_style_to_renderer.get((e.source.group_id, m.rendering_style_id))
if not cr:
cr = source_style_to_renderer.get((e.source_id, m.rendering_style_id))
if not cr and default_rendering_style:
cr = source_group_style_to_renderer.get((e.source.group_id, default_rendering_style.id))
if not cr and default_rendering_style:
cr = source_style_to_renderer.get((e.source_id, default_rendering_style.id))
if cr:
e._context_renderers[m] = cr
def get_default_rendering_style():
default_rendering_style = getattr(settings, 'DEFAULT_ENTITY_EVENT_RENDERING_STYLE', None)
if default_rendering_style:
default_rendering_style = get_model('entity_event', 'RenderingStyle').objects.get(name=default_rendering_style)
return default_rendering_style
|
ambitioninc/django-entity-event
|
entity_event/models.py
|
_unseen_event_ids
|
python
|
def _unseen_event_ids(medium):
query = '''
SELECT event.id
FROM entity_event_event AS event
LEFT OUTER JOIN (SELECT *
FROM entity_event_eventseen AS seen
WHERE seen.medium_id=%s) AS eventseen
ON event.id = eventseen.event_id
WHERE eventseen.medium_id IS NULL
'''
unseen_events = Event.objects.raw(query, params=[medium.id])
ids = [e.id for e in unseen_events]
return ids
|
Return all events that have not been seen on this medium.
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/models.py#L1191-L1206
| null |
from collections import defaultdict
from datetime import datetime
from operator import or_
from six.moves import reduce
from cached_property import cached_property
from django.contrib.postgres.fields import JSONField
from django.core.serializers.json import DjangoJSONEncoder
from django.db import models, transaction
from django.db.models import Q
from django.db.models.query import QuerySet
from django.template.loader import render_to_string
from django.template import Context, Template
from django.utils.encoding import python_2_unicode_compatible
from entity.models import Entity, EntityRelationship
from entity_event.context_serializer import DefaultContextSerializer
@python_2_unicode_compatible
class Medium(models.Model):
    """A method by which users view events (email, newsfeed, notification center, ...).

    A ``Medium`` row is referenced by ``Subscription`` objects and serves as the
    entry point for querying events, with all subscription and unsubscription
    filtering handled for the caller. The three query entry points are
    :meth:`events` (all subscribed events), :meth:`entity_events` (events for
    one entity) and :meth:`events_targets` (events paired with their target
    entities, e.g. for push-style delivery).

    An optional ``rendering_style`` selects which ``ContextRenderer`` objects
    are used when rendering events for this medium; when a source has no
    renderer for that style, the default style from settings is tried.
    ``additional_context`` is extra template context always supplied when this
    medium renders events, enabling medium-specific presentation tweaks.
    """
    # Identification / display metadata.
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    time_created = models.DateTimeField(auto_now_add=True)
    # Preferred rendering style; falls back to the settings default when a
    # source has no renderer configured for this style.
    rendering_style = models.ForeignKey('entity_event.RenderingStyle', null=True, on_delete=models.CASCADE)
    # Extra context merged into every event rendered for this medium.
    additional_context = JSONField(null=True, default=None, encoder=DjangoJSONEncoder)

    def __str__(self):
        """Human-readable representation: the display name."""
        return self.display_name

    @transaction.atomic
    def events(self, **event_filters):
        """Return all events subscribed to this medium, with basic filters.

        Entity-independent query, e.g. for a site-wide feed. Accepts the
        event-level filters of :meth:`get_filtered_events` (``start_time``,
        ``end_time``, ``seen``, ``mark_seen``, ``include_expired``, ``actor``).

        :rtype: EventQuerySet
        :returns: A queryset of events.
        """
        matched = self.get_filtered_events(**event_filters)
        subs = Subscription.objects.cache_related().filter(medium=self)
        # "Only following" subscriptions match an event only when one of its
        # actors is followed by a subscribed entity; others match by source.
        q_filters = [
            Q(
                eventactor__entity__in=self.followed_by(sub.subscribed_entities()),
                source=sub.source
            )
            for sub in subs if sub.only_following
        ]
        broadcast_sources = [sub.source for sub in subs if not sub.only_following]
        q_filters.append(Q(source__in=broadcast_sources))
        return matched.cache_related().filter(reduce(or_, q_filters))

    @transaction.atomic
    def entity_events(self, entity, **event_filters):
        """Return subscribed events relevant to a single entity.

        Includes events the entity is subscribed to directly or via a group
        subscription, honoring ``only_following`` semantics and any
        unsubscriptions. Accepts the same event-level filters as
        :meth:`get_filtered_events`.

        :type entity: Entity
        :param entity: The entity to get events for.
        :rtype: list
        :returns: The matching events.
        """
        matched = self.get_filtered_events(**event_filters)
        subs = self.subset_subscriptions(Subscription.objects.filter(medium=self), entity)
        q_filters = [
            Q(
                eventactor__entity__in=self.followed_by(entity),
                source=sub.source
            )
            for sub in subs if sub.only_following
        ]
        q_filters.append(
            Q(source__in=[sub.source for sub in subs if not sub.only_following])
        )
        # Drop events from sources the entity has unsubscribed from.
        return [
            event for event in matched.filter(reduce(or_, q_filters))
            if self.filter_source_targets_by_unsubscription(event.source_id, [entity])
        ]

    @transaction.atomic
    def events_targets(self, entity_kind=None, **event_filters):
        """Return ``(event, targets)`` pairs for bulk push-style processing.

        For every matching event, computes the entities who should receive it
        on this medium, respecting subscriptions, ``only_following`` and
        unsubscriptions. Events with no remaining targets are omitted.

        :type entity_kind: EntityKind
        :param entity_kind: When given, restrict each target list to this kind.
        :rtype: list of tuples
        :returns: ``(event, targets)`` pairs, ``targets`` a list of entities.
        """
        matched = self.get_filtered_events(**event_filters)
        subs = Subscription.objects.filter(medium=self)
        pairs = []
        for event in matched:
            recipients = []
            for sub in subs:
                if event.source != sub.source:
                    continue
                subscribed = sub.subscribed_entities()
                if sub.only_following:
                    # Only subscribed entities that follow one of the actors.
                    followers = self.followers_of(
                        event.eventactor_set.values_list('entity__id', flat=True)
                    )
                    recipients.extend(
                        Entity.objects.filter(Q(id__in=subscribed), Q(id__in=followers))
                    )
                else:
                    recipients.extend(subscribed)
            recipients = self.filter_source_targets_by_unsubscription(event.source_id, recipients)
            if entity_kind:
                recipients = [r for r in recipients if r.entity_kind == entity_kind]
            if recipients:
                pairs.append((event, recipients))
        return pairs

    def subset_subscriptions(self, subscriptions, entity=None):
        """Restrict ``subscriptions`` to those the given entity is part of.

        An entity is part of a subscription when the subscription is for the
        entity itself with no sub-entity kind (individual subscription), or
        for one of its super-entities with a sub-entity kind matching the
        entity's kind (group subscription). With ``entity=None`` the input
        queryset is returned unchanged.

        :rtype: QuerySet
        """
        if entity is None:
            return subscriptions
        parents = EntityRelationship.objects.filter(
            sub_entity=entity).values_list('super_entity')
        individual = Q(entity=entity, sub_entity_kind=None)
        group = Q(entity__in=parents, sub_entity_kind=entity.entity_kind)
        return subscriptions.filter(individual | group)

    @cached_property
    def unsubscriptions(self):
        """Unsubscribed entity ids per source, as ``{source_id: [entity_ids]}``.

        Cached on the instance; computed once from the Unsubscription table.
        """
        by_source = defaultdict(list)
        rows = Unsubscription.objects.filter(medium=self).values('entity', 'source')
        for row in rows:
            by_source[row['source']].append(row['entity'])
        return by_source

    def filter_source_targets_by_unsubscription(self, source_id, targets):
        """Drop targets that unsubscribed from ``source_id`` on this medium."""
        excluded = self.unsubscriptions[source_id]
        return [t for t in targets if t.id not in excluded]

    def get_filtered_events_queries(self, start_time, end_time, seen, include_expired, actor):
        """Build the Q objects applying the event-level filters.

        Semantics of the arguments are documented on :meth:`events`.

        :rtype: list of Q objects
        :returns: Filters suitable for ``Event.objects.filter``.
        """
        now = datetime.utcnow()
        q_filters = []
        if start_time is not None:
            q_filters.append(Q(time__gte=start_time))
        if end_time is not None:
            q_filters.append(Q(time__lte=end_time))
        if not include_expired:
            q_filters.append(Q(time_expires__gte=now))
        # ``seen`` is tri-state: True -> only seen, False -> only unseen,
        # None -> no seen/unseen filtering at all.
        if seen is True:
            q_filters.append(Q(eventseen__medium=self))
        elif seen is False:
            q_filters.append(Q(id__in=_unseen_event_ids(medium=self)))
        if actor is not None:
            q_filters.append(Q(eventactor__entity=actor))
        return q_filters

    def get_filtered_events(
            self, start_time=None, end_time=None, seen=None, mark_seen=False, include_expired=False, actor=None):
        """Fetch events matching the event-level filters, optionally marking them seen.

        :rtype: EventQuerySet
        :returns: All events which match the given filters.
        """
        queries = self.get_filtered_events_queries(start_time, end_time, seen, include_expired, actor)
        events = Event.objects.filter(*queries)
        if seen is False and mark_seen:
            # Pin the result set to concrete ids before marking seen;
            # otherwise the lazy "unseen" queryset would become empty once
            # the EventSeen rows are created.
            events = Event.objects.filter(id__in=[e.id for e in events])
            events.mark_seen(self)
        return events

    def followed_by(self, entities):
        """Entities that the given entities follow: themselves plus super-entities.

        Default semantics for the ``only_following`` flag; subclasses may
        override this (together with :meth:`followers_of`, its inverse) to
        change what "following" means for a medium.

        :type entities: Entity or EntityQuerySet
        :rtype: EntityQuerySet
        """
        if isinstance(entities, Entity):
            entities = Entity.objects.filter(id=entities.id)
        parents = EntityRelationship.objects.filter(
            sub_entity__in=entities).values_list('super_entity')
        return Entity.objects.filter(Q(id__in=entities) | Q(id__in=parents))

    def followers_of(self, entities):
        """Entities that follow the given entities: themselves plus sub-entities.

        Inverse of :meth:`followed_by`; override both together to customize
        following semantics.

        :type entities: Entity or EntityQuerySet
        :rtype: EntityQuerySet
        """
        if isinstance(entities, Entity):
            entities = Entity.objects.filter(id=entities.id)
        children = EntityRelationship.objects.filter(
            super_entity__in=entities).values_list('sub_entity')
        return Entity.objects.filter(Q(id__in=entities) | Q(id__in=children))

    def render(self, events):
        """Render events for this medium after loading their contexts.

        :type events: list
        :param events: A list or queryset of Event models.
        :rtype: dict
        :returns: ``{event: (txt, html)}`` renderings keyed on the events.
        """
        from entity_event import context_loader
        context_loader.load_contexts_and_renderers(events, [self])
        return {event: event.render(self) for event in events}
@python_2_unicode_compatible
class Source(models.Model):
    """A fine-grained category describing where events come from.

    Sources serve two purposes: they are the categories that
    ``Subscription`` objects reference when routing events to mediums, and
    each ``Event`` must point at a source. Broader categorization is
    provided by the related ``SourceGroup``.

    Fields:

    - ``name``: short, unique identifier for the source.
    - ``display_name``: short human-readable name (need not be unique).
    - ``description``: human-readable description.
    - ``group``: the ``SourceGroup`` this source belongs to.
    """
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    group = models.ForeignKey('entity_event.SourceGroup', on_delete=models.CASCADE)

    def __str__(self):
        """Human-readable representation: the display name."""
        return self.display_name
@python_2_unicode_compatible
class SourceGroup(models.Model):
    """A broad grouping of ``Source`` objects.

    Purely a categorization aid — no behavior beyond collecting related,
    fine-grained sources under one label.

    Fields:

    - ``name``: short, unique identifier for the group.
    - ``display_name``: short human-readable name (need not be unique).
    - ``description``: human-readable description.
    """
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()

    def __str__(self):
        """Human-readable representation: the display name."""
        return self.display_name
@python_2_unicode_compatible
class Unsubscription(models.Model):
    """Opt-out record: one entity no longer sees one source on one medium.

    Because subscriptions can cover whole groups, removing a single entity
    cannot be done by deleting a ``Subscription`` row; instead an
    ``Unsubscription`` row is created and the medium query methods filter it
    out automatically — no further work is needed after creation.

    Fields:

    - ``entity``: the entity opting out.
    - ``medium``: where the entity no longer wants to see events.
    - ``source``: which category of events to suppress.
    """
    entity = models.ForeignKey('entity.Entity', on_delete=models.CASCADE)
    medium = models.ForeignKey('entity_event.Medium', on_delete=models.CASCADE)
    source = models.ForeignKey('entity_event.Source', on_delete=models.CASCADE)

    def __str__(self):
        """Human-readable representation, e.g. ``"alice from photos by email"``."""
        return '{entity} from {source} by {medium}'.format(
            entity=str(self.entity),
            source=str(self.source),
            medium=str(self.medium),
        )
class SubscriptionQuerySet(QuerySet):
    """Custom QuerySet for ``Subscription`` with related-object caching."""

    def cache_related(self):
        """Select the related rows this app commonly dereferences.

        :return: A queryset with medium, source, entity and sub_entity_kind
            fetched in the same query.
        """
        related_fields = ('medium', 'source', 'entity', 'sub_entity_kind')
        return self.select_related(*related_fields)
@python_2_unicode_compatible
class Subscription(models.Model):
    """Routes events from a ``Source`` to a ``Medium`` for one or more entities.

    A subscription is *individual* (``sub_entity_kind`` is ``None``: only
    ``entity`` itself is subscribed) or a *group* subscription
    (``sub_entity_kind`` set: all sub-entities of ``entity`` with that kind
    are subscribed).

    When ``only_following`` is ``True``, an entity sees an event through this
    subscription only if it follows one of the event's actors (see
    ``Medium.followed_by`` / ``Medium.followers_of`` for the default
    following semantics, which Medium subclasses may override). When
    ``False``, every subscribed entity sees every event from the source.

    The medium query methods (``events``, ``entity_events``,
    ``events_targets``) only ever return events whose source has a
    subscription to that medium, so all routing logic lives in these rows.
    """
    medium = models.ForeignKey('entity_event.Medium', on_delete=models.CASCADE)
    source = models.ForeignKey('entity_event.Source', on_delete=models.CASCADE)
    entity = models.ForeignKey('entity.Entity', related_name='+', on_delete=models.CASCADE)
    # For group subscriptions: the kind of sub-entities covered; None means
    # an individual subscription for ``entity`` alone.
    sub_entity_kind = models.ForeignKey(
        'entity.EntityKind', null=True, related_name='+', default=None, on_delete=models.CASCADE
    )
    only_following = models.BooleanField(default=True)

    objects = SubscriptionQuerySet.as_manager()

    def __str__(self):
        """Human-readable representation, e.g. ``"alice to photos by email"``."""
        return '{entity} to {source} by {medium}'.format(
            entity=str(self.entity),
            source=str(self.source),
            medium=str(self.medium),
        )

    def subscribed_entities(self):
        """Return all entities covered by this subscription.

        A single entity for an individual subscription; otherwise every
        sub-entity of ``entity`` with the subscription's sub-entity kind.

        :rtype: EntityQuerySet
        """
        if self.sub_entity_kind is None:
            return Entity.all_objects.filter(id=self.entity.id)
        member_ids = self.entity.sub_relationships.filter(
            sub_entity__entity_kind=self.sub_entity_kind).values_list('sub_entity')
        return Entity.objects.filter(id__in=member_ids)
class EventQuerySet(QuerySet):
    """Custom QuerySet for ``Event``."""

    def cache_related(self):
        """Fetch the source (and its group) alongside the events.

        :return: A queryset with related source/group caching applied.
        """
        return self.select_related(
            'source'
        ).prefetch_related(
            'source__group'
        )

    def mark_seen(self, medium):
        """Record every event in this queryset as seen on ``medium``.

        Bulk-creates ``EventSeen`` rows so the events are excluded when
        ``seen=False`` is passed to the medium retrieval methods
        (``events``, ``entity_events``, ``events_targets``).
        """
        seen_records = [EventSeen(event=event, medium=medium) for event in self]
        EventSeen.objects.bulk_create(seen_records)

    def load_contexts_and_renderers(self, medium):
        """Populate each event's ``context`` for ``medium``.

        Evaluates the queryset; returns a plain list of events.
        """
        from entity_event import context_loader
        return context_loader.load_contexts_and_renderers(self, [medium])
class EventManager(models.Manager):
"""
A custom Manager for Events.
"""
def get_queryset(self):
"""
Return the EventQuerySet.
"""
return EventQuerySet(self.model)
def cache_related(self):
"""
Return a queryset with prefetched values
:return:
"""
return self.get_queryset().cache_related()
def mark_seen(self, medium):
"""
Creates EventSeen objects for the provided medium for every event
in the queryset.
Creating these EventSeen objects ensures they will not be
returned when passing ``seen=False`` to any of the medium
event retrieval functions, ``events``, ``entity_events``, or
``events_targets``.
"""
return self.get_queryset().mark_seen(medium)
def load_contexts_and_renderers(self, medium):
"""
Loads context data into the event ``context`` variable. This method
destroys the queryset and returns a list of events.
"""
return self.get_queryset().load_contexts_and_renderers(medium)
@transaction.atomic
def create_event(self, actors=None, ignore_duplicates=False, **kwargs):
"""
Create events with actors.
This method can be used in place of ``Event.objects.create``
to create events, and the appropriate actors. It takes all the
same keywords as ``Event.objects.create`` for the event
creation, but additionally takes a list of actors, and can be
told to not attempt to create an event if a duplicate event
exists.
:type source: Source
:param source: A ``Source`` object representing where the
event came from.
:type context: dict
:param context: A dictionary containing relevant
information about the event, to be serialized into
JSON. It is possible to load additional context
dynamically when events are fetched. See the
documentation on the ``ContextRenderer`` model.
:type uuid: str
:param uuid: A unique string for the event. Requiring a
``uuid`` allows code that creates events to ensure they do
not create duplicate events. This id could be, for example
some hash of the ``context``, or, if the creator is
unconcerned with creating duplicate events a call to
python's ``uuid1()`` in the ``uuid`` module.
:type time_expires: datetime (optional)
:param time_expires: If given, the default methods for
querying events will not return this event after this time
has passed.
:type actors: (optional) List of entities or list of entity ids.
:param actors: An ``EventActor`` object will be created for
each entity in the list. This allows for subscriptions
which are only following certain entities to behave
appropriately.
:type ignore_duplicates: (optional) Boolean
:param ignore_duplicates: If ``True``, a check will be made to
ensure that an event with the give ``uuid`` does not exist
before attempting to create the event. Setting this to
``True`` allows the creator of events to gracefully ensure
no duplicates are attempted to be created. There is a uniqueness constraint on uuid
so it will raise an exception if duplicates are allowed and submitted.
:rtype: Event
:returns: The created event. Alternatively if a duplicate
event already exists and ``ignore_duplicates`` is
``True``, it will return ``None``.
"""
kwargs['actors'] = actors
kwargs['ignore_duplicates'] = ignore_duplicates
events = self.create_events([kwargs])
if events:
return events[0]
return None
def create_events(self, kwargs_list):
"""
Create events in bulk to save on queries. Each element in the kwargs list should be a dict with the same set
of arguments you would normally pass to create_event
:param kwargs_list: list of kwargs dicts
:return: list of Event
"""
# Build map of uuid to event info
uuid_map = {
kwargs.get('uuid', ''): {
'actors': kwargs.pop('actors', []),
'ignore_duplicates': kwargs.pop('ignore_duplicates', False),
'event_kwargs': kwargs
}
for kwargs in kwargs_list
}
# Check for uuids
uuid_set = set(Event.objects.filter(uuid__in=uuid_map.keys()).values_list('uuid', flat=True))
# Set a flag for whether each uuid exists
for uuid, event_dict in uuid_map.items():
event_dict['exists'] = uuid in uuid_set
# Build list of events to bulk create
events_to_create = []
for uuid, event_dict in uuid_map.items():
# If the event doesn't already exist or the event does exist but we are allowing duplicates
if not event_dict['exists'] or not event_dict['ignore_duplicates']:
events_to_create.append(Event(**event_dict['event_kwargs']))
# Bulk create the events
created_events = Event.objects.bulk_create(events_to_create)
# Build list of EventActor objects to bulk create
event_actors_to_create = []
for created_event in created_events:
event_dict = uuid_map[created_event.uuid]
if event_dict['actors'] is not None:
for actor in event_dict['actors']:
actor_id = actor.id if hasattr(actor, 'id') else actor
event_actors_to_create.append(EventActor(entity_id=actor_id, event=created_event))
EventActor.objects.bulk_create(event_actors_to_create)
return created_events
@python_2_unicode_compatible
class Event(models.Model):
"""
``Event`` objects store information about events. By storing
events, from a given source, with some context, they are made
available to any ``Medium`` object with an appropriate
subscription. Events can be created with
``Event.objects.create_event``, documented above.
When creating an event, the information about what occurred is
stored in a JSON blob in the ``context`` field. This context can
be any type of information that could be useful for displaying
events on a given Medium. It is entirely the role of the
application developer to ensure that there is agreement between
what information is stored in ``Event.context`` and what
information the code the processes and displays events on each
medium expects.
Events will usually be created by code that also created, or knows
about the ``Source`` object that is required to create the event.
To prevent storing unnecessary data in the context, this code can
define a context loader function when creating this source, which
can be used to dynamically fetch more data based on whatever
limited amount of data makes sense to store in the context. This
is further documented in the ``Source`` documentation.
"""
source = models.ForeignKey('entity_event.Source', on_delete=models.CASCADE)
context = JSONField(encoder=DjangoJSONEncoder)
time = models.DateTimeField(auto_now_add=True, db_index=True)
time_expires = models.DateTimeField(default=datetime.max, db_index=True)
uuid = models.CharField(max_length=512, unique=True)
objects = EventManager()
def __init__(self, *args, **kwargs):
super(Event, self).__init__(*args, **kwargs)
# A dictionary that is populated with renderers after the contexts have been
# properly loaded. When renderers are available, the 'render' method may be
# called with a medium and optional observer
self._context_renderers = {}
def _merge_medium_additional_context_with_context(self, medium):
"""
If the medium has additional context properties, merge those together here in the
main context before rendering.
"""
if medium.additional_context:
context = self.context.copy()
context.update(medium.additional_context)
return context
else:
return self.context
def render(self, medium, observing_entity=None):
"""
Returns the rendered event as a tuple of text and html content. This information
is filled out with respect to which medium is rendering the event, what context
renderers are available with the prefetched context, and which optional entity
may be observing the rendered event.
"""
if medium not in self._context_renderers:
raise RuntimeError('Context and renderer for medium {0} has not or cannot been fetched'.format(medium))
else:
context = self._merge_medium_additional_context_with_context(medium)
return self._context_renderers[medium].render_context_to_text_html_templates(context)
def get_serialized_context(self, medium):
"""
Returns the serialized context of the event for a specific medium
:param medium:
:return:
"""
if medium not in self._context_renderers:
raise RuntimeError('Context and renderer for medium {0} has not or cannot been fetched'.format(medium))
else:
context = self._merge_medium_additional_context_with_context(medium)
return self._context_renderers[medium].get_serialized_context(context)
def __str__(self):
"""
Readable representation of ``Event`` objects.
"""
s = '{source} event at {time}'
source = self.source.__str__()
time = self.time.strftime('%Y-%m-%d::%H:%M:%S')
return s.format(source=source, time=time)
class AdminEvent(Event):
"""
A proxy model used to provide a separate interface for event
creation through the django-admin interface.
"""
class Meta:
proxy = True
@python_2_unicode_compatible
class EventActor(models.Model):
"""
``EventActor`` objects encode what entities were involved in an
event. They provide the information necessary to create "only
following" subscriptions which route events only to the entities
that are involved in the event.
``EventActor`` objects should not be created directly, but should
be created as part of the creation of ``Event`` objects, using
``Event.objects.create_event``.
"""
event = models.ForeignKey('entity_event.Event', on_delete=models.CASCADE)
entity = models.ForeignKey('entity.Entity', on_delete=models.CASCADE)
def __str__(self):
"""
Readable representation of ``EventActor`` objects.
"""
s = 'Event {eventid} - {entity}'
eventid = self.event.id
entity = self.entity.__str__()
return s.format(eventid=eventid, entity=entity)
@python_2_unicode_compatible
class EventSeen(models.Model):
"""
``EventSeen`` objects store information about where and when an
event was seen. They store the medium that the event was seen on,
and what time it was seen. This information is used by the event
querying methods on ``Medium`` objects to filter events by whether
or not they have been seen on that medium.
``EventSeen`` objects should not be created directly, but should
be created by using the ``EventQuerySet.mark_seen`` method,
available on the QuerySets returned by the event querying methods.
"""
event = models.ForeignKey('entity_event.Event', on_delete=models.CASCADE)
medium = models.ForeignKey('entity_event.Medium', on_delete=models.CASCADE)
time_seen = models.DateTimeField(default=datetime.utcnow)
class Meta:
unique_together = ('event', 'medium')
def __str__(self):
"""
Readable representation of ``EventSeen`` objects.
"""
s = 'Seen on {medium} at {time}'
medium = self.medium.__str__()
time = self.time_seen.strftime('%Y-%m-%d::%H:%M:%S')
return s.format(medium=medium, time=time)
@python_2_unicode_compatible
class RenderingStyle(models.Model):
"""
Defines a rendering style. This is used to group together mediums that have
similar rendering styles and allows context renderers to be used across
mediums.
"""
name = models.CharField(max_length=64, unique=True)
display_name = models.CharField(max_length=64, default='')
def __str__(self):
return '{0} {1}'.format(self.display_name, self.name)
@python_2_unicode_compatible
class ContextRenderer(models.Model):
"""
``ContextRenderer`` objects store information about how
a source or source group is rendered with a particular rendering style, along with
information for loading the render context in a database-efficient
manner.
Of the four template fields: `text_template_path`, 'html_template_path',
`text_template`, and `html_template`, at least one must be
non-empty. Both a text and html template may be provided, either
through a path to the template, or a raw template object.
If both are provided, the template given in the path will be used and
the text template will be ignored.
This object is linked to a `RenderingStyle` object. This is how the
context renderer is associated with various `Medium` objects. It also
provides the `source` that uses the renderer. If a `source_group` is specified,
all sources under that group use this context renderer for the rendering style.
The `context_hints` provide the ability to fetch model IDs of an event context that
are stored in the database. For example, if an event context has a `user` key that
points to the PK of a Django `User` model, the context hints for it would be specified
as follows:
.. code-block:: python
{
'user': {
'app_name': 'auth',
'model_name': 'User',
}
}
With these hints, the 'user' field in the event context will be treated as a PK in the
database and fetched appropriately. If one wishes to perform and prefetch or select_related
calls, the following options can be added:
.. code-block:: python
{
'user': {
'app_name': 'auth',
'model_name': 'User',
'select_related': ['foreign_key_field', 'one_to_one_field'],
'prefetch_related': ['reverse_foreign_key_field', 'many_to_many_field'],
}
}
Note that as many keys can be defined that have corresponding keys in the event context for
the particular source or source group. Also note that the keys in the event context can
be embedded anywhere in the context and can also point to a list of PKs. For example:
.. code-block:: python
{
'my_context': {
'user': [1, 3, 5, 10],
'other_context_info': 'other_info_string',
},
'more_context': {
'hello': 'world',
}
}
In the above case, `User` objects with the PKs 1, 3, 5, and 10 will be fetched and loaded into
the event context whenever rendering is performed.
"""
name = models.CharField(max_length=64, unique=True)
# The various templates that can be used for rendering
text_template_path = models.CharField(max_length=256, default='')
html_template_path = models.CharField(max_length=256, default='')
text_template = models.TextField(default='')
html_template = models.TextField(default='')
# The source or source group of the event. It can only be one or the other
source = models.ForeignKey('entity_event.Source', null=True, on_delete=models.CASCADE)
source_group = models.ForeignKey('entity_event.SourceGroup', null=True, on_delete=models.CASCADE)
# The rendering style. Used to associated it with a medium
rendering_style = models.ForeignKey('entity_event.RenderingStyle', on_delete=models.CASCADE)
# Contains hints on how to fetch the context from the database
context_hints = JSONField(null=True, default=None, encoder=DjangoJSONEncoder)
class Meta:
unique_together = ('source', 'rendering_style')
def get_sources(self):
return [self.source] if self.source_id else self.source_group.source_set.all()
def __str__(self):
return self.name
def get_serialized_context(self, context):
"""
Serializes the context using the serializer class.
"""
return DefaultContextSerializer(context).data
def render_text_or_html_template(self, context, is_text=True):
"""
Renders a text or html template based on either the template path or the
stored template.
"""
template_path = getattr(self, '{0}_template_path'.format('text' if is_text else 'html'))
template = getattr(self, '{0}_template'.format('text' if is_text else 'html'))
if template_path:
return render_to_string(template_path, context)
elif template:
return Template(template).render(Context(context))
else:
return ''
def render_context_to_text_html_templates(self, context):
"""
Render the templates with the provided context.
Args:
A loaded context.
Returns:
A tuple of (rendered_text, rendered_html). Either, but not both
may be an empty string.
"""
# Process text template:
return (
self.render_text_or_html_template(context, is_text=True).strip(),
self.render_text_or_html_template(context, is_text=False).strip(),
)
|
ambitioninc/django-entity-event
|
entity_event/models.py
|
EventQuerySet.mark_seen
|
python
|
def mark_seen(self, medium):
EventSeen.objects.bulk_create([
EventSeen(event=event, medium=medium) for event in self
])
|
Creates EventSeen objects for the provided medium for every event
in the queryset.
Creating these EventSeen objects ensures they will not be
returned when passing ``seen=False`` to any of the medium
event retrieval functions, ``events``, ``entity_events``, or
``events_targets``.
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/models.py#L866-L878
| null |
class EventQuerySet(QuerySet):
"""
A custom QuerySet for Events.
"""
def cache_related(self):
"""
Cache any related objects that we may use
:return:
"""
return self.select_related(
'source'
).prefetch_related(
'source__group'
)
def load_contexts_and_renderers(self, medium):
"""
Loads context data into the event ``context`` variable. This method
destroys the queryset and returns a list of events.
"""
from entity_event import context_loader
return context_loader.load_contexts_and_renderers(self, [medium])
|
ambitioninc/django-entity-event
|
entity_event/models.py
|
EventManager.create_event
|
python
|
def create_event(self, actors=None, ignore_duplicates=False, **kwargs):
kwargs['actors'] = actors
kwargs['ignore_duplicates'] = ignore_duplicates
events = self.create_events([kwargs])
if events:
return events[0]
return None
|
Create events with actors.
This method can be used in place of ``Event.objects.create``
to create events, and the appropriate actors. It takes all the
same keywords as ``Event.objects.create`` for the event
creation, but additionally takes a list of actors, and can be
told to not attempt to create an event if a duplicate event
exists.
:type source: Source
:param source: A ``Source`` object representing where the
event came from.
:type context: dict
:param context: A dictionary containing relevant
information about the event, to be serialized into
JSON. It is possible to load additional context
dynamically when events are fetched. See the
documentation on the ``ContextRenderer`` model.
:type uuid: str
:param uuid: A unique string for the event. Requiring a
``uuid`` allows code that creates events to ensure they do
not create duplicate events. This id could be, for example
some hash of the ``context``, or, if the creator is
unconcerned with creating duplicate events a call to
python's ``uuid1()`` in the ``uuid`` module.
:type time_expires: datetime (optional)
:param time_expires: If given, the default methods for
querying events will not return this event after this time
has passed.
:type actors: (optional) List of entities or list of entity ids.
:param actors: An ``EventActor`` object will be created for
each entity in the list. This allows for subscriptions
which are only following certain entities to behave
appropriately.
:type ignore_duplicates: (optional) Boolean
:param ignore_duplicates: If ``True``, a check will be made to
ensure that an event with the give ``uuid`` does not exist
before attempting to create the event. Setting this to
``True`` allows the creator of events to gracefully ensure
no duplicates are attempted to be created. There is a uniqueness constraint on uuid
so it will raise an exception if duplicates are allowed and submitted.
:rtype: Event
:returns: The created event. Alternatively if a duplicate
event already exists and ``ignore_duplicates`` is
``True``, it will return ``None``.
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/models.py#L926-L988
|
[
"def create_events(self, kwargs_list):\n \"\"\"\n Create events in bulk to save on queries. Each element in the kwargs list should be a dict with the same set\n of arguments you would normally pass to create_event\n :param kwargs_list: list of kwargs dicts\n :return: list of Event\n \"\"\"\n # Build map of uuid to event info\n uuid_map = {\n kwargs.get('uuid', ''): {\n 'actors': kwargs.pop('actors', []),\n 'ignore_duplicates': kwargs.pop('ignore_duplicates', False),\n 'event_kwargs': kwargs\n\n }\n for kwargs in kwargs_list\n }\n\n # Check for uuids\n uuid_set = set(Event.objects.filter(uuid__in=uuid_map.keys()).values_list('uuid', flat=True))\n\n # Set a flag for whether each uuid exists\n for uuid, event_dict in uuid_map.items():\n event_dict['exists'] = uuid in uuid_set\n\n # Build list of events to bulk create\n events_to_create = []\n for uuid, event_dict in uuid_map.items():\n # If the event doesn't already exist or the event does exist but we are allowing duplicates\n if not event_dict['exists'] or not event_dict['ignore_duplicates']:\n events_to_create.append(Event(**event_dict['event_kwargs']))\n\n # Bulk create the events\n created_events = Event.objects.bulk_create(events_to_create)\n\n # Build list of EventActor objects to bulk create\n event_actors_to_create = []\n for created_event in created_events:\n event_dict = uuid_map[created_event.uuid]\n if event_dict['actors'] is not None:\n for actor in event_dict['actors']:\n actor_id = actor.id if hasattr(actor, 'id') else actor\n event_actors_to_create.append(EventActor(entity_id=actor_id, event=created_event))\n\n EventActor.objects.bulk_create(event_actors_to_create)\n\n return created_events\n"
] |
class EventManager(models.Manager):
"""
A custom Manager for Events.
"""
def get_queryset(self):
"""
Return the EventQuerySet.
"""
return EventQuerySet(self.model)
def cache_related(self):
"""
Return a queryset with prefetched values
:return:
"""
return self.get_queryset().cache_related()
def mark_seen(self, medium):
"""
Creates EventSeen objects for the provided medium for every event
in the queryset.
Creating these EventSeen objects ensures they will not be
returned when passing ``seen=False`` to any of the medium
event retrieval functions, ``events``, ``entity_events``, or
``events_targets``.
"""
return self.get_queryset().mark_seen(medium)
def load_contexts_and_renderers(self, medium):
"""
Loads context data into the event ``context`` variable. This method
destroys the queryset and returns a list of events.
"""
return self.get_queryset().load_contexts_and_renderers(medium)
@transaction.atomic
def create_events(self, kwargs_list):
"""
Create events in bulk to save on queries. Each element in the kwargs list should be a dict with the same set
of arguments you would normally pass to create_event
:param kwargs_list: list of kwargs dicts
:return: list of Event
"""
# Build map of uuid to event info
uuid_map = {
kwargs.get('uuid', ''): {
'actors': kwargs.pop('actors', []),
'ignore_duplicates': kwargs.pop('ignore_duplicates', False),
'event_kwargs': kwargs
}
for kwargs in kwargs_list
}
# Check for uuids
uuid_set = set(Event.objects.filter(uuid__in=uuid_map.keys()).values_list('uuid', flat=True))
# Set a flag for whether each uuid exists
for uuid, event_dict in uuid_map.items():
event_dict['exists'] = uuid in uuid_set
# Build list of events to bulk create
events_to_create = []
for uuid, event_dict in uuid_map.items():
# If the event doesn't already exist or the event does exist but we are allowing duplicates
if not event_dict['exists'] or not event_dict['ignore_duplicates']:
events_to_create.append(Event(**event_dict['event_kwargs']))
# Bulk create the events
created_events = Event.objects.bulk_create(events_to_create)
# Build list of EventActor objects to bulk create
event_actors_to_create = []
for created_event in created_events:
event_dict = uuid_map[created_event.uuid]
if event_dict['actors'] is not None:
for actor in event_dict['actors']:
actor_id = actor.id if hasattr(actor, 'id') else actor
event_actors_to_create.append(EventActor(entity_id=actor_id, event=created_event))
EventActor.objects.bulk_create(event_actors_to_create)
return created_events
|
ambitioninc/django-entity-event
|
entity_event/models.py
|
EventManager.create_events
|
python
|
def create_events(self, kwargs_list):
# Build map of uuid to event info
uuid_map = {
kwargs.get('uuid', ''): {
'actors': kwargs.pop('actors', []),
'ignore_duplicates': kwargs.pop('ignore_duplicates', False),
'event_kwargs': kwargs
}
for kwargs in kwargs_list
}
# Check for uuids
uuid_set = set(Event.objects.filter(uuid__in=uuid_map.keys()).values_list('uuid', flat=True))
# Set a flag for whether each uuid exists
for uuid, event_dict in uuid_map.items():
event_dict['exists'] = uuid in uuid_set
# Build list of events to bulk create
events_to_create = []
for uuid, event_dict in uuid_map.items():
# If the event doesn't already exist or the event does exist but we are allowing duplicates
if not event_dict['exists'] or not event_dict['ignore_duplicates']:
events_to_create.append(Event(**event_dict['event_kwargs']))
# Bulk create the events
created_events = Event.objects.bulk_create(events_to_create)
# Build list of EventActor objects to bulk create
event_actors_to_create = []
for created_event in created_events:
event_dict = uuid_map[created_event.uuid]
if event_dict['actors'] is not None:
for actor in event_dict['actors']:
actor_id = actor.id if hasattr(actor, 'id') else actor
event_actors_to_create.append(EventActor(entity_id=actor_id, event=created_event))
EventActor.objects.bulk_create(event_actors_to_create)
return created_events
|
Create events in bulk to save on queries. Each element in the kwargs list should be a dict with the same set
of arguments you would normally pass to create_event
:param kwargs_list: list of kwargs dicts
:return: list of Event
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/models.py#L990-L1036
| null |
class EventManager(models.Manager):
"""
A custom Manager for Events.
"""
def get_queryset(self):
"""
Return the EventQuerySet.
"""
return EventQuerySet(self.model)
def cache_related(self):
"""
Return a queryset with prefetched values
:return:
"""
return self.get_queryset().cache_related()
def mark_seen(self, medium):
"""
Creates EventSeen objects for the provided medium for every event
in the queryset.
Creating these EventSeen objects ensures they will not be
returned when passing ``seen=False`` to any of the medium
event retrieval functions, ``events``, ``entity_events``, or
``events_targets``.
"""
return self.get_queryset().mark_seen(medium)
def load_contexts_and_renderers(self, medium):
"""
Loads context data into the event ``context`` variable. This method
destroys the queryset and returns a list of events.
"""
return self.get_queryset().load_contexts_and_renderers(medium)
@transaction.atomic
def create_event(self, actors=None, ignore_duplicates=False, **kwargs):
"""
Create events with actors.
This method can be used in place of ``Event.objects.create``
to create events, and the appropriate actors. It takes all the
same keywords as ``Event.objects.create`` for the event
creation, but additionally takes a list of actors, and can be
told to not attempt to create an event if a duplicate event
exists.
:type source: Source
:param source: A ``Source`` object representing where the
event came from.
:type context: dict
:param context: A dictionary containing relevant
information about the event, to be serialized into
JSON. It is possible to load additional context
dynamically when events are fetched. See the
documentation on the ``ContextRenderer`` model.
:type uuid: str
:param uuid: A unique string for the event. Requiring a
``uuid`` allows code that creates events to ensure they do
not create duplicate events. This id could be, for example
some hash of the ``context``, or, if the creator is
unconcerned with creating duplicate events a call to
python's ``uuid1()`` in the ``uuid`` module.
:type time_expires: datetime (optional)
:param time_expires: If given, the default methods for
querying events will not return this event after this time
has passed.
:type actors: (optional) List of entities or list of entity ids.
:param actors: An ``EventActor`` object will be created for
each entity in the list. This allows for subscriptions
which are only following certain entities to behave
appropriately.
:type ignore_duplicates: (optional) Boolean
:param ignore_duplicates: If ``True``, a check will be made to
ensure that an event with the give ``uuid`` does not exist
before attempting to create the event. Setting this to
``True`` allows the creator of events to gracefully ensure
no duplicates are attempted to be created. There is a uniqueness constraint on uuid
so it will raise an exception if duplicates are allowed and submitted.
:rtype: Event
:returns: The created event. Alternatively if a duplicate
event already exists and ``ignore_duplicates`` is
``True``, it will return ``None``.
"""
kwargs['actors'] = actors
kwargs['ignore_duplicates'] = ignore_duplicates
events = self.create_events([kwargs])
if events:
return events[0]
return None
|
ambitioninc/django-entity-event
|
entity_event/context_serializer.py
|
DefaultContextSerializer.serialize_value
|
python
|
def serialize_value(self, value):
# Create a list of serialize methods to run the value through
serialize_methods = [
self.serialize_model,
self.serialize_json_string,
self.serialize_list,
self.serialize_dict
]
# Run all of our serialize methods over our value
for serialize_method in serialize_methods:
value = serialize_method(value)
# Return the serialized context value
return value
|
Given a value, ensure that it is serialized properly
:param value:
:return:
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_serializer.py#L29-L48
| null |
class DefaultContextSerializer(object):
"""
Default class for serializing context data
"""
def __init__(self, context):
super(DefaultContextSerializer, self).__init__()
self.context = context
@property
def data(self):
"""
Data property that will return the serialized data
:return:
"""
# Create a serialized context dict
serialized_context = self.serialize_value(self.context)
# Return the serialized context
return serialized_context
def serialize_model(self, value):
"""
Serializes a model and all of its prefetched foreign keys
:param value:
:return:
"""
# Check if the context value is a model
if not isinstance(value, models.Model):
return value
# Serialize the model
serialized_model = model_to_dict(value)
# Check the model for cached foreign keys
for model_field, model_value in serialized_model.items():
model_state = value._state
# Django >= 2
if hasattr(model_state, 'fields_cache'): # pragma: no cover
if model_state.fields_cache.get(model_field):
serialized_model[model_field] = model_state.fields_cache.get(model_field)
else: # pragma: no cover
# Django < 2
cache_field = '_{0}_cache'.format(model_field)
if hasattr(value, cache_field):
serialized_model[model_field] = getattr(value, cache_field)
# Return the serialized model
return self.serialize_value(serialized_model)
def serialize_json_string(self, value):
"""
Tries to load an encoded json string back into an object
:param json_string:
:return:
"""
# Check if the value might be a json string
if not isinstance(value, six.string_types):
return value
# Make sure it starts with a brace
if not value.startswith('{') or value.startswith('['):
return value
# Try to load the string
try:
return json.loads(value)
except:
return value
def serialize_list(self, value):
"""
Ensure that all values of a list or tuple are serialized
:return:
"""
# Check if this is a list or a tuple
if not isinstance(value, (list, tuple)):
return value
# Loop over all the values and serialize the values
return [
self.serialize_value(list_value)
for list_value in value
]
def serialize_dict(self, value):
"""
Ensure that all values of a dictionary are properly serialized
:param value:
:return:
"""
# Check if this is a dict
if not isinstance(value, dict):
return value
# Loop over all the values and serialize them
return {
dict_key: self.serialize_value(dict_value)
for dict_key, dict_value in value.items()
}
|
ambitioninc/django-entity-event
|
entity_event/context_serializer.py
|
DefaultContextSerializer.serialize_model
|
python
|
def serialize_model(self, value):
# Check if the context value is a model
if not isinstance(value, models.Model):
return value
# Serialize the model
serialized_model = model_to_dict(value)
# Check the model for cached foreign keys
for model_field, model_value in serialized_model.items():
model_state = value._state
# Django >= 2
if hasattr(model_state, 'fields_cache'): # pragma: no cover
if model_state.fields_cache.get(model_field):
serialized_model[model_field] = model_state.fields_cache.get(model_field)
else: # pragma: no cover
# Django < 2
cache_field = '_{0}_cache'.format(model_field)
if hasattr(value, cache_field):
serialized_model[model_field] = getattr(value, cache_field)
# Return the serialized model
return self.serialize_value(serialized_model)
|
Serializes a model and all of its prefetched foreign keys
:param value:
:return:
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_serializer.py#L50-L79
|
[
"def serialize_value(self, value):\n \"\"\"\n Given a value, ensure that it is serialized properly\n :param value:\n :return:\n \"\"\"\n # Create a list of serialize methods to run the value through\n serialize_methods = [\n self.serialize_model,\n self.serialize_json_string,\n self.serialize_list,\n self.serialize_dict\n ]\n\n # Run all of our serialize methods over our value\n for serialize_method in serialize_methods:\n value = serialize_method(value)\n\n # Return the serialized context value\n return value\n"
] |
class DefaultContextSerializer(object):
"""
Default class for serializing context data
"""
def __init__(self, context):
super(DefaultContextSerializer, self).__init__()
self.context = context
@property
def data(self):
"""
Data property that will return the serialized data
:return:
"""
# Create a serialized context dict
serialized_context = self.serialize_value(self.context)
# Return the serialized context
return serialized_context
def serialize_value(self, value):
"""
Given a value, ensure that it is serialized properly
:param value:
:return:
"""
# Create a list of serialize methods to run the value through
serialize_methods = [
self.serialize_model,
self.serialize_json_string,
self.serialize_list,
self.serialize_dict
]
# Run all of our serialize methods over our value
for serialize_method in serialize_methods:
value = serialize_method(value)
# Return the serialized context value
return value
def serialize_json_string(self, value):
"""
Tries to load an encoded json string back into an object
:param json_string:
:return:
"""
# Check if the value might be a json string
if not isinstance(value, six.string_types):
return value
# Make sure it starts with a brace
if not value.startswith('{') or value.startswith('['):
return value
# Try to load the string
try:
return json.loads(value)
except:
return value
def serialize_list(self, value):
"""
Ensure that all values of a list or tuple are serialized
:return:
"""
# Check if this is a list or a tuple
if not isinstance(value, (list, tuple)):
return value
# Loop over all the values and serialize the values
return [
self.serialize_value(list_value)
for list_value in value
]
def serialize_dict(self, value):
"""
Ensure that all values of a dictionary are properly serialized
:param value:
:return:
"""
# Check if this is a dict
if not isinstance(value, dict):
return value
# Loop over all the values and serialize them
return {
dict_key: self.serialize_value(dict_value)
for dict_key, dict_value in value.items()
}
|
ambitioninc/django-entity-event
|
entity_event/context_serializer.py
|
DefaultContextSerializer.serialize_json_string
|
python
|
def serialize_json_string(self, value):
# Check if the value might be a json string
if not isinstance(value, six.string_types):
return value
# Make sure it starts with a brace
if not value.startswith('{') or value.startswith('['):
return value
# Try to load the string
try:
return json.loads(value)
except:
return value
|
Tries to load an encoded json string back into an object
:param json_string:
:return:
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_serializer.py#L81-L100
| null |
class DefaultContextSerializer(object):
"""
Default class for serializing context data
"""
def __init__(self, context):
super(DefaultContextSerializer, self).__init__()
self.context = context
@property
def data(self):
"""
Data property that will return the serialized data
:return:
"""
# Create a serialized context dict
serialized_context = self.serialize_value(self.context)
# Return the serialized context
return serialized_context
def serialize_value(self, value):
"""
Given a value, ensure that it is serialized properly
:param value:
:return:
"""
# Create a list of serialize methods to run the value through
serialize_methods = [
self.serialize_model,
self.serialize_json_string,
self.serialize_list,
self.serialize_dict
]
# Run all of our serialize methods over our value
for serialize_method in serialize_methods:
value = serialize_method(value)
# Return the serialized context value
return value
def serialize_model(self, value):
"""
Serializes a model and all of its prefetched foreign keys
:param value:
:return:
"""
# Check if the context value is a model
if not isinstance(value, models.Model):
return value
# Serialize the model
serialized_model = model_to_dict(value)
# Check the model for cached foreign keys
for model_field, model_value in serialized_model.items():
model_state = value._state
# Django >= 2
if hasattr(model_state, 'fields_cache'): # pragma: no cover
if model_state.fields_cache.get(model_field):
serialized_model[model_field] = model_state.fields_cache.get(model_field)
else: # pragma: no cover
# Django < 2
cache_field = '_{0}_cache'.format(model_field)
if hasattr(value, cache_field):
serialized_model[model_field] = getattr(value, cache_field)
# Return the serialized model
return self.serialize_value(serialized_model)
def serialize_list(self, value):
"""
Ensure that all values of a list or tuple are serialized
:return:
"""
# Check if this is a list or a tuple
if not isinstance(value, (list, tuple)):
return value
# Loop over all the values and serialize the values
return [
self.serialize_value(list_value)
for list_value in value
]
def serialize_dict(self, value):
"""
Ensure that all values of a dictionary are properly serialized
:param value:
:return:
"""
# Check if this is a dict
if not isinstance(value, dict):
return value
# Loop over all the values and serialize them
return {
dict_key: self.serialize_value(dict_value)
for dict_key, dict_value in value.items()
}
|
ambitioninc/django-entity-event
|
entity_event/context_serializer.py
|
DefaultContextSerializer.serialize_list
|
python
|
def serialize_list(self, value):
# Check if this is a list or a tuple
if not isinstance(value, (list, tuple)):
return value
# Loop over all the values and serialize the values
return [
self.serialize_value(list_value)
for list_value in value
]
|
Ensure that all values of a list or tuple are serialized
:return:
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_serializer.py#L102-L116
| null |
class DefaultContextSerializer(object):
"""
Default class for serializing context data
"""
def __init__(self, context):
super(DefaultContextSerializer, self).__init__()
self.context = context
@property
def data(self):
"""
Data property that will return the serialized data
:return:
"""
# Create a serialized context dict
serialized_context = self.serialize_value(self.context)
# Return the serialized context
return serialized_context
def serialize_value(self, value):
"""
Given a value, ensure that it is serialized properly
:param value:
:return:
"""
# Create a list of serialize methods to run the value through
serialize_methods = [
self.serialize_model,
self.serialize_json_string,
self.serialize_list,
self.serialize_dict
]
# Run all of our serialize methods over our value
for serialize_method in serialize_methods:
value = serialize_method(value)
# Return the serialized context value
return value
def serialize_model(self, value):
"""
Serializes a model and all of its prefetched foreign keys
:param value:
:return:
"""
# Check if the context value is a model
if not isinstance(value, models.Model):
return value
# Serialize the model
serialized_model = model_to_dict(value)
# Check the model for cached foreign keys
for model_field, model_value in serialized_model.items():
model_state = value._state
# Django >= 2
if hasattr(model_state, 'fields_cache'): # pragma: no cover
if model_state.fields_cache.get(model_field):
serialized_model[model_field] = model_state.fields_cache.get(model_field)
else: # pragma: no cover
# Django < 2
cache_field = '_{0}_cache'.format(model_field)
if hasattr(value, cache_field):
serialized_model[model_field] = getattr(value, cache_field)
# Return the serialized model
return self.serialize_value(serialized_model)
def serialize_json_string(self, value):
"""
Tries to load an encoded json string back into an object
:param json_string:
:return:
"""
# Check if the value might be a json string
if not isinstance(value, six.string_types):
return value
# Make sure it starts with a brace
if not value.startswith('{') or value.startswith('['):
return value
# Try to load the string
try:
return json.loads(value)
except:
return value
def serialize_dict(self, value):
"""
Ensure that all values of a dictionary are properly serialized
:param value:
:return:
"""
# Check if this is a dict
if not isinstance(value, dict):
return value
# Loop over all the values and serialize them
return {
dict_key: self.serialize_value(dict_value)
for dict_key, dict_value in value.items()
}
|
ambitioninc/django-entity-event
|
entity_event/context_serializer.py
|
DefaultContextSerializer.serialize_dict
|
python
|
def serialize_dict(self, value):
# Check if this is a dict
if not isinstance(value, dict):
return value
# Loop over all the values and serialize them
return {
dict_key: self.serialize_value(dict_value)
for dict_key, dict_value in value.items()
}
|
Ensure that all values of a dictionary are properly serialized
:param value:
:return:
|
train
|
https://github.com/ambitioninc/django-entity-event/blob/70f50df133e42a7bf38d0f07fccc6d2890e5fd12/entity_event/context_serializer.py#L118-L133
| null |
class DefaultContextSerializer(object):
"""
Default class for serializing context data
"""
def __init__(self, context):
super(DefaultContextSerializer, self).__init__()
self.context = context
@property
def data(self):
"""
Data property that will return the serialized data
:return:
"""
# Create a serialized context dict
serialized_context = self.serialize_value(self.context)
# Return the serialized context
return serialized_context
def serialize_value(self, value):
"""
Given a value, ensure that it is serialized properly
:param value:
:return:
"""
# Create a list of serialize methods to run the value through
serialize_methods = [
self.serialize_model,
self.serialize_json_string,
self.serialize_list,
self.serialize_dict
]
# Run all of our serialize methods over our value
for serialize_method in serialize_methods:
value = serialize_method(value)
# Return the serialized context value
return value
def serialize_model(self, value):
"""
Serializes a model and all of its prefetched foreign keys
:param value:
:return:
"""
# Check if the context value is a model
if not isinstance(value, models.Model):
return value
# Serialize the model
serialized_model = model_to_dict(value)
# Check the model for cached foreign keys
for model_field, model_value in serialized_model.items():
model_state = value._state
# Django >= 2
if hasattr(model_state, 'fields_cache'): # pragma: no cover
if model_state.fields_cache.get(model_field):
serialized_model[model_field] = model_state.fields_cache.get(model_field)
else: # pragma: no cover
# Django < 2
cache_field = '_{0}_cache'.format(model_field)
if hasattr(value, cache_field):
serialized_model[model_field] = getattr(value, cache_field)
# Return the serialized model
return self.serialize_value(serialized_model)
def serialize_json_string(self, value):
"""
Tries to load an encoded json string back into an object
:param json_string:
:return:
"""
# Check if the value might be a json string
if not isinstance(value, six.string_types):
return value
# Make sure it starts with a brace
if not value.startswith('{') or value.startswith('['):
return value
# Try to load the string
try:
return json.loads(value)
except:
return value
def serialize_list(self, value):
"""
Ensure that all values of a list or tuple are serialized
:return:
"""
# Check if this is a list or a tuple
if not isinstance(value, (list, tuple)):
return value
# Loop over all the values and serialize the values
return [
self.serialize_value(list_value)
for list_value in value
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.