| column | dtype | lengths / values |
| --- | --- | --- |
| id | int32 | 0 to 252k |
| repo | string | lengths 7 to 55 |
| path | string | lengths 4 to 127 |
| func_name | string | lengths 1 to 88 |
| original_string | string | lengths 75 to 19.8k |
| language | string | 1 class |
| code | string | lengths 51 to 19.8k |
| code_tokens | list | - |
| docstring | string | lengths 3 to 17.3k |
| docstring_tokens | list | - |
| sha | string | lengths 40 to 40 |
| url | string | lengths 87 to 242 |
7,000
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
Logger.subscribe
def subscribe(self, queue=None, *levels): """ Subscribe to the aggregated log stream. On subscribe a ledis queue will be fed with all running processes logs. Always use the returned queue name from this method, even if you specified the queue name to use Note: it is legal to subscribe to the same queue, but it would be bad logic if two processes are trying to read from the same queue. :param queue: Your unique queue name; otherwise, one will be generated for you :param levels: :return: queue name to pull from """ args = { 'queue': queue, 'levels': list(levels), } self._subscribe_chk.check(args) return self._client.json('logger.subscribe', args)
python
def subscribe(self, queue=None, *levels): args = { 'queue': queue, 'levels': list(levels), } self._subscribe_chk.check(args) return self._client.json('logger.subscribe', args)
[ "def", "subscribe", "(", "self", ",", "queue", "=", "None", ",", "*", "levels", ")", ":", "args", "=", "{", "'queue'", ":", "queue", ",", "'levels'", ":", "list", "(", "levels", ")", ",", "}", "self", ".", "_subscribe_chk", ".", "check", "(", "args...
Subscribe to the aggregated log stream. On subscribe a ledis queue will be fed with all running processes logs. Always use the returned queue name from this method, even if you specified the queue name to use Note: it is legal to subscribe to the same queue, but it would be bad logic if two processes are trying to read from the same queue. :param queue: Your unique queue name; otherwise, one will be generated for you :param levels: :return: queue name to pull from
[ "Subscribe", "to", "the", "aggregated", "log", "stream", ".", "On", "subscribe", "a", "ledis", "queue", "will", "be", "fed", "with", "all", "running", "processes", "logs", ".", "Always", "use", "the", "returned", "queue", "name", "from", "this", "method", ...
69f6ce845ab8b8ad805a79a415227e7ac566c218
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L2618-L2637
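A minimal usage sketch for the record above. It assumes a reachable zero-os node, that the package exposes the connection class as `zeroos.core0.client.Client`, and that the logger manager is mounted at `client.logger`; the node address is a placeholder.

```python
# Sketch only: needs a running zero-os node; address and attribute layout
# are assumptions based on the record's package path.
from zeroos.core0.client import Client

client = Client('192.168.1.2')          # hypothetical node address
queue = client.logger.subscribe()       # always use the returned queue name
print('pull log records from ledis queue:', queue)
```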
7,001
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
AggregatorManager.query
def query(self, key=None, **tags): """ Query zero-os aggregator for current state object of monitored metrics. Note: ID is returned as part of the key (if set) to avoid conflict with similar metrics that have the same key. For example, a CPU core number can be the id associated with 'machine.CPU.percent' so we can return all values for all the core numbers in the same dict. You can filter on the ID as a tag :example: self.query(key=key, id=value) :param key: metric key (ex: machine.memory.ram.available) :param tags: optional tags filter :return: dict of { 'key[/id]': state object } """ args = { 'key': key, 'tags': tags, } self._query_chk.check(args) return self._client.json('aggregator.query', args)
python
def query(self, key=None, **tags): args = { 'key': key, 'tags': tags, } self._query_chk.check(args) return self._client.json('aggregator.query', args)
[ "def", "query", "(", "self", ",", "key", "=", "None", ",", "*", "*", "tags", ")", ":", "args", "=", "{", "'key'", ":", "key", ",", "'tags'", ":", "tags", ",", "}", "self", ".", "_query_chk", ".", "check", "(", "args", ")", "return", "self", "."...
Query zero-os aggregator for current state object of monitored metrics. Note: ID is returned as part of the key (if set) to avoid conflict with similar metrics that have the same key. For example, a CPU core number can be the id associated with 'machine.CPU.percent' so we can return all values for all the core numbers in the same dict. You can filter on the ID as a tag :example: self.query(key=key, id=value) :param key: metric key (ex: machine.memory.ram.available) :param tags: optional tags filter :return: dict of { 'key[/id]': state object }
[ "Query", "zero", "-", "os", "aggregator", "for", "current", "state", "object", "of", "monitored", "metrics", "." ]
69f6ce845ab8b8ad805a79a415227e7ac566c218
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L2737-L2761
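In the same hedged spirit, a sketch of `query`, assuming the manager is mounted at `client.aggregator` on the client from the previous example:

```python
# Sketch only: continues the hypothetical client from the subscribe example.
state = client.aggregator.query(key='machine.memory.ram.available')
for key, obj in state.items():
    print(key, obj)                     # keys look like 'key' or 'key/id'

# filtering on the ID as a tag, per the docstring's example
cpu0 = client.aggregator.query(key='machine.CPU.percent', id='0')
```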
7,002
zero-os/0-core
client/py-client/zeroos/core0/client/client.py
CGroupManager.ensure
def ensure(self, subsystem, name): """ Creates a cgroup if it doesn't exist under the specified subsystem and the given name :param subsystem: the cgroup subsystem (currently supports 'memory' and 'cpuset') :param name: name of the cgroup to ensure """ args = { 'subsystem': subsystem, 'name': name, } self._cgroup_chk.check(args) return self._client.json('cgroup.ensure', args)
python
def ensure(self, subsystem, name): args = { 'subsystem': subsystem, 'name': name, } self._cgroup_chk.check(args) return self._client.json('cgroup.ensure', args)
[ "def", "ensure", "(", "self", ",", "subsystem", ",", "name", ")", ":", "args", "=", "{", "'subsystem'", ":", "subsystem", ",", "'name'", ":", "name", ",", "}", "self", ".", "_cgroup_chk", ".", "check", "(", "args", ")", "return", "self", ".", "_clien...
Creates a cgroup if it doesn't exist under the specified subsystem and the given name :param subsystem: the cgroup subsystem (currently supports 'memory' and 'cpuset') :param name: name of the cgroup to ensure
[ "Creates", "a", "cgroup", "if", "it", "doesn", "t", "exist", "under", "the", "specified", "subsystem", "and", "the", "given", "name" ]
69f6ce845ab8b8ad805a79a415227e7ac566c218
https://github.com/zero-os/0-core/blob/69f6ce845ab8b8ad805a79a415227e7ac566c218/client/py-client/zeroos/core0/client/client.py#L2854-L2868
7,003
vallis/libstempo
libstempo/spharmORFbasis.py
dlmk
def dlmk(l,m,k,theta1): """ returns value of d^l_mk as defined in allen, ottewill 97. Called by Dlmk """ if m >= k: factor = sqrt(factorial(l-k)*factorial(l+m)/factorial(l+k)/factorial(l-m)) part2 = (cos(theta1/2))**(2*l+k-m)*(-sin(theta1/2))**(m-k)/factorial(m-k) part3 = sp.hyp2f1(m-l,-k-l,m-k+1,-(tan(theta1/2))**2) return factor*part2*part3 else: return (-1)**(m-k) * dlmk(l,k,m,theta1)
python
def dlmk(l,m,k,theta1): if m >= k: factor = sqrt(factorial(l-k)*factorial(l+m)/factorial(l+k)/factorial(l-m)) part2 = (cos(theta1/2))**(2*l+k-m)*(-sin(theta1/2))**(m-k)/factorial(m-k) part3 = sp.hyp2f1(m-l,-k-l,m-k+1,-(tan(theta1/2))**2) return factor*part2*part3 else: return (-1)**(m-k) * dlmk(l,k,m,theta1)
[ "def", "dlmk", "(", "l", ",", "m", ",", "k", ",", "theta1", ")", ":", "if", "m", ">=", "k", ":", "factor", "=", "sqrt", "(", "factorial", "(", "l", "-", "k", ")", "*", "factorial", "(", "l", "+", "m", ")", "/", "factorial", "(", "l", "+", ...
returns value of d^l_mk as defined in allen, ottewill 97. Called by Dlmk
[ "returns", "value", "of", "d^l_mk", "as", "defined", "in", "allen", "ottewill", "97", ".", "Called", "by", "Dlmk" ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/spharmORFbasis.py#L173-L190
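Since `dlmk` is a pure function, it can be checked standalone. A self-contained sketch reusing the record's formula (`scipy.special.hyp2f1` is the only non-stdlib dependency) verifies that the rotation element reduces to a Kronecker delta at zero angle:

```python
from math import sqrt, sin, cos, tan, factorial
import scipy.special as sp

def dlmk(l, m, k, theta1):
    """Wigner small-d element d^l_mk in the Allen & Ottewill '97 convention."""
    if m >= k:
        factor = sqrt(factorial(l - k) * factorial(l + m)
                      / factorial(l + k) / factorial(l - m))
        part2 = (cos(theta1 / 2))**(2 * l + k - m) \
            * (-sin(theta1 / 2))**(m - k) / factorial(m - k)
        part3 = sp.hyp2f1(m - l, -k - l, m - k + 1, -(tan(theta1 / 2))**2)
        return factor * part2 * part3
    return (-1)**(m - k) * dlmk(l, k, m, theta1)

print(dlmk(2, 1, 1, 0.0))   # 1.0: d^l_mk(0) = delta_mk
print(dlmk(2, 2, 0, 0.0))   # 0.0
```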
7,004
vallis/libstempo
libstempo/spharmORFbasis.py
Dlmk
def Dlmk(l,m,k,phi1,phi2,theta1,theta2): """ returns value of D^l_mk as defined in allen, ottewill 97. """ return exp(complex(0.,-m*phi1)) * dlmk(l,m,k,theta1) * \ exp(complex(0.,-k*gamma(phi1,phi2,theta1,theta2)))
python
def Dlmk(l,m,k,phi1,phi2,theta1,theta2): return exp(complex(0.,-m*phi1)) * dlmk(l,m,k,theta1) * \ exp(complex(0.,-k*gamma(phi1,phi2,theta1,theta2)))
[ "def", "Dlmk", "(", "l", ",", "m", ",", "k", ",", "phi1", ",", "phi2", ",", "theta1", ",", "theta2", ")", ":", "return", "exp", "(", "complex", "(", "0.", ",", "-", "m", "*", "phi1", ")", ")", "*", "dlmk", "(", "l", ",", "m", ",", "k", ",...
returns value of D^l_mk as defined in allen, ottewill 97.
[ "returns", "value", "of", "D^l_mk", "as", "defined", "in", "allen", "ottewill", "97", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/spharmORFbasis.py#L193-L200
7,005
vallis/libstempo
libstempo/spharmORFbasis.py
gamma
def gamma(phi1,phi2,theta1,theta2): """ calculate third rotation angle inputs are angles from 2 pulsars returns the angle. """ if phi1 == phi2 and theta1 == theta2: gamma = 0 else: gamma = atan( sin(theta2)*sin(phi2-phi1) / \ (cos(theta1)*sin(theta2)*cos(phi1-phi2) - \ sin(theta1)*cos(theta2)) ) dummy_arg = (cos(gamma)*cos(theta1)*sin(theta2)*cos(phi1-phi2) + \ sin(gamma)*sin(theta2)*sin(phi2-phi1) - \ cos(gamma)*sin(theta1)*cos(theta2)) if dummy_arg >= 0: return gamma else: return pi + gamma
python
def gamma(phi1,phi2,theta1,theta2): if phi1 == phi2 and theta1 == theta2: gamma = 0 else: gamma = atan( sin(theta2)*sin(phi2-phi1) / \ (cos(theta1)*sin(theta2)*cos(phi1-phi2) - \ sin(theta1)*cos(theta2)) ) dummy_arg = (cos(gamma)*cos(theta1)*sin(theta2)*cos(phi1-phi2) + \ sin(gamma)*sin(theta2)*sin(phi2-phi1) - \ cos(gamma)*sin(theta1)*cos(theta2)) if dummy_arg >= 0: return gamma else: return pi + gamma
[ "def", "gamma", "(", "phi1", ",", "phi2", ",", "theta1", ",", "theta2", ")", ":", "if", "phi1", "==", "phi2", "and", "theta1", "==", "theta2", ":", "gamma", "=", "0", "else", ":", "gamma", "=", "atan", "(", "sin", "(", "theta2", ")", "*", "sin", ...
calculate third rotation angle inputs are angles from 2 pulsars returns the angle.
[ "calculate", "third", "rotation", "angle", "inputs", "are", "angles", "from", "2", "pulsars", "returns", "the", "angle", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/spharmORFbasis.py#L203-L225
7,006
vallis/libstempo
libstempo/spharmORFbasis.py
rotated_Gamma_ml
def rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml): """ This function takes any gamma in the computational frame and rotates it to the cosmic frame. """ rotated_gamma = 0 for ii in range(2*l+1): rotated_gamma += Dlmk(l,m,ii-l,phi1,phi2,theta1,theta2).conjugate()*gamma_ml[ii] return rotated_gamma
python
def rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml): rotated_gamma = 0 for ii in range(2*l+1): rotated_gamma += Dlmk(l,m,ii-l,phi1,phi2,theta1,theta2).conjugate()*gamma_ml[ii] return rotated_gamma
[ "def", "rotated_Gamma_ml", "(", "m", ",", "l", ",", "phi1", ",", "phi2", ",", "theta1", ",", "theta2", ",", "gamma_ml", ")", ":", "rotated_gamma", "=", "0", "for", "ii", "in", "range", "(", "2", "*", "l", "+", "1", ")", ":", "rotated_gamma", "+=", ...
This function takes any gamma in the computational frame and rotates it to the cosmic frame.
[ "This", "function", "takes", "any", "gamma", "in", "the", "computational", "frame", "and", "rotates", "it", "to", "the", "cosmic", "frame", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/spharmORFbasis.py#L266-L278
7,007
vallis/libstempo
libstempo/spharmORFbasis.py
real_rotated_Gammas
def real_rotated_Gammas(m,l,phi1,phi2,theta1,theta2,gamma_ml): """ This function returns the real-valued form of the Overlap Reduction Functions, see Eqs 47 in Mingarelli et al, 2013. """ if m>0: ans=(1./sqrt(2))*(rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml) + \ (-1)**m*rotated_Gamma_ml(-m,l,phi1,phi2,theta1,theta2,gamma_ml)) return ans.real if m==0: return rotated_Gamma_ml(0,l,phi1,phi2,theta1,theta2,gamma_ml).real if m<0: ans=(1./sqrt(2)/complex(0.,1))*(rotated_Gamma_ml(-m,l,phi1,phi2,theta1,theta2,gamma_ml) - \ (-1)**m*rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml)) return ans.real
python
def real_rotated_Gammas(m,l,phi1,phi2,theta1,theta2,gamma_ml): if m>0: ans=(1./sqrt(2))*(rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml) + \ (-1)**m*rotated_Gamma_ml(-m,l,phi1,phi2,theta1,theta2,gamma_ml)) return ans.real if m==0: return rotated_Gamma_ml(0,l,phi1,phi2,theta1,theta2,gamma_ml).real if m<0: ans=(1./sqrt(2)/complex(0.,1))*(rotated_Gamma_ml(-m,l,phi1,phi2,theta1,theta2,gamma_ml) - \ (-1)**m*rotated_Gamma_ml(m,l,phi1,phi2,theta1,theta2,gamma_ml)) return ans.real
[ "def", "real_rotated_Gammas", "(", "m", ",", "l", ",", "phi1", ",", "phi2", ",", "theta1", ",", "theta2", ",", "gamma_ml", ")", ":", "if", "m", ">", "0", ":", "ans", "=", "(", "1.", "/", "sqrt", "(", "2", ")", ")", "*", "(", "rotated_Gamma_ml", ...
This function returns the real-valued form of the Overlap Reduction Functions, see Eqs 47 in Mingarelli et al, 2013.
[ "This", "function", "returns", "the", "real", "-", "valued", "form", "of", "the", "Overlap", "Reduction", "Functions", "see", "Eqs", "47", "in", "Mingarelli", "et", "al", "2013", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/spharmORFbasis.py#L281-L297
7,008
vallis/libstempo
libstempo/fit.py
chisq
def chisq(psr,formbats=False): """Return the total chisq for the current timing solution, removing noise-averaged mean residual, and ignoring deleted points.""" if formbats: psr.formbats() res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0] res -= numpy.sum(res/err**2) / numpy.sum(1/err**2) return numpy.sum(res * res / (1e-12 * err * err))
python
def chisq(psr,formbats=False): if formbats: psr.formbats() res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0] res -= numpy.sum(res/err**2) / numpy.sum(1/err**2) return numpy.sum(res * res / (1e-12 * err * err))
[ "def", "chisq", "(", "psr", ",", "formbats", "=", "False", ")", ":", "if", "formbats", ":", "psr", ".", "formbats", "(", ")", "res", ",", "err", "=", "psr", ".", "residuals", "(", "removemean", "=", "False", ")", "[", "psr", ".", "deleted", "==", ...
Return the total chisq for the current timing solution, removing noise-averaged mean residual, and ignoring deleted points.
[ "Return", "the", "total", "chisq", "for", "the", "current", "timing", "solution", "removing", "noise", "-", "averaged", "mean", "residual", "and", "ignoring", "deleted", "points", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/fit.py#L4-L15
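A hedged usage sketch: `chisq` expects a `libstempo.tempopulsar`, so the par/tim file names below are placeholders for a real timing-model/TOA pair.

```python
import libstempo
from libstempo.fit import chisq   # module per the record's path

# placeholders: substitute a real par/tim pair
psr = libstempo.tempopulsar(parfile='pulsar.par', timfile='pulsar.tim')
# residuals are in seconds while TOA errors are in microseconds;
# the 1e-12 factor inside chisq reconciles the squared units
print(chisq(psr))
```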
7,009
vallis/libstempo
libstempo/fit.py
dchisq
def dchisq(psr,formbats=False,renormalize=True): """Return gradient of total chisq for the current timing solution, after removing noise-averaged mean residual, and ignoring deleted points.""" if formbats: psr.formbats() res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0] res -= numpy.sum(res/err**2) / numpy.sum(1/err**2) # bats already updated by residuals(); skip constant-phase column M = psr.designmatrix(updatebats=False,fixunits=True,fixsigns=True)[psr.deleted==0,1:] # renormalize design-matrix columns if renormalize: norm = numpy.sqrt(numpy.sum(M**2,axis=0)) M /= norm else: norm = 1.0 # compute chisq derivative, de-renormalize dr = -2 * numpy.dot(M.T,res / (1e-12 * err**2)) * norm return dr
python
def dchisq(psr,formbats=False,renormalize=True): if formbats: psr.formbats() res, err = psr.residuals(removemean=False)[psr.deleted == 0], psr.toaerrs[psr.deleted == 0] res -= numpy.sum(res/err**2) / numpy.sum(1/err**2) # bats already updated by residuals(); skip constant-phase column M = psr.designmatrix(updatebats=False,fixunits=True,fixsigns=True)[psr.deleted==0,1:] # renormalize design-matrix columns if renormalize: norm = numpy.sqrt(numpy.sum(M**2,axis=0)) M /= norm else: norm = 1.0 # compute chisq derivative, de-renormalize dr = -2 * numpy.dot(M.T,res / (1e-12 * err**2)) * norm return dr
[ "def", "dchisq", "(", "psr", ",", "formbats", "=", "False", ",", "renormalize", "=", "True", ")", ":", "if", "formbats", ":", "psr", ".", "formbats", "(", ")", "res", ",", "err", "=", "psr", ".", "residuals", "(", "removemean", "=", "False", ")", "...
Return gradient of total chisq for the current timing solution, after removing noise-averaged mean residual, and ignoring deleted points.
[ "Return", "gradient", "of", "total", "chisq", "for", "the", "current", "timing", "solution", "after", "removing", "noise", "-", "averaged", "mean", "residual", "and", "ignoring", "deleted", "points", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/fit.py#L17-L41
7,010
vallis/libstempo
libstempo/utils.py
create_fourier_design_matrix
def create_fourier_design_matrix(t, nmodes, freq=False, Tspan=None, logf=False, fmin=None, fmax=None): """ Construct fourier design matrix from eq 11 of Lentati et al, 2013 :param t: vector of time series in seconds :param nmodes: number of fourier coefficients to use :param freq: option to output frequencies :param Tspan: option to use some other Tspan :param logf: use log frequency spacing :param fmin: lower sampling frequency :param fmax: upper sampling frequency :return: F: fourier design matrix :return: f: Sampling frequencies (if freq=True) """ N = len(t) F = np.zeros((N, 2 * nmodes)) if Tspan is not None: T = Tspan else: T = t.max() - t.min() # define sampling frequencies if fmin is not None and fmax is not None: f = np.linspace(fmin, fmax, nmodes) else: f = np.linspace(1 / T, nmodes / T, nmodes) if logf: f = np.logspace(np.log10(1 / T), np.log10(nmodes / T), nmodes) Ffreqs = np.zeros(2 * nmodes) Ffreqs[0::2] = f Ffreqs[1::2] = f F[:,::2] = np.sin(2*np.pi*t[:,None]*f[None,:]) F[:,1::2] = np.cos(2*np.pi*t[:,None]*f[None,:]) if freq: return F, Ffreqs else: return F
python
def create_fourier_design_matrix(t, nmodes, freq=False, Tspan=None, logf=False, fmin=None, fmax=None): N = len(t) F = np.zeros((N, 2 * nmodes)) if Tspan is not None: T = Tspan else: T = t.max() - t.min() # define sampling frequencies if fmin is not None and fmax is not None: f = np.linspace(fmin, fmax, nmodes) else: f = np.linspace(1 / T, nmodes / T, nmodes) if logf: f = np.logspace(np.log10(1 / T), np.log10(nmodes / T), nmodes) Ffreqs = np.zeros(2 * nmodes) Ffreqs[0::2] = f Ffreqs[1::2] = f F[:,::2] = np.sin(2*np.pi*t[:,None]*f[None,:]) F[:,1::2] = np.cos(2*np.pi*t[:,None]*f[None,:]) if freq: return F, Ffreqs else: return F
[ "def", "create_fourier_design_matrix", "(", "t", ",", "nmodes", ",", "freq", "=", "False", ",", "Tspan", "=", "None", ",", "logf", "=", "False", ",", "fmin", "=", "None", ",", "fmax", "=", "None", ")", ":", "N", "=", "len", "(", "t", ")", "F", "=...
Construct fourier design matrix from eq 11 of Lentati et al, 2013 :param t: vector of time series in seconds :param nmodes: number of fourier coefficients to use :param freq: option to output frequencies :param Tspan: option to use some other Tspan :param logf: use log frequency spacing :param fmin: lower sampling frequency :param fmax: upper sampling frequency :return: F: fourier design matrix :return: f: Sampling frequencies (if freq=True)
[ "Construct", "fourier", "design", "matrix", "from", "eq", "11", "of", "Lentati", "et", "al", "2013" ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/utils.py#L31-L74
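Shape check for the design matrix, assuming `libstempo` is installed so the function can be imported from the module named in the record:

```python
import numpy as np
from libstempo.utils import create_fourier_design_matrix

t = np.linspace(0.0, 3.0e8, 500)     # ~10 yr of TOAs, in seconds
F, f = create_fourier_design_matrix(t, nmodes=30, freq=True)
print(F.shape)    # (500, 60): one sine and one cosine column per mode
print(f[:4])      # each frequency appears twice, once per sin/cos column
```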
7,011
vallis/libstempo
libstempo/utils.py
powerlaw
def powerlaw(f, log10_A=-16, gamma=5): """Power-law PSD. :param f: Sampling frequencies :param log10_A: log10 of red noise Amplitude [GW units] :param gamma: Spectral index of red noise process """ fyr = 1 / 3.16e7 return (10**log10_A)**2 / 12.0 / np.pi**2 * fyr**(gamma-3) * f**(-gamma)
python
def powerlaw(f, log10_A=-16, gamma=5): fyr = 1 / 3.16e7 return (10**log10_A)**2 / 12.0 / np.pi**2 * fyr**(gamma-3) * f**(-gamma)
[ "def", "powerlaw", "(", "f", ",", "log10_A", "=", "-", "16", ",", "gamma", "=", "5", ")", ":", "fyr", "=", "1", "/", "3.16e7", "return", "(", "10", "**", "log10_A", ")", "**", "2", "/", "12.0", "/", "np", ".", "pi", "**", "2", "*", "fyr", "...
Power-law PSD. :param f: Sampling frequencies :param log10_A: log10 of red noise Amplitude [GW units] :param gamma: Spectral index of red noise process
[ "Power", "-", "law", "PSD", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/utils.py#L77-L86
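The power law itself is self-contained; a minimal sketch reproducing the record's formula (`fyr` is roughly one cycle per year, in Hz):

```python
import numpy as np

def powerlaw(f, log10_A=-16, gamma=5):
    fyr = 1 / 3.16e7   # approximately 1/yr in Hz
    return (10**log10_A)**2 / 12.0 / np.pi**2 * fyr**(gamma - 3) * f**(-gamma)

f = np.array([1e-9, 1e-8, 1e-7])
print(powerlaw(f))    # PSD falls steeply with frequency for gamma = 5
```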
7,012
vallis/libstempo
libstempo/toasim.py
add_gwb
def add_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8, fhigh=1e-5, gwAmp=1e-20, alpha=-0.66, logspacing=True): """Add a stochastic background from inspiraling binaries, using the tempo2 code that underlies the GWbkgrd plugin. Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries, 'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number-generator, 'flow' and 'fhigh' [Hz] determine the background band, 'gwAmp' and 'alpha' determine its amplitude and exponent, and setting 'logspacing' to False will use linear spacing for the individual sources. It is also possible to create a background object with gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing) then call the method gwb.add_gwb(pulsar[i],dist) repeatedly to get a consistent background for multiple pulsars. Returns the GWB object """ gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing) gwb.add_gwb(psr,dist) return gwb
python
def add_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8, fhigh=1e-5, gwAmp=1e-20, alpha=-0.66, logspacing=True): gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing) gwb.add_gwb(psr,dist) return gwb
[ "def", "add_gwb", "(", "psr", ",", "dist", "=", "1", ",", "ngw", "=", "1000", ",", "seed", "=", "None", ",", "flow", "=", "1e-8", ",", "fhigh", "=", "1e-5", ",", "gwAmp", "=", "1e-20", ",", "alpha", "=", "-", "0.66", ",", "logspacing", "=", "Tr...
Add a stochastic background from inspiraling binaries, using the tempo2 code that underlies the GWbkgrd plugin. Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries, 'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number-generator, 'flow' and 'fhigh' [Hz] determine the background band, 'gwAmp' and 'alpha' determine its amplitude and exponent, and setting 'logspacing' to False will use linear spacing for the individual sources. It is also possible to create a background object with gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing) then call the method gwb.add_gwb(pulsar[i],dist) repeatedly to get a consistent background for multiple pulsars. Returns the GWB object
[ "Add", "a", "stochastic", "background", "from", "inspiraling", "binaries", "using", "the", "tempo2", "code", "that", "underlies", "the", "GWbkgrd", "plugin", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L32-L56
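A hedged usage sketch of the convenience wrapper; the par/tim files and the 1 kpc distance are placeholders:

```python
import libstempo
from libstempo.toasim import add_gwb   # per the record's path

# placeholders: substitute a real par/tim pair and pulsar distance in kpc
psr = libstempo.tempopulsar(parfile='pulsar.par', timfile='pulsar.tim')
gwb = add_gwb(psr, dist=1.0, ngw=1000, seed=-42)   # returns the GWB object
# reuse the returned object for a consistent background across pulsars:
# gwb.add_gwb(other_psr, other_dist)
```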
7,013
vallis/libstempo
libstempo/toasim.py
add_dipole_gwb
def add_dipole_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8, fhigh=1e-5, gwAmp=1e-20, alpha=-0.66, logspacing=True, dipoleamps=None, dipoledir=None, dipolemag=None): """Add a stochastic background from inspiraling binaries distributed according to a pure dipole distribution, using the tempo2 code that underlies the GWdipolebkgrd plugin. The basic use is identical to that of 'add_gwb': Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries, 'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number-generator, 'flow' and 'fhigh' [Hz] determine the background band, 'gwAmp' and 'alpha' determine its amplitude and exponent, and setting 'logspacing' to False will use linear spacing for the individual sources. Additionally, the dipole component can be specified by using one of two methods: 1) Specify the dipole direction as three dipole amplitudes, in the vector dipoleamps 2) Specify the direction of the dipole as a magnitude dipolemag, and a vector dipoledir=[dipolephi, dipoletheta] It is also possible to create a background object with gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing) then call the method gwb.add_gwb(pulsar[i],dist) repeatedly to get a consistent background for multiple pulsars. Returns the GWB object """ gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing, dipoleamps, dipoledir, dipolemag) gwb.add_gwb(psr,dist) return gwb
python
def add_dipole_gwb(psr, dist=1, ngw=1000, seed=None, flow=1e-8, fhigh=1e-5, gwAmp=1e-20, alpha=-0.66, logspacing=True, dipoleamps=None, dipoledir=None, dipolemag=None): gwb = GWB(ngw, seed, flow, fhigh, gwAmp, alpha, logspacing, dipoleamps, dipoledir, dipolemag) gwb.add_gwb(psr,dist) return gwb
[ "def", "add_dipole_gwb", "(", "psr", ",", "dist", "=", "1", ",", "ngw", "=", "1000", ",", "seed", "=", "None", ",", "flow", "=", "1e-8", ",", "fhigh", "=", "1e-5", ",", "gwAmp", "=", "1e-20", ",", "alpha", "=", "-", "0.66", ",", "logspacing", "="...
Add a stochastic background from inspiraling binaries distributed according to a pure dipole distribution, using the tempo2 code that underlies the GWdipolebkgrd plugin. The basic use is identical to that of 'add_gwb': Here 'dist' is the pulsar distance [in kpc]; 'ngw' is the number of binaries, 'seed' (a negative integer) reseeds the GWbkgrd pseudorandom-number-generator, 'flow' and 'fhigh' [Hz] determine the background band, 'gwAmp' and 'alpha' determine its amplitude and exponent, and setting 'logspacing' to False will use linear spacing for the individual sources. Additionally, the dipole component can be specified by using one of two methods: 1) Specify the dipole direction as three dipole amplitudes, in the vector dipoleamps 2) Specify the direction of the dipole as a magnitude dipolemag, and a vector dipoledir=[dipolephi, dipoletheta] It is also possible to create a background object with gwb = GWB(ngw,seed,flow,fhigh,gwAmp,alpha,logspacing) then call the method gwb.add_gwb(pulsar[i],dist) repeatedly to get a consistent background for multiple pulsars. Returns the GWB object
[ "Add", "a", "stochastic", "background", "from", "inspiraling", "binaries", "distributed", "according", "to", "a", "pure", "dipole", "distribution", "using", "the", "tempo2", "code", "that", "underlies", "the", "GWdipolebkgrd", "plugin", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L58-L95
7,014
vallis/libstempo
libstempo/toasim.py
add_efac
def add_efac(psr, efac=1.0, flagid=None, flags=None, seed=None): """Add nominal TOA errors, multiplied by `efac` factor. Optionally take a pseudorandom-number-generator seed.""" if seed is not None: N.random.seed(seed) # default efacvec efacvec = N.ones(psr.nobs) # check that efac is scalar if flags is None if flags is None: if not N.isscalar(efac): raise ValueError('ERROR: If flags is None, efac must be a scalar') else: efacvec = N.ones(psr.nobs) * efac if flags is not None and flagid is not None and not N.isscalar(efac): if len(efac) == len(flags): for ct, flag in enumerate(flags): ind = flag == N.array(psr.flagvals(flagid)) efacvec[ind] = efac[ct] psr.stoas[:] += efacvec * psr.toaerrs * (1e-6 / day) * N.random.randn(psr.nobs)
python
def add_efac(psr, efac=1.0, flagid=None, flags=None, seed=None): if seed is not None: N.random.seed(seed) # default efacvec efacvec = N.ones(psr.nobs) # check that efac is scalar if flags is None if flags is None: if not N.isscalar(efac): raise ValueError('ERROR: If flags is None, efac must be a scalar') else: efacvec = N.ones(psr.nobs) * efac if flags is not None and flagid is not None and not N.isscalar(efac): if len(efac) == len(flags): for ct, flag in enumerate(flags): ind = flag == N.array(psr.flagvals(flagid)) efacvec[ind] = efac[ct] psr.stoas[:] += efacvec * psr.toaerrs * (1e-6 / day) * N.random.randn(psr.nobs)
[ "def", "add_efac", "(", "psr", ",", "efac", "=", "1.0", ",", "flagid", "=", "None", ",", "flags", "=", "None", ",", "seed", "=", "None", ")", ":", "if", "seed", "is", "not", "None", ":", "N", ".", "random", ".", "seed", "(", "seed", ")", "# def...
Add nominal TOA errors, multiplied by `efac` factor. Optionally take a pseudorandom-number-generator seed.
[ "Add", "nominal", "TOA", "errors", "multiplied", "by", "efac", "factor", ".", "Optionally", "take", "a", "pseudorandom", "-", "number", "-", "generator", "seed", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L153-L176
7,015
vallis/libstempo
libstempo/toasim.py
extrap1d
def extrap1d(interpolator): """ Function to extend an interpolation function to an extrapolation function. :param interpolator: scipy interp1d object :returns ufunclike: extension of function to extrapolation """ xs = interpolator.x ys = interpolator.y def pointwise(x): if x < xs[0]: return ys[0] # +(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0]) elif x > xs[-1]: return ys[-1] # +(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2]) else: return interpolator(x) def ufunclike(xs): return N.array(map(pointwise, N.array(xs))) return ufunclike
python
def extrap1d(interpolator): xs = interpolator.x ys = interpolator.y def pointwise(x): if x < xs[0]: return ys[0] # +(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0]) elif x > xs[-1]: return ys[-1] # +(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2]) else: return interpolator(x) def ufunclike(xs): return N.array(map(pointwise, N.array(xs))) return ufunclike
[ "def", "extrap1d", "(", "interpolator", ")", ":", "xs", "=", "interpolator", ".", "x", "ys", "=", "interpolator", ".", "y", "def", "pointwise", "(", "x", ")", ":", "if", "x", "<", "xs", "[", "0", "]", ":", "return", "ys", "[", "0", "]", "# +(x-xs...
Function to extend an interpolation function to an extrapolation function. :param interpolator: scipy interp1d object :returns ufunclike: extension of function to extrapolation
[ "Function", "to", "extend", "an", "interpolation", "function", "to", "an", "extrapolation", "function", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L677-L701
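The record's inner `ufunclike` wraps `map` in `N.array(...)`, a Python 2 idiom; on Python 3 the map iterator must be materialized first. A self-contained Python 3 variant of the same clamp-to-boundary extrapolator:

```python
import numpy as np
from scipy.interpolate import interp1d

def extrap1d(interpolator):
    xs, ys = interpolator.x, interpolator.y
    def pointwise(x):
        if x < xs[0]:
            return ys[0]          # clamp below the fitted range
        elif x > xs[-1]:
            return ys[-1]         # clamp above the fitted range
        return interpolator(x)
    def ufunclike(vals):
        # list(...) is the Python 3 fix for the record's N.array(map(...))
        return np.array(list(map(pointwise, np.asarray(vals))))
    return ufunclike

f = extrap1d(interp1d([0.0, 1.0, 2.0], [0.0, 1.0, 4.0]))
print(f([-1.0, 0.5, 3.0]))        # [0.  0.5 4. ]  flat extrapolation
```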
7,016
vallis/libstempo
libstempo/toasim.py
computeORFMatrix
def computeORFMatrix(psr): """ Compute ORF matrix. :param psr: List of pulsar object instances :returns: Matrix that has the ORF values for every pulsar pair with 2 on the diagonals to account for the pulsar term. """ # begin loop over all pulsar pairs and calculate ORF npsr = len(psr) ORF = N.zeros((npsr, npsr)) phati = N.zeros(3) phatj = N.zeros(3) ptheta = [N.pi/2 - p['DECJ'].val for p in psr] pphi = [p['RAJ'].val for p in psr] for ll in range(0, npsr): phati[0] = N.cos(pphi[ll]) * N.sin(ptheta[ll]) phati[1] = N.sin(pphi[ll]) * N.sin(ptheta[ll]) phati[2] = N.cos(ptheta[ll]) for kk in range(0, npsr): phatj[0] = N.cos(pphi[kk]) * N.sin(ptheta[kk]) phatj[1] = N.sin(pphi[kk]) * N.sin(ptheta[kk]) phatj[2] = N.cos(ptheta[kk]) if ll != kk: xip = (1.-N.sum(phati*phatj)) / 2. ORF[ll, kk] = 3.*( 1./3. + xip * ( N.log(xip) -1./6.) ) else: ORF[ll, kk] = 2.0 return ORF
python
def computeORFMatrix(psr): # begin loop over all pulsar pairs and calculate ORF npsr = len(psr) ORF = N.zeros((npsr, npsr)) phati = N.zeros(3) phatj = N.zeros(3) ptheta = [N.pi/2 - p['DECJ'].val for p in psr] pphi = [p['RAJ'].val for p in psr] for ll in range(0, npsr): phati[0] = N.cos(pphi[ll]) * N.sin(ptheta[ll]) phati[1] = N.sin(pphi[ll]) * N.sin(ptheta[ll]) phati[2] = N.cos(ptheta[ll]) for kk in range(0, npsr): phatj[0] = N.cos(pphi[kk]) * N.sin(ptheta[kk]) phatj[1] = N.sin(pphi[kk]) * N.sin(ptheta[kk]) phatj[2] = N.cos(ptheta[kk]) if ll != kk: xip = (1.-N.sum(phati*phatj)) / 2. ORF[ll, kk] = 3.*( 1./3. + xip * ( N.log(xip) -1./6.) ) else: ORF[ll, kk] = 2.0 return ORF
[ "def", "computeORFMatrix", "(", "psr", ")", ":", "# begin loop over all pulsar pairs and calculate ORF", "npsr", "=", "len", "(", "psr", ")", "ORF", "=", "N", ".", "zeros", "(", "(", "npsr", ",", "npsr", ")", ")", "phati", "=", "N", ".", "zeros", "(", "3...
Compute ORF matrix. :param psr: List of pulsar object instances :returns: Matrix that has the ORF values for every pulsar pair with 2 on the diagonals to account for the pulsar term.
[ "Compute", "ORF", "matrix", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/toasim.py#L844-L879
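The off-diagonal entries are the Hellings-Downs curve as a function of the angular separation zeta between the two lines of sight (cos zeta = phati . phatj). A self-contained sketch of just that scalar formula:

```python
import numpy as np

def hd_orf(zeta):
    """Hellings-Downs overlap, matching the record's off-diagonal branch."""
    xip = (1.0 - np.cos(zeta)) / 2.0
    return 3.0 * (1.0 / 3.0 + xip * (np.log(xip) - 1.0 / 6.0))

for deg in (30, 60, 90, 180):
    print(deg, hd_orf(np.radians(deg)))
```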
7,017
vallis/libstempo
libstempo/plot.py
plotres
def plotres(psr,deleted=False,group=None,**kwargs): """Plot residuals, compute unweighted rms residual.""" res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs if (not deleted) and N.any(psr.deleted != 0): res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0] print("Plotting {0}/{1} nondeleted points.".format(len(res),psr.nobs)) meanres = math.sqrt(N.mean(res**2)) / 1e-6 if group is None: i = N.argsort(t) P.errorbar(t[i],res[i]/1e-6,yerr=errs[i],fmt='x',**kwargs) else: if (not deleted) and N.any(psr.deleted): flagmask = psr.flagvals(group)[~psr.deleted] else: flagmask = psr.flagvals(group) unique = list(set(flagmask)) for flagval in unique: f = (flagmask == flagval) flagres, flagt, flagerrs = res[f], t[f], errs[f] i = N.argsort(flagt) P.errorbar(flagt[i],flagres[i]/1e-6,yerr=flagerrs[i],fmt='x',**kwargs) P.legend(unique,numpoints=1,bbox_to_anchor=(1.1,1.1)) P.xlabel('MJD'); P.ylabel('res [us]') P.title("{0} - rms res = {1:.2f} us".format(psr.name,meanres))
python
def plotres(psr,deleted=False,group=None,**kwargs): res, t, errs = psr.residuals(), psr.toas(), psr.toaerrs if (not deleted) and N.any(psr.deleted != 0): res, t, errs = res[psr.deleted == 0], t[psr.deleted == 0], errs[psr.deleted == 0] print("Plotting {0}/{1} nondeleted points.".format(len(res),psr.nobs)) meanres = math.sqrt(N.mean(res**2)) / 1e-6 if group is None: i = N.argsort(t) P.errorbar(t[i],res[i]/1e-6,yerr=errs[i],fmt='x',**kwargs) else: if (not deleted) and N.any(psr.deleted): flagmask = psr.flagvals(group)[~psr.deleted] else: flagmask = psr.flagvals(group) unique = list(set(flagmask)) for flagval in unique: f = (flagmask == flagval) flagres, flagt, flagerrs = res[f], t[f], errs[f] i = N.argsort(flagt) P.errorbar(flagt[i],flagres[i]/1e-6,yerr=flagerrs[i],fmt='x',**kwargs) P.legend(unique,numpoints=1,bbox_to_anchor=(1.1,1.1)) P.xlabel('MJD'); P.ylabel('res [us]') P.title("{0} - rms res = {1:.2f} us".format(psr.name,meanres))
[ "def", "plotres", "(", "psr", ",", "deleted", "=", "False", ",", "group", "=", "None", ",", "*", "*", "kwargs", ")", ":", "res", ",", "t", ",", "errs", "=", "psr", ".", "residuals", "(", ")", ",", "psr", ".", "toas", "(", ")", ",", "psr", "."...
Plot residuals, compute unweighted rms residual.
[ "Plot", "residuals", "compute", "unweighted", "rms", "residual", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/plot.py#L7-L38
7,018
vallis/libstempo
libstempo/plot.py
plotgwsrc
def plotgwsrc(gwb): """ Plot a GWB source population as a mollweide projection. """ theta, phi, omega, polarization = gwb.gw_dist() rho = phi-N.pi eta = 0.5*N.pi - theta # I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014: # /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485: # RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2)) #old_settings = N.seterr(invalid='ignore') P.title("GWB source population") ax = P.axes(projection='mollweide') foo = P.scatter(rho, eta, marker='.', s=1) #bar = N.seterr(**old_settings) return foo
python
def plotgwsrc(gwb): theta, phi, omega, polarization = gwb.gw_dist() rho = phi-N.pi eta = 0.5*N.pi - theta # I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014: # /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485: # RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2)) #old_settings = N.seterr(invalid='ignore') P.title("GWB source population") ax = P.axes(projection='mollweide') foo = P.scatter(rho, eta, marker='.', s=1) #bar = N.seterr(**old_settings) return foo
[ "def", "plotgwsrc", "(", "gwb", ")", ":", "theta", ",", "phi", ",", "omega", ",", "polarization", "=", "gwb", ".", "gw_dist", "(", ")", "rho", "=", "phi", "-", "N", ".", "pi", "eta", "=", "0.5", "*", "N", ".", "pi", "-", "theta", "# I don't know ...
Plot a GWB source population as a mollweide projection.
[ "Plot", "a", "GWB", "source", "population", "as", "a", "mollweide", "projection", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/plot.py#L304-L324
7,019
vallis/libstempo
libstempo/emcee.py
merge
def merge(data,skip=50,fraction=1.0): """Merge one of every 'skip' clouds into a single emcee population, using the later 'fraction' of the run.""" w,s,d = data.chains.shape start = int((1.0 - fraction) * s) total = int((s - start) / skip) return data.chains[:,start::skip,:].reshape((w*total,d))
python
def merge(data,skip=50,fraction=1.0): w,s,d = data.chains.shape start = int((1.0 - fraction) * s) total = int((s - start) / skip) return data.chains[:,start::skip,:].reshape((w*total,d))
[ "def", "merge", "(", "data", ",", "skip", "=", "50", ",", "fraction", "=", "1.0", ")", ":", "w", ",", "s", ",", "d", "=", "data", ".", "chains", ".", "shape", "start", "=", "int", "(", "(", "1.0", "-", "fraction", ")", "*", "s", ")", "total",...
Merge one of every 'skip' clouds into a single emcee population, using the later 'fraction' of the run.
[ "Merge", "one", "every", "skip", "clouds", "into", "a", "single", "emcee", "population", "using", "the", "later", "fraction", "of", "the", "run", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/emcee.py#L46-L55
7,020
vallis/libstempo
libstempo/emcee.py
cull
def cull(data,index,min=None,max=None): """Sieve an emcee cloud by excluding walkers with search variable 'index' smaller than 'min' or larger than 'max'.""" ret = data if min is not None: ret = ret[ret[:,index] > min,:] if max is not None: ret = ret[ret[:,index] < max,:] return ret
python
def cull(data,index,min=None,max=None): ret = data if min is not None: ret = ret[ret[:,index] > min,:] if max is not None: ret = ret[ret[:,index] < max,:] return ret
[ "def", "cull", "(", "data", ",", "index", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "ret", "=", "data", "if", "min", "is", "not", "None", ":", "ret", "=", "ret", "[", "ret", "[", ":", ",", "index", "]", ">", "min", ",", ":...
Sieve an emcee cloud by excluding walkers with search variable 'index' smaller than 'min' or larger than 'max'.
[ "Sieve", "an", "emcee", "clouds", "by", "excluding", "walkers", "with", "search", "variable", "index", "smaller", "than", "min", "or", "larger", "than", "max", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/emcee.py#L57-L69
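`cull` is pure array filtering, so it can be demonstrated directly (it intentionally shadows the builtins `min`/`max` as keyword names, following the record):

```python
import numpy as np

def cull(data, index, min=None, max=None):
    ret = data
    if min is not None:
        ret = ret[ret[:, index] > min, :]
    if max is not None:
        ret = ret[ret[:, index] < max, :]
    return ret

pop = np.array([[0.1, 5.0],
                [0.9, 2.0],
                [0.5, 9.0]])
print(cull(pop, 0, min=0.2, max=0.8))   # keeps only the walker at 0.5
```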
7,021
vallis/libstempo
libstempo/eccUtils.py
make_ecc_interpolant
def make_ecc_interpolant(): """ Make interpolation function from eccentricity file to determine number of harmonics to use for a given eccentricity. :returns: interpolant """ pth = resource_filename(Requirement.parse('libstempo'), 'libstempo/ecc_vs_nharm.txt') fil = np.loadtxt(pth) return interp1d(fil[:,0], fil[:,1])
python
def make_ecc_interpolant(): pth = resource_filename(Requirement.parse('libstempo'), 'libstempo/ecc_vs_nharm.txt') fil = np.loadtxt(pth) return interp1d(fil[:,0], fil[:,1])
[ "def", "make_ecc_interpolant", "(", ")", ":", "pth", "=", "resource_filename", "(", "Requirement", ".", "parse", "(", "'libstempo'", ")", ",", "'libstempo/ecc_vs_nharm.txt'", ")", "fil", "=", "np", ".", "loadtxt", "(", "pth", ")", "return", "interp1d", "(", ...
Make interpolation function from eccentricity file to determine number of harmonics to use for a given eccentricity. :returns: interpolant
[ "Make", "interpolation", "function", "from", "eccentricity", "file", "to", "determine", "number", "of", "harmonics", "to", "use", "for", "a", "given", "eccentricity", "." ]
0b19300a9b24d64c9ddc25cd6ddbfd12b6231990
https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/eccUtils.py#L28-L41
7,022
gagneurlab/concise
concise/legacy/kmer.py
best_kmers
def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1, seq_align="start", trim_seq_len=None): """ Find best k-mers for CONCISE initialization. Args: dt (pd.DataFrame): Table containing response variable and sequence. response (str): Name of the column used as the response variable. sequence (str): Name of the column storing the DNA/RNA sequences. k (int): Desired k-mer length. n_cores (int): Number of cores to use for computation. It can use up to 3 cores. consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG? seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences? trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered. Returns: string list: Best set of motifs for this dataset sorted with respect to confidence (best candidate occurring first). Details: First a lasso model gets fitted to get a set of initial motifs. Next, the best subset of unrelated motifs is selected by stepwise selection. """ y = dt[response] seq = dt[sequence] if trim_seq_len is not None: seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len) seq = [s.replace("N", "") for s in seq] dt_kmer = kmer_count(seq, k) Xsp = csc_matrix(dt_kmer) en = ElasticNet(alpha=1, standardize=False, n_splits=3) en.fit(Xsp, y) # which coefficients are nonzero? nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist() # perform stepwise selection # # TODO - how do we deal with the intercept? # largest number of motifs where they don't differ by more than 1 k-mer def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True): """ perform stepwise model selection while preventing adding a motif similar to the already selected motifs. """ F, pval = f_regression(dt_kmer[to_be_selected_kmers], y) kmer = to_be_selected_kmers.pop(pval.argmin()) selected_kmers.append(kmer) def select_criterion(s1, s2, consider_shift=True): if hamming_distance(s1, s2) <= 1: return False if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0: return False if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0: return False return True to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers if select_criterion(ckmer, kmer, consider_shift)] if len(to_be_selected_kmers) == 0: return selected_kmers else: # regress out the new feature lm = LinearRegression() lm.fit(dt_kmer[selected_kmers], y) y_new = y - lm.predict(dt_kmer[selected_kmers]) return find_next_best(dt_kmer, y_new, selected_kmers, to_be_selected_kmers, consider_shift) selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift) return selected_kmers
python
def best_kmers(dt, response, sequence, k=6, consider_shift=True, n_cores=1, seq_align="start", trim_seq_len=None): y = dt[response] seq = dt[sequence] if trim_seq_len is not None: seq = pad_sequences(seq, align=seq_align, maxlen=trim_seq_len) seq = [s.replace("N", "") for s in seq] dt_kmer = kmer_count(seq, k) Xsp = csc_matrix(dt_kmer) en = ElasticNet(alpha=1, standardize=False, n_splits=3) en.fit(Xsp, y) # which coefficients are nonzero? nonzero_kmers = dt_kmer.columns.values[en.coef_ != 0].tolist() # perform stepwise selection # # TODO - how do we deal with the intercept? # largest number of motifs where they don't differ by more than 1 k-mer def find_next_best(dt_kmer, y, selected_kmers, to_be_selected_kmers, consider_shift=True): """ perform stepwise model selection while preventing adding a motif similar to the already selected motifs. """ F, pval = f_regression(dt_kmer[to_be_selected_kmers], y) kmer = to_be_selected_kmers.pop(pval.argmin()) selected_kmers.append(kmer) def select_criterion(s1, s2, consider_shift=True): if hamming_distance(s1, s2) <= 1: return False if consider_shift and hamming_distance(s1[1:], s2[:-1]) == 0: return False if consider_shift and hamming_distance(s1[:-1], s2[1:]) == 0: return False return True to_be_selected_kmers = [ckmer for ckmer in to_be_selected_kmers if select_criterion(ckmer, kmer, consider_shift)] if len(to_be_selected_kmers) == 0: return selected_kmers else: # regress out the new feature lm = LinearRegression() lm.fit(dt_kmer[selected_kmers], y) y_new = y - lm.predict(dt_kmer[selected_kmers]) return find_next_best(dt_kmer, y_new, selected_kmers, to_be_selected_kmers, consider_shift) selected_kmers = find_next_best(dt_kmer, y, [], nonzero_kmers, consider_shift) return selected_kmers
[ "def", "best_kmers", "(", "dt", ",", "response", ",", "sequence", ",", "k", "=", "6", ",", "consider_shift", "=", "True", ",", "n_cores", "=", "1", ",", "seq_align", "=", "\"start\"", ",", "trim_seq_len", "=", "None", ")", ":", "y", "=", "dt", "[", ...
Find best k-mers for CONCISE initialization. Args: dt (pd.DataFrame): Table containing response variable and sequence. response (str): Name of the column used as the response variable. sequence (str): Name of the column storing the DNA/RNA sequences. k (int): Desired k-mer length. n_cores (int): Number of cores to use for computation. It can use up to 3 cores. consider_shift (boolean): When performing stepwise k-mer selection. Is TATTTA similar to ATTTAG? seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences? trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered. Returns: string list: Best set of motifs for this dataset sorted with respect to confidence (best candidate occurring first). Details: First a lasso model gets fitted to get a set of initial motifs. Next, the best subset of unrelated motifs is selected by stepwise selection.
[ "Find", "best", "k", "-", "mers", "for", "CONCISE", "initialization", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/kmer.py#L28-L102
7,023
gagneurlab/concise
concise/legacy/kmer.py
kmer_count
def kmer_count(seq_list, k): """ Generate k-mer counts from a set of sequences Args: seq_list (iterable): List of DNA sequences (with letters from {A, C, G, T}) k (int): K in k-mer. Returns: pandas.DataFrame: Count matrix for each sequence in seq_list Example: >>> kmer_count(["ACGTTAT", "GACGCGA"], 2) AA AC AG AT CA CC CG CT GA GC GG GT TA TC TG TT 0 0 1 0 1 0 0 1 0 0 0 0 1 1 0 0 1 1 0 1 0 0 0 0 2 0 2 1 0 0 0 0 0 0 """ # generate all k-mers all_kmers = generate_all_kmers(k) kmer_count_list = [] for seq in seq_list: kmer_count_list.append([seq.count(kmer) for kmer in all_kmers]) return pd.DataFrame(kmer_count_list, columns=all_kmers)
python
def kmer_count(seq_list, k): # generate all k-mers all_kmers = generate_all_kmers(k) kmer_count_list = [] for seq in seq_list: kmer_count_list.append([seq.count(kmer) for kmer in all_kmers]) return pd.DataFrame(kmer_count_list, columns=all_kmers)
[ "def", "kmer_count", "(", "seq_list", ",", "k", ")", ":", "# generate all k-mers", "all_kmers", "=", "generate_all_kmers", "(", "k", ")", "kmer_count_list", "=", "[", "]", "for", "seq", "in", "seq_list", ":", "kmer_count_list", ".", "append", "(", "[", "seq"...
Generate k-mer counts from a set of sequences Args: seq_list (iterable): List of DNA sequences (with letters from {A, C, G, T}) k (int): K in k-mer. Returns: pandas.DataFrame: Count matrix for each sequence in seq_list Example: >>> kmer_count(["ACGTTAT", "GACGCGA"], 2) AA AC AG AT CA CC CG CT GA GC GG GT TA TC TG TT 0 0 1 0 1 0 0 1 0 0 0 0 1 1 0 0 1 1 0 1 0 0 0 0 2 0 2 1 0 0 0 0 0 0
[ "Generate", "k", "-", "mer", "counts", "from", "a", "set", "of", "sequences" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/kmer.py#L107-L128
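Both k-mer helpers are self-contained; the sketch below reproduces them and the docstring's example. One caveat worth a comment: `str.count` counts non-overlapping matches, so overlapping k-mers are undercounted (e.g. `"AAA".count("AA")` is 1, not 2).

```python
import itertools
import pandas as pd

def generate_all_kmers(k):
    return [''.join(p) for p in itertools.product('ACGT', repeat=k)]

def kmer_count(seq_list, k):
    all_kmers = generate_all_kmers(k)
    # str.count is non-overlapping: "AAA".count("AA") == 1
    rows = [[seq.count(km) for km in all_kmers] for seq in seq_list]
    return pd.DataFrame(rows, columns=all_kmers)

print(kmer_count(["ACGTTAT", "GACGCGA"], 2))   # 2 x 16 count matrix
```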
7,024
gagneurlab/concise
concise/legacy/kmer.py
generate_all_kmers
def generate_all_kmers(k): """ Generate all possible k-mers Example: >>> generate_all_kmers(2) ['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT'] """ bases = ['A', 'C', 'G', 'T'] return [''.join(p) for p in itertools.product(bases, repeat=k)]
python
def generate_all_kmers(k): bases = ['A', 'C', 'G', 'T'] return [''.join(p) for p in itertools.product(bases, repeat=k)]
[ "def", "generate_all_kmers", "(", "k", ")", ":", "bases", "=", "[", "'A'", ",", "'C'", ",", "'G'", ",", "'T'", "]", "return", "[", "''", ".", "join", "(", "p", ")", "for", "p", "in", "itertools", ".", "product", "(", "bases", ",", "repeat", "=", ...
Generate all possible k-mers Example: >>> generate_all_kmers(2) ['AA', 'AC', 'AG', 'AT', 'CA', 'CC', 'CG', 'CT', 'GA', 'GC', 'GG', 'GT', 'TA', 'TC', 'TG', 'TT']
[ "Generate", "all", "possible", "k", "-", "mers" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/kmer.py#L130-L139
7,025
gagneurlab/concise
concise/utils/helper.py
dict_to_numpy_dict
def dict_to_numpy_dict(obj_dict): """ Convert a dictionary of lists into a dictionary of numpy arrays """ return {key: np.asarray(value) if value is not None else None for key, value in obj_dict.items()}
python
def dict_to_numpy_dict(obj_dict): return {key: np.asarray(value) if value is not None else None for key, value in obj_dict.items()}
[ "def", "dict_to_numpy_dict", "(", "obj_dict", ")", ":", "return", "{", "key", ":", "np", ".", "asarray", "(", "value", ")", "if", "value", "is", "not", "None", "else", "None", "for", "key", ",", "value", "in", "obj_dict", ".", "items", "(", ")", "}" ...
Convert a dictionary of lists into a dictionary of numpy arrays
[ "Convert", "a", "dictionary", "of", "lists", "into", "a", "dictionary", "of", "numpy", "arrays" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/helper.py#L65-L69
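A quick self-contained check of the record's helper, showing that `None` values pass through untouched:

```python
import numpy as np

def dict_to_numpy_dict(obj_dict):
    return {key: np.asarray(value) if value is not None else None
            for key, value in obj_dict.items()}

d = dict_to_numpy_dict({"w": [1.0, 2.0], "bias": None})
print(type(d["w"]).__name__, d["bias"])   # ndarray None
```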
7,026
gagneurlab/concise
concise/utils/helper.py
rec_dict_to_numpy_dict
def rec_dict_to_numpy_dict(obj_dict): """ Same as dict_to_numpy_dict, but recursive """ if type(obj_dict) == dict: return {key: rec_dict_to_numpy_dict(value) if value is not None else None for key, value in obj_dict.items()} elif obj_dict is None: return None else: return np.asarray(obj_dict)
python
def rec_dict_to_numpy_dict(obj_dict): if type(obj_dict) == dict: return {key: rec_dict_to_numpy_dict(value) if value is not None else None for key, value in obj_dict.items()} elif obj_dict is None: return None else: return np.asarray(obj_dict)
[ "def", "rec_dict_to_numpy_dict", "(", "obj_dict", ")", ":", "if", "type", "(", "obj_dict", ")", "==", "dict", ":", "return", "{", "key", ":", "rec_dict_to_numpy_dict", "(", "value", ")", "if", "value", "is", "not", "None", "else", "None", "for", "key", "...
Same as dict_to_numpy_dict, but recursive
[ "Same", "as", "dict_to_numpy_dict", "but", "recursive" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/helper.py#L72-L81
7,027
gagneurlab/concise
concise/utils/helper.py
compare_numpy_dict
def compare_numpy_dict(a, b, exact=True): """ Compare two recursive numpy dictionaries """ if type(a) != type(b) and type(a) != np.ndarray and type(b) != np.ndarray: return False # go through a dictionary if type(a) == dict and type(b) == dict: if not a.keys() == b.keys(): return False for key in a.keys(): res = compare_numpy_dict(a[key], b[key], exact) if res == False: print("false for key = ", key) return False return True # if type(a) == np.ndarray and type(b) == np.ndarray: if type(a) == np.ndarray or type(b) == np.ndarray: if exact: return (a == b).all() else: return np.testing.assert_almost_equal(a, b) if a is None and b is None: return True raise NotImplementedError
python
def compare_numpy_dict(a, b, exact=True): if type(a) != type(b) and type(a) != np.ndarray and type(b) != np.ndarray: return False # go through a dictionary if type(a) == dict and type(b) == dict: if not a.keys() == b.keys(): return False for key in a.keys(): res = compare_numpy_dict(a[key], b[key], exact) if res == False: print("false for key = ", key) return False return True # if type(a) == np.ndarray and type(b) == np.ndarray: if type(a) == np.ndarray or type(b) == np.ndarray: if exact: return (a == b).all() else: return np.testing.assert_almost_equal(a, b) if a is None and b is None: return True raise NotImplementedError
[ "def", "compare_numpy_dict", "(", "a", ",", "b", ",", "exact", "=", "True", ")", ":", "if", "type", "(", "a", ")", "!=", "type", "(", "b", ")", "and", "type", "(", "a", ")", "!=", "np", ".", "ndarray", "and", "type", "(", "b", ")", "!=", "np"...
Compare two recursive numpy dictionaries
[ "Compare", "two", "recursive", "numpy", "dictionaries" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/helper.py#L84-L111
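One behavior worth noting in the record: with `exact=False` the array branch returns the result of `np.testing.assert_almost_equal`, which raises on mismatch and returns `None` (falsy) on success, so only the `exact=True` path yields a usable boolean. A self-contained sketch of the exact per-leaf comparison on flat dictionaries:

```python
import numpy as np

a = {"w": np.array([1.0, 2.0]), "b": np.array([0.5])}
b = {"w": np.array([1.0, 2.0]), "b": np.array([0.5])}

# mirrors the record's exact branch: keys must match, then (x == y).all()
print(a.keys() == b.keys() and all((a[k] == b[k]).all() for k in a))   # True
```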
7,028
gagneurlab/concise
concise/utils/splines.py
BSpline.getS
def getS(self, add_intercept=False): """Get the penalty matrix S Returns np.array, of shape (n_bases + add_intercept, n_bases + add_intercept) """ S = self.S if add_intercept is True: # S <- cbind(0, rbind(0, S)) # in R zeros = np.zeros_like(S[:1, :]) S = np.vstack([zeros, S]) zeros = np.zeros_like(S[:, :1]) S = np.hstack([zeros, S]) return S
python
def getS(self, add_intercept=False): S = self.S if add_intercept is True: # S <- cbind(0, rbind(0, S)) # in R zeros = np.zeros_like(S[:1, :]) S = np.vstack([zeros, S]) zeros = np.zeros_like(S[:, :1]) S = np.hstack([zeros, S]) return S
[ "def", "getS", "(", "self", ",", "add_intercept", "=", "False", ")", ":", "S", "=", "self", ".", "S", "if", "add_intercept", "is", "True", ":", "# S <- cbind(0, rbind(0, S)) # in R", "zeros", "=", "np", ".", "zeros_like", "(", "S", "[", ":", "1", ",", ...
Get the penalty matrix S Returns np.array, of shape (n_bases + add_intercept, n_bases + add_intercept)
[ "Get", "the", "penalty", "matrix", "S" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/splines.py#L49-L63
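The `add_intercept` branch just pads the penalty matrix with a leading zero row and column (the R idiom `S <- cbind(0, rbind(0, S))` quoted in the record's comment). A standalone sketch of that padding:

```python
import numpy as np

S = np.array([[2.0, -1.0],
              [-1.0, 2.0]])                       # toy 2 x 2 penalty matrix
S_pad = np.vstack([np.zeros((1, 2)), S])          # rbind(0, S)
S_pad = np.hstack([np.zeros((3, 1)), S_pad])      # cbind(0, ...)
print(S_pad.shape)   # (3, 3): the intercept column is left unpenalized
```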
7,029
gagneurlab/concise
concise/data/encode.py
get_pwm_list
def get_pwm_list(motif_name_list, pseudocountProb=0.0001): """Get a list of ENCODE PWM's. # Arguments motif_name_list: List of id's from the `PWM_id` column in `get_metadata()` table pseudocountProb: Added pseudocount probabilities to the PWM # Returns List of `concise.utils.pwm.PWM` instances. """ l = _load_motifs() l = {k.split()[0]: v for k, v in l.items()} pwm_list = [PWM(l[m] + pseudocountProb, name=m) for m in motif_name_list] return pwm_list
python
def get_pwm_list(motif_name_list, pseudocountProb=0.0001): l = _load_motifs() l = {k.split()[0]: v for k, v in l.items()} pwm_list = [PWM(l[m] + pseudocountProb, name=m) for m in motif_name_list] return pwm_list
[ "def", "get_pwm_list", "(", "motif_name_list", ",", "pseudocountProb", "=", "0.0001", ")", ":", "l", "=", "_load_motifs", "(", ")", "l", "=", "{", "k", ".", "split", "(", ")", "[", "0", "]", ":", "v", "for", "k", ",", "v", "in", "l", ".", "items"...
Get a list of ENCODE PWM's. # Arguments motif_name_list: List of id's from the `PWM_id` column in `get_metadata()` table pseudocountProb: Added pseudocount probabilities to the PWM # Returns List of `concise.utils.pwm.PWM` instances.
[ "Get", "a", "list", "of", "ENCODE", "PWM", "s", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/data/encode.py#L34-L47
7,030
gagneurlab/concise
concise/eval_metrics.py
auc
def auc(y_true, y_pred, round=True): """Area under the ROC curve """ y_true, y_pred = _mask_value_nan(y_true, y_pred) if round: y_true = y_true.round() if len(y_true) == 0 or len(np.unique(y_true)) < 2: return np.nan return skm.roc_auc_score(y_true, y_pred)
python
def auc(y_true, y_pred, round=True): y_true, y_pred = _mask_value_nan(y_true, y_pred) if round: y_true = y_true.round() if len(y_true) == 0 or len(np.unique(y_true)) < 2: return np.nan return skm.roc_auc_score(y_true, y_pred)
[ "def", "auc", "(", "y_true", ",", "y_pred", ",", "round", "=", "True", ")", ":", "y_true", ",", "y_pred", "=", "_mask_value_nan", "(", "y_true", ",", "y_pred", ")", "if", "round", ":", "y_true", "=", "y_true", ".", "round", "(", ")", "if", "len", "...
Area under the ROC curve
[ "Area", "under", "the", "ROC", "curve" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L37-L46
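`_mask_value_nan` is a private helper not shown in these records; judging from its use it drops entries where either array is NaN (and presumably also where `y_true` equals a mask value). A hedged sketch of that filtering plus the degenerate-label guard used by `auc`:

```python
import numpy as np
import sklearn.metrics as skm

def mask_nan_sketch(y_true, y_pred):
    # Hypothetical stand-in for the private helper: keep only positions
    # where both arrays are observed.
    keep = ~(np.isnan(y_true) | np.isnan(y_pred))
    return y_true[keep], y_pred[keep]

y_true = np.array([0.0, 1.0, np.nan, 1.0, 0.0])
y_pred = np.array([0.1, 0.8, 0.5, np.nan, 0.3])

yt, yp = mask_nan_sketch(y_true, y_pred)
# Same guard as auc() above: the score is undefined when one class is absent.
score = np.nan if len(yt) == 0 or len(np.unique(yt)) < 2 else skm.roc_auc_score(yt, yp)
print(score)  # computed on the three fully observed pairs
```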
7,031
gagneurlab/concise
concise/eval_metrics.py
recall_at_precision
def recall_at_precision(y_true, y_pred, precision): """Recall at a certain precision threshold Args: y_true: true labels y_pred: predicted labels precision: desired precision level at which to compute the recall """ y_true, y_pred = _mask_value_nan(y_true, y_pred) prec_curve, recall, _ = skm.precision_recall_curve(y_true, y_pred) return recall[np.searchsorted(prec_curve - precision, 0)]
python
def recall_at_precision(y_true, y_pred, precision): y_true, y_pred = _mask_value_nan(y_true, y_pred) prec_curve, recall, _ = skm.precision_recall_curve(y_true, y_pred) return recall[np.searchsorted(prec_curve - precision, 0)]
[ "def", "recall_at_precision", "(", "y_true", ",", "y_pred", ",", "precision", ")", ":", "y_true", ",", "y_pred", "=", "_mask_value_nan", "(", "y_true", ",", "y_pred", ")", "precision", ",", "recall", ",", "_", "=", "skm", ".", "precision_recall_curve", "(", ...
Recall at a certain precision threshold Args: y_true: true labels y_pred: predicted labels precision: desired precision level at which to compute the recall
[ "Recall", "at", "a", "certain", "precision", "threshold" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L57-L67
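A hedged demonstration of the lookup this metric performs: find the first point of the precision-recall curve whose precision reaches the target, then report the recall there. `np.searchsorted`, as used above, assumes the precision curve is sorted; since `precision_recall_curve` does not guarantee monotonicity, the sketch below does the same lookup with a linear scan instead:

```python
import numpy as np
import sklearn.metrics as skm

y_true = np.array([0, 0, 1, 0, 1, 1, 0, 1])
y_score = np.array([0.1, 0.2, 0.35, 0.4, 0.6, 0.65, 0.7, 0.9])
target = 0.75  # desired precision level

prec_curve, rec_curve, _ = skm.precision_recall_curve(y_true, y_score)
# First curve point whose precision reaches the target. np.argmax scans
# linearly, so it is safe even on a non-monotone precision curve.
idx = int(np.argmax(prec_curve >= target))
print(rec_curve[idx])  # 0.75 for this toy data
```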
7,032
gagneurlab/concise
concise/eval_metrics.py
cor
def cor(y_true, y_pred): """Compute Pearson correlation coefficient. """ y_true, y_pred = _mask_nan(y_true, y_pred) return np.corrcoef(y_true, y_pred)[0, 1]
python
def cor(y_true, y_pred): y_true, y_pred = _mask_nan(y_true, y_pred) return np.corrcoef(y_true, y_pred)[0, 1]
[ "def", "cor", "(", "y_true", ",", "y_pred", ")", ":", "y_true", ",", "y_pred", "=", "_mask_nan", "(", "y_true", ",", "y_pred", ")", "return", "np", ".", "corrcoef", "(", "y_true", ",", "y_pred", ")", "[", "0", ",", "1", "]" ]
Compute Pearson correlation coefficient.
[ "Compute", "Pearson", "correlation", "coefficient", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L131-L135
7,033
gagneurlab/concise
concise/eval_metrics.py
kendall
def kendall(y_true, y_pred, nb_sample=100000): """Kendall's tau coefficient, Kendall rank correlation coefficient """ y_true, y_pred = _mask_nan(y_true, y_pred) if len(y_true) > nb_sample: idx = np.arange(len(y_true)) np.random.shuffle(idx) idx = idx[:nb_sample] y_true = y_true[idx] y_pred = y_pred[idx] return kendalltau(y_true, y_pred)[0]
python
def kendall(y_true, y_pred, nb_sample=100000): y_true, y_pred = _mask_nan(y_true, y_pred) if len(y_true) > nb_sample: idx = np.arange(len(y_true)) np.random.shuffle(idx) idx = idx[:nb_sample] y_true = y_true[idx] y_pred = y_pred[idx] return kendalltau(y_true, y_pred)[0]
[ "def", "kendall", "(", "y_true", ",", "y_pred", ",", "nb_sample", "=", "100000", ")", ":", "y_true", ",", "y_pred", "=", "_mask_nan", "(", "y_true", ",", "y_pred", ")", "if", "len", "(", "y_true", ")", ">", "nb_sample", ":", "idx", "=", "np", ".", ...
Kendall's tau coefficient, Kendall rank correlation coefficient
[ "Kendall", "s", "tau", "coefficient", "Kendall", "rank", "correlation", "coefficient" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L138-L148
7,034
gagneurlab/concise
concise/eval_metrics.py
mad
def mad(y_true, y_pred): """Mean absolute deviation """ y_true, y_pred = _mask_nan(y_true, y_pred) return np.mean(np.abs(y_true - y_pred))
python
def mad(y_true, y_pred): y_true, y_pred = _mask_nan(y_true, y_pred) return np.mean(np.abs(y_true - y_pred))
[ "def", "mad", "(", "y_true", ",", "y_pred", ")", ":", "y_true", ",", "y_pred", "=", "_mask_nan", "(", "y_true", ",", "y_pred", ")", "return", "np", ".", "mean", "(", "np", ".", "abs", "(", "y_true", "-", "y_pred", ")", ")" ]
Mean absolute deviation
[ "Median", "absolute", "deviation" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L151-L155
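Note that the function computes the *mean* of the absolute errors; a median-based statistic would be robust to outliers. The difference is easy to check numerically:

```python
import numpy as np

err = np.array([0.0, 1.0, 1.0, 10.0])  # one large outlier

print(np.mean(np.abs(err)))    # 3.0 -- what mad() above computes
print(np.median(np.abs(err)))  # 1.0 -- robust to the outlier
```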
7,035
gagneurlab/concise
concise/eval_metrics.py
mse
def mse(y_true, y_pred): """Mean squared error """ y_true, y_pred = _mask_nan(y_true, y_pred) return ((y_true - y_pred) ** 2).mean(axis=None)
python
def mse(y_true, y_pred): y_true, y_pred = _mask_nan(y_true, y_pred) return ((y_true - y_pred) ** 2).mean(axis=None)
[ "def", "mse", "(", "y_true", ",", "y_pred", ")", ":", "y_true", ",", "y_pred", "=", "_mask_nan", "(", "y_true", ",", "y_pred", ")", "return", "(", "(", "y_true", "-", "y_pred", ")", "**", "2", ")", ".", "mean", "(", "axis", "=", "None", ")" ]
Mean squared error
[ "Mean", "squared", "error" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/eval_metrics.py#L170-L174
7,036
gagneurlab/concise
concise/legacy/args_sampler.py
sample_params
def sample_params(params): """Randomly sample hyper-parameters stored in a dictionary on a predefined range and scale. Useful for hyper-parameter random search. Args: params (dict): hyper-parameters to sample. Dictionary value-type parsing: - :python:`[1e3, 1e7]` - uniformly sample on a **log10** scale from the interval :python:`(1e3,1e7)` - :python:`(1, 10)` - uniformly sample on a **normal** scale from the interval :python:`(1,10)` - :python:`{1, 2}` - sample from a **set** of values. - :python:`1` - don't sample Returns: dict: Dictionary with the same keys as :py:attr:`params`, but with only one element as the value. Examples: >>> myparams = { "max_pool": True, # always use True "step_size": [0.09, 0.005], "step_decay": (0.9, 1), "n_splines": {10, None}, # use either 10 or None "some_tuple": {(1,2), (1)}, } >>> concise.sample_params(myparams) {'step_decay': 0.9288, 'step_size': 0.0292, 'max_pool': True, 'n_splines': None, 'some_tuple': (1, 2)} >>> concise.sample_params(myparams) {'step_decay': 0.9243, 'step_size': 0.0293, 'max_pool': True, 'n_splines': None, 'some_tuple': (1)} >>> concise.sample_params(myparams) {'step_decay': 0.9460, 'step_size': 0.0301, 'max_pool': True, 'n_splines': 10, 'some_tuple': (1, 2)} Note: - :python:`{[1,2], [3,4]}` is invalid. Use :python:`{(1,2), (3,4)}` instead. - You can always use :python:`{}` with a single element to by-pass sampling. """ def sample_log(myrange): x = np.random.uniform(np.log10(myrange[0]), np.log10(myrange[1])) return 10**x def sample_unif(myrange): x = np.random.uniform(myrange[0], myrange[1]) return x def sample_set(myset): x = random.sample(myset, 1) return x[0] def type_dep_sample(myrange): if type(myrange) is list: return sample_log(myrange) if type(myrange) is tuple: return sample_unif(myrange) if type(myrange) is set: return sample_set(myrange) return myrange return {k: type_dep_sample(v) for k, v in params.items()}
python
def sample_params(params): def sample_log(myrange): x = np.random.uniform(np.log10(myrange[0]), np.log10(myrange[1])) return 10**x def sample_unif(myrange): x = np.random.uniform(myrange[0], myrange[1]) return x def sample_set(myset): x = random.sample(myset, 1) return x[0] def type_dep_sample(myrange): if type(myrange) is list: return sample_log(myrange) if type(myrange) is tuple: return sample_unif(myrange) if type(myrange) is set: return sample_set(myrange) return myrange return {k: type_dep_sample(v) for k, v in params.items()}
[ "def", "sample_params", "(", "params", ")", ":", "def", "sample_log", "(", "myrange", ")", ":", "x", "=", "np", ".", "random", ".", "uniform", "(", "np", ".", "log10", "(", "myrange", "[", "0", "]", ")", ",", "np", ".", "log10", "(", "myrange", "...
Randomly sample hyper-parameters stored in a dictionary on a predefined range and scale. Useful for hyper-parameter random search. Args: params (dict): hyper-parameters to sample. Dictionary value-type parsing: - :python:`[1e3, 1e7]` - uniformly sample on a **log10** scale from the interval :python:`(1e3,1e7)` - :python:`(1, 10)` - uniformly sample on a **normal** scale from the interval :python:`(1,10)` - :python:`{1, 2}` - sample from a **set** of values. - :python:`1` - don't sample Returns: dict: Dictionary with the same keys as :py:attr:`params`, but with only one element as the value. Examples: >>> myparams = { "max_pool": True, # always use True "step_size": [0.09, 0.005], "step_decay": (0.9, 1), "n_splines": {10, None}, # use either 10 or None "some_tuple": {(1,2), (1)}, } >>> concise.sample_params(myparams) {'step_decay': 0.9288, 'step_size': 0.0292, 'max_pool': True, 'n_splines': None, 'some_tuple': (1, 2)} >>> concise.sample_params(myparams) {'step_decay': 0.9243, 'step_size': 0.0293, 'max_pool': True, 'n_splines': None, 'some_tuple': (1)} >>> concise.sample_params(myparams) {'step_decay': 0.9460, 'step_size': 0.0301, 'max_pool': True, 'n_splines': 10, 'some_tuple': (1, 2)} Note: - :python:`{[1,2], [3,4]}` is invalid. Use :python:`{(1,2), (3,4)}` instead. - You can always use :python:`{}` with a single element to by-pass sampling.
[ "Randomly", "sample", "hyper", "-", "parameters", "stored", "in", "a", "dictionary", "on", "a", "predefined", "range", "and", "scale", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/args_sampler.py#L8-L68
7,037
gagneurlab/concise
concise/metrics.py
cat_acc
def cat_acc(y, z): """Classification accuracy for multi-categorical case """ weights = _cat_sample_weights(y) _acc = K.cast(K.equal(K.argmax(y, axis=-1), K.argmax(z, axis=-1)), K.floatx()) _acc = K.sum(_acc * weights) / K.sum(weights) return _acc
python
def cat_acc(y, z): weights = _cat_sample_weights(y) _acc = K.cast(K.equal(K.argmax(y, axis=-1), K.argmax(z, axis=-1)), K.floatx()) _acc = K.sum(_acc * weights) / K.sum(weights) return _acc
[ "def", "cat_acc", "(", "y", ",", "z", ")", ":", "weights", "=", "_cat_sample_weights", "(", "y", ")", "_acc", "=", "K", ".", "cast", "(", "K", ".", "equal", "(", "K", ".", "argmax", "(", "y", ",", "axis", "=", "-", "1", ")", ",", "K", ".", ...
Classification accuracy for multi-categorical case
[ "Classification", "accuracy", "for", "multi", "-", "categorical", "case" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/metrics.py#L126-L134
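`_cat_sample_weights` is a private helper not shown here; from its use it evidently down-weights masked samples. A plain-NumPy sketch of the same masked categorical accuracy, under the assumption that a sample is masked when its whole one-hot target row equals a mask value:

```python
import numpy as np

def cat_acc_numpy(y, z, mask_value=-1):
    # Hypothetical stand-in for _cat_sample_weights: weight 0 for rows that
    # consist entirely of mask_value, weight 1 otherwise.
    weights = (~np.all(y == mask_value, axis=-1)).astype(float)
    hits = (np.argmax(y, axis=-1) == np.argmax(z, axis=-1)).astype(float)
    return np.sum(hits * weights) / np.sum(weights)

y = np.array([[1, 0, 0], [0, 1, 0], [-1, -1, -1]])  # last sample is masked
z = np.array([[0.9, 0.05, 0.05], [0.2, 0.5, 0.3], [0.1, 0.1, 0.8]])
print(cat_acc_numpy(y, z))  # 1.0 -- both unmasked samples predicted correctly
```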
7,038
gagneurlab/concise
concise/utils/model_data.py
split_KFold_idx
def split_KFold_idx(train, cv_n_folds=5, stratified=False, random_state=None): """Get k-fold indices generator """ test_len(train) y = train[1] n_rows = y.shape[0] if stratified: if len(y.shape) > 1: if y.shape[1] > 1: raise ValueError("Can't use stratified K-fold with multi-column response variable") else: y = y[:, 0] # http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html#sklearn.model_selection.StratifiedKFold.split return model_selection.StratifiedKFold(n_splits=cv_n_folds, shuffle=True, random_state=random_state)\ .split(X=np.zeros((n_rows, 1)), y=y) else: return model_selection.KFold(n_splits=cv_n_folds, shuffle=True, random_state=random_state)\ .split(X=np.zeros((n_rows, 1)))
python
def split_KFold_idx(train, cv_n_folds=5, stratified=False, random_state=None): test_len(train) y = train[1] n_rows = y.shape[0] if stratified: if len(y.shape) > 1: if y.shape[1] > 1: raise ValueError("Can't use stratified K-fold with multi-column response variable") else: y = y[:, 0] # http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html#sklearn.model_selection.StratifiedKFold.split return model_selection.StratifiedKFold(n_splits=cv_n_folds, shuffle=True, random_state=random_state)\ .split(X=np.zeros((n_rows, 1)), y=y) else: return model_selection.KFold(n_splits=cv_n_folds, shuffle=True, random_state=random_state)\ .split(X=np.zeros((n_rows, 1)))
[ "def", "split_KFold_idx", "(", "train", ",", "cv_n_folds", "=", "5", ",", "stratified", "=", "False", ",", "random_state", "=", "None", ")", ":", "test_len", "(", "train", ")", "y", "=", "train", "[", "1", "]", "n_rows", "=", "y", ".", "shape", "[", ...
Get k-fold indices generator
[ "Get", "k", "-", "fold", "indices", "generator" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/model_data.py#L38-L55
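`split_KFold_idx` takes `train` as a tuple whose second element is the response array and yields `(train_idx, test_idx)` pairs. The non-stratified path reduces to a plain scikit-learn `KFold` over row indices, as this standalone sketch shows:

```python
import numpy as np
from sklearn import model_selection

X = np.random.randn(10, 4)
y = np.random.randint(0, 2, size=(10, 1))

# What the non-stratified branch builds: only the row count matters,
# hence the dummy np.zeros((n_rows, 1)) feature matrix.
folds = model_selection.KFold(n_splits=5, shuffle=True, random_state=0) \
    .split(X=np.zeros((y.shape[0], 1)))

for train_idx, test_idx in folds:
    X_tr, y_tr = X[train_idx], y[train_idx]
    X_te, y_te = X[test_idx], y[test_idx]
    print(len(train_idx), len(test_idx))  # 8 2 in each of the 5 folds
```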
7,039
gagneurlab/concise
concise/legacy/get_data.py
prepare_data
def prepare_data(dt, features, response, sequence, id_column=None, seq_align="end", trim_seq_len=None): """ Prepare data for Concise.train or ConciseCV.train. Args: dt: A pandas DataFrame containing all the required data. features (List of strings): Column names of `dt` used to produce the features design matrix. These columns should be numeric. response (str or list of strings): Name(s) of column(s) used as a response variable. sequence (str): Name of the column storing the DNA/RNA sequences. id_column (str): Name of the column used as the row identifier. seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences? trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered. standardize_features (bool): If True, columns in the returned matrix :py:attr:`X_seq` are normalized to have zero mean and unit variance. Returns: tuple: Tuple with elements: :code:`(X_feat, X_seq, y, id_vec)`, where: - :py:attr:`X_feat`: features design matrix of shape :code:`(N, D)`, where N is :code:`len(dt)` and :code:`D = len(features)` - :py:attr:`X_seq`: sequence matrix of shape :code:`(N, 1, trim_seq_len, 4)`. It represents 1-hot encoding of the DNA/RNA sequence. - :py:attr:`y`: Response variable 1-column matrix of shape :code:`(N, 1)` - :py:attr:`id_vec`: 1D Character array of shape :code:`(N)`. It represents the ID's of individual rows. Note: One-hot encoding of the DNA/RNA sequence is the following: .. code:: python { "A": np.array([1, 0, 0, 0]), "C": np.array([0, 1, 0, 0]), "G": np.array([0, 0, 1, 0]), "T": np.array([0, 0, 0, 1]), "U": np.array([0, 0, 0, 1]), "N": np.array([0, 0, 0, 0]), } """ if type(response) is str: response = [response] X_feat = np.array(dt[features], dtype="float32") y = np.array(dt[response], dtype="float32") X_seq = encodeDNA(seq_vec=dt[sequence], maxlen=trim_seq_len, seq_align=seq_align) X_seq = np.array(X_seq, dtype="float32") id_vec = np.array(dt[id_column]) return X_feat, X_seq, y, id_vec
python
def prepare_data(dt, features, response, sequence, id_column=None, seq_align="end", trim_seq_len=None): if type(response) is str: response = [response] X_feat = np.array(dt[features], dtype="float32") y = np.array(dt[response], dtype="float32") X_seq = encodeDNA(seq_vec=dt[sequence], maxlen=trim_seq_len, seq_align=seq_align) X_seq = np.array(X_seq, dtype="float32") id_vec = np.array(dt[id_column]) return X_feat, X_seq, y, id_vec
[ "def", "prepare_data", "(", "dt", ",", "features", ",", "response", ",", "sequence", ",", "id_column", "=", "None", ",", "seq_align", "=", "\"end\"", ",", "trim_seq_len", "=", "None", ")", ":", "if", "type", "(", "response", ")", "is", "str", ":", "res...
Prepare data for Concise.train or ConciseCV.train. Args: dt: A pandas DataFrame containing all the required data. features (List of strings): Column names of `dt` used to produce the features design matrix. These columns should be numeric. response (str or list of strings): Name(s) of column(s) used as a response variable. sequence (str): Name of the column storing the DNA/RNA sequences. id_column (str): Name of the column used as the row identifier. seq_align (str): one of ``{"start", "end"}``. To which end should we align sequences? trim_seq_len (int): Consider only first `trim_seq_len` bases of each sequence when generating the sequence design matrix. If :python:`None`, set :py:attr:`trim_seq_len` to the longest sequence length, hence whole sequences are considered. standardize_features (bool): If True, columns in the returned matrix :py:attr:`X_seq` are normalized to have zero mean and unit variance. Returns: tuple: Tuple with elements: :code:`(X_feat, X_seq, y, id_vec)`, where: - :py:attr:`X_feat`: features design matrix of shape :code:`(N, D)`, where N is :code:`len(dt)` and :code:`D = len(features)` - :py:attr:`X_seq`: sequence matrix of shape :code:`(N, 1, trim_seq_len, 4)`. It represents 1-hot encoding of the DNA/RNA sequence. - :py:attr:`y`: Response variable 1-column matrix of shape :code:`(N, 1)` - :py:attr:`id_vec`: 1D Character array of shape :code:`(N)`. It represents the ID's of individual rows. Note: One-hot encoding of the DNA/RNA sequence is the following: .. code:: python { "A": np.array([1, 0, 0, 0]), "C": np.array([0, 1, 0, 0]), "G": np.array([0, 0, 1, 0]), "T": np.array([0, 0, 0, 1]), "U": np.array([0, 0, 0, 1]), "N": np.array([0, 0, 0, 0]), }
[ "Prepare", "data", "for", "Concise", ".", "train", "or", "ConciseCV", ".", "train", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/get_data.py#L10-L59
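A hedged usage sketch of `prepare_data` on a toy DataFrame; all column names below are invented for illustration. Note that although `id_column` defaults to `None`, the body always evaluates `dt[id_column]`, so an ID column is effectively required:

```python
import pandas as pd

# Illustrative input table (column names are hypothetical).
dt = pd.DataFrame({
    "id": ["a", "b"],
    "feat1": [0.1, 0.3],
    "feat2": [1.0, 2.0],
    "y": [0.5, 1.5],
    "seq": ["CTTACTCAGA", "TCTTTA"],
})

# Assuming prepare_data is imported from concise.legacy.get_data:
# X_feat, X_seq, y, id_vec = prepare_data(
#     dt, features=["feat1", "feat2"], response="y",
#     sequence="seq", id_column="id", seq_align="end", trim_seq_len=8)
# X_feat.shape == (2, 2); y.shape == (2, 1); id_vec.shape == (2,)
# X_seq follows encodeDNA and comes out as (2, 8, 4).
```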
7,040
gagneurlab/concise
concise/preprocessing/splines.py
EncodeSplines.fit
def fit(self, x): """Calculate the knot placement from the values ranges. # Arguments x: numpy array, either N x D or N x L x D dimensional. """ assert x.ndim > 1 self.data_min_ = np.min(x, axis=tuple(range(x.ndim - 1))) self.data_max_ = np.max(x, axis=tuple(range(x.ndim - 1))) if self.share_knots: self.data_min_[:] = np.min(self.data_min_) self.data_max_[:] = np.max(self.data_max_)
python
def fit(self, x): assert x.ndim > 1 self.data_min_ = np.min(x, axis=tuple(range(x.ndim - 1))) self.data_max_ = np.max(x, axis=tuple(range(x.ndim - 1))) if self.share_knots: self.data_min_[:] = np.min(self.data_min_) self.data_max_[:] = np.max(self.data_max_)
[ "def", "fit", "(", "self", ",", "x", ")", ":", "assert", "x", ".", "ndim", ">", "1", "self", ".", "data_min_", "=", "np", ".", "min", "(", "x", ",", "axis", "=", "tuple", "(", "range", "(", "x", ".", "ndim", "-", "1", ")", ")", ")", "self",...
Calculate the knot placement from the values ranges. # Arguments x: numpy array, either N x D or N x L x D dimensional.
[ "Calculate", "the", "knot", "placement", "from", "the", "values", "ranges", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/splines.py#L52-L64
7,041
gagneurlab/concise
concise/preprocessing/splines.py
EncodeSplines.transform
def transform(self, x, warn=True): """Obtain the transformed values """ # 1. split across last dimension # 2. re-use ranges # 3. Merge array_list = [encodeSplines(x[..., i].reshape((-1, 1)), n_bases=self.n_bases, spline_order=self.degree, warn=warn, start=self.data_min_[i], end=self.data_max_[i]).reshape(x[..., i].shape + (self.n_bases,)) for i in range(x.shape[-1])] return np.stack(array_list, axis=-2)
python
def transform(self, x, warn=True): # 1. split across last dimension # 2. re-use ranges # 3. Merge array_list = [encodeSplines(x[..., i].reshape((-1, 1)), n_bases=self.n_bases, spline_order=self.degree, warn=warn, start=self.data_min_[i], end=self.data_max_[i]).reshape(x[..., i].shape + (self.n_bases,)) for i in range(x.shape[-1])] return np.stack(array_list, axis=-2)
[ "def", "transform", "(", "self", ",", "x", ",", "warn", "=", "True", ")", ":", "# 1. split across last dimension", "# 2. re-use ranges", "# 3. Merge", "array_list", "=", "[", "encodeSplines", "(", "x", "[", "...", ",", "i", "]", ".", "reshape", "(", "(", "...
Obtain the transformed values
[ "Obtain", "the", "transformed", "values" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/splines.py#L66-L79
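`fit` only records per-feature value ranges (the knot placement); `transform` then expands each scalar feature into `n_bases` spline basis values, stacking a new axis. The range computation can be reproduced in plain NumPy:

```python
import numpy as np

x = np.random.uniform(0, 10, size=(100, 3))  # N x D input

# What fit() stores: per-feature min/max over every axis except the last.
data_min = np.min(x, axis=tuple(range(x.ndim - 1)))  # shape (3,)
data_max = np.max(x, axis=tuple(range(x.ndim - 1)))  # shape (3,)

# With share_knots=True, all features share one global range instead:
data_min_shared = np.full_like(data_min, np.min(data_min))
data_max_shared = np.full_like(data_max, np.max(data_max))

# transform() evaluates n_bases basis functions per feature over these
# ranges, so an (N, D) input becomes (N, D, n_bases).
print(data_min.shape, data_max.shape)
```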
7,042
gagneurlab/concise
concise/layers.py
InputCodon
def InputCodon(seq_length, ignore_stop_codons=True, name=None, **kwargs): """Input placeholder for array returned by `encodeCodon` Note: The seq_length is divided by 3 Wrapper for: `keras.layers.Input((seq_length // 3, 61 or 64), name=name, **kwargs)` """ if ignore_stop_codons: vocab = CODONS else: vocab = CODONS + STOP_CODONS assert seq_length % 3 == 0 return Input((seq_length // 3, len(vocab)), name=name, **kwargs)
python
def InputCodon(seq_length, ignore_stop_codons=True, name=None, **kwargs): if ignore_stop_codons: vocab = CODONS else: vocab = CODONS + STOP_CODONS assert seq_length % 3 == 0 return Input((seq_length // 3, len(vocab)), name=name, **kwargs)
[ "def", "InputCodon", "(", "seq_length", ",", "ignore_stop_codons", "=", "True", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ignore_stop_codons", ":", "vocab", "=", "CODONS", "else", ":", "vocab", "=", "CODONS", "+", "STOP_CODONS", ...
Input placeholder for array returned by `encodeCodon` Note: The seq_length is divided by 3 Wrapper for: `keras.layers.Input((seq_length // 3, 61 or 64), name=name, **kwargs)`
[ "Input", "placeholder", "for", "array", "returned", "by", "encodeCodon" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L40-L53
7,043
gagneurlab/concise
concise/layers.py
InputAA
def InputAA(seq_length, name=None, **kwargs): """Input placeholder for array returned by `encodeAA` Wrapper for: `keras.layers.Input((seq_length, 22), name=name, **kwargs)` """ return Input((seq_length, len(AMINO_ACIDS)), name=name, **kwargs)
python
def InputAA(seq_length, name=None, **kwargs): return Input((seq_length, len(AMINO_ACIDS)), name=name, **kwargs)
[ "def", "InputAA", "(", "seq_length", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "Input", "(", "(", "seq_length", ",", "len", "(", "AMINO_ACIDS", ")", ")", ",", "name", "=", "name", ",", "*", "*", "kwargs", ")" ]
Input placeholder for array returned by `encodeAA` Wrapper for: `keras.layers.Input((seq_length, 22), name=name, **kwargs)`
[ "Input", "placeholder", "for", "array", "returned", "by", "encodeAA" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L56-L61
7,044
gagneurlab/concise
concise/layers.py
InputRNAStructure
def InputRNAStructure(seq_length, name=None, **kwargs): """Input placeholder for array returned by `encodeRNAStructure` Wrapper for: `keras.layers.Input((seq_length, 5), name=name, **kwargs)` """ return Input((seq_length, len(RNAplfold_PROFILES)), name=name, **kwargs)
python
def InputRNAStructure(seq_length, name=None, **kwargs): return Input((seq_length, len(RNAplfold_PROFILES)), name=name, **kwargs)
[ "def", "InputRNAStructure", "(", "seq_length", ",", "name", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "Input", "(", "(", "seq_length", ",", "len", "(", "RNAplfold_PROFILES", ")", ")", ",", "name", "=", "name", ",", "*", "*", "kwargs", ...
Input placeholder for array returned by `encodeRNAStructure` Wrapper for: `keras.layers.Input((seq_length, 5), name=name, **kwargs)`
[ "Input", "placeholder", "for", "array", "returned", "by", "encodeRNAStructure" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L64-L69
7,045
gagneurlab/concise
concise/layers.py
ConvSequence._plot_weights_heatmap
def _plot_weights_heatmap(self, index=None, figsize=None, **kwargs): """Plot weights as a heatmap index = can be a particular index or a list of indices **kwargs - additional arguments to concise.utils.plot.heatmap """ W = self.get_weights()[0] if index is None: index = np.arange(W.shape[2]) fig = heatmap(np.swapaxes(W[:, :, index], 0, 1), plot_name="filter: ", vocab=self.VOCAB, figsize=figsize, **kwargs) # plt.show() return fig
python
def _plot_weights_heatmap(self, index=None, figsize=None, **kwargs): W = self.get_weights()[0] if index is None: index = np.arange(W.shape[2]) fig = heatmap(np.swapaxes(W[:, :, index], 0, 1), plot_name="filter: ", vocab=self.VOCAB, figsize=figsize, **kwargs) # plt.show() return fig
[ "def", "_plot_weights_heatmap", "(", "self", ",", "index", "=", "None", ",", "figsize", "=", "None", ",", "*", "*", "kwargs", ")", ":", "W", "=", "self", ".", "get_weights", "(", ")", "[", "0", "]", "if", "index", "is", "None", ":", "index", "=", ...
Plot weights as a heatmap index = can be a particular index or a list of indices **kwargs - additional arguments to concise.utils.plot.heatmap
[ "Plot", "weights", "as", "a", "heatmap" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L197-L210
7,046
gagneurlab/concise
concise/layers.py
ConvSequence._plot_weights_motif
def _plot_weights_motif(self, index, plot_type="motif_raw", background_probs=DEFAULT_BASE_BACKGROUND, ncol=1, figsize=None): """Index can be a single int, a list of ints, or None (all filters) """ w_all = self.get_weights() if len(w_all) == 0: raise Exception("Layer needs to be initialized first") W = w_all[0] if index is None: index = np.arange(W.shape[2]) if isinstance(index, int): index = [index] fig = plt.figure(figsize=figsize) if plot_type == "motif_pwm" and plot_type in self.AVAILABLE_PLOTS: arr = pssm_array2pwm_array(W, background_probs) elif plot_type == "motif_raw" and plot_type in self.AVAILABLE_PLOTS: arr = W elif plot_type == "motif_pwm_info" and plot_type in self.AVAILABLE_PLOTS: quasi_pwm = pssm_array2pwm_array(W, background_probs) arr = _pwm2pwm_info(quasi_pwm) else: raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS)) fig = seqlogo_fig(arr, vocab=self.VOCAB_name, figsize=figsize, ncol=ncol, plot_name="filter: ") # fig.show() return fig
python
def _plot_weights_motif(self, index, plot_type="motif_raw", background_probs=DEFAULT_BASE_BACKGROUND, ncol=1, figsize=None): w_all = self.get_weights() if len(w_all) == 0: raise Exception("Layer needs to be initialized first") W = w_all[0] if index is None: index = np.arange(W.shape[2]) if isinstance(index, int): index = [index] fig = plt.figure(figsize=figsize) if plot_type == "motif_pwm" and plot_type in self.AVAILABLE_PLOTS: arr = pssm_array2pwm_array(W, background_probs) elif plot_type == "motif_raw" and plot_type in self.AVAILABLE_PLOTS: arr = W elif plot_type == "motif_pwm_info" and plot_type in self.AVAILABLE_PLOTS: quasi_pwm = pssm_array2pwm_array(W, background_probs) arr = _pwm2pwm_info(quasi_pwm) else: raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS)) fig = seqlogo_fig(arr, vocab=self.VOCAB_name, figsize=figsize, ncol=ncol, plot_name="filter: ") # fig.show() return fig
[ "def", "_plot_weights_motif", "(", "self", ",", "index", ",", "plot_type", "=", "\"motif_raw\"", ",", "background_probs", "=", "DEFAULT_BASE_BACKGROUND", ",", "ncol", "=", "1", ",", "figsize", "=", "None", ")", ":", "w_all", "=", "self", ".", "get_weights", ...
Index can be a single int, a list of ints, or None (all filters)
[ "Index", "can", "only", "be", "a", "single", "int" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L212-L243
7,047
gagneurlab/concise
concise/layers.py
ConvSequence.plot_weights
def plot_weights(self, index=None, plot_type="motif_raw", figsize=None, ncol=1, **kwargs): """Plot filters as heatmap or motifs index = can be a particular index or a list of indices **kwargs - additional arguments to concise.utils.plot.heatmap """ if "heatmap" in self.AVAILABLE_PLOTS and plot_type == "heatmap": return self._plot_weights_heatmap(index=index, figsize=figsize, ncol=ncol, **kwargs) elif plot_type[:5] == "motif": return self._plot_weights_motif(index=index, plot_type=plot_type, figsize=figsize, ncol=ncol, **kwargs) else: raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))
python
def plot_weights(self, index=None, plot_type="motif_raw", figsize=None, ncol=1, **kwargs): if "heatmap" in self.AVAILABLE_PLOTS and plot_type == "heatmap": return self._plot_weights_heatmap(index=index, figsize=figsize, ncol=ncol, **kwargs) elif plot_type[:5] == "motif": return self._plot_weights_motif(index=index, plot_type=plot_type, figsize=figsize, ncol=ncol, **kwargs) else: raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))
[ "def", "plot_weights", "(", "self", ",", "index", "=", "None", ",", "plot_type", "=", "\"motif_raw\"", ",", "figsize", "=", "None", ",", "ncol", "=", "1", ",", "*", "*", "kwargs", ")", ":", "if", "\"heatmap\"", "in", "self", ".", "AVAILABLE_PLOTS", "an...
Plot filters as heatmap or motifs index = can be a particular index or a list of indices **kwargs - additional arguments to concise.utils.plot.heatmap
[ "Plot", "filters", "as", "heatmap", "or", "motifs" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/layers.py#L245-L257
7,048
gagneurlab/concise
concise/initializers.py
_check_pwm_list
def _check_pwm_list(pwm_list): """Check the input validity """ for pwm in pwm_list: if not isinstance(pwm, PWM): raise TypeError("element {0} of pwm_list is not of type PWM".format(pwm)) return True
python
def _check_pwm_list(pwm_list): for pwm in pwm_list: if not isinstance(pwm, PWM): raise TypeError("element {0} of pwm_list is not of type PWM".format(pwm)) return True
[ "def", "_check_pwm_list", "(", "pwm_list", ")", ":", "for", "pwm", "in", "pwm_list", ":", "if", "not", "isinstance", "(", "pwm", ",", "PWM", ")", ":", "raise", "TypeError", "(", "\"element {0} of pwm_list is not of type PWM\"", ".", "format", "(", "pwm", ")", ...
Check the input validity
[ "Check", "the", "input", "validity" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/initializers.py#L22-L28
7,049
gagneurlab/concise
concise/utils/plot.py
heatmap
def heatmap(w, vmin=None, vmax=None, diverge_color=False, ncol=1, plot_name=None, vocab=["A", "C", "G", "T"], figsize=(6, 2)): """Plot a heatmap from weight matrix w vmin, vmax = z axis range diverge_color = Should we use diverging colors? plot_name = plot_title vocab = vocabulary (corresponds to the first axis) """ # Generate y and x values from the dimension lengths assert len(vocab) == w.shape[0] plt_y = np.arange(w.shape[0] + 1) + 0.5 plt_x = np.arange(w.shape[1] + 1) - 0.5 z_min = w.min() z_max = w.max() if vmin is None: vmin = z_min if vmax is None: vmax = z_max if diverge_color: color_map = plt.cm.RdBu else: color_map = plt.cm.Blues fig = plt.figure(figsize=figsize) # multiple axis if len(w.shape) == 3: n_plots = w.shape[2] nrow = math.ceil(n_plots / ncol) else: n_plots = 1 nrow = 1 ncol = 1 for i in range(n_plots): if len(w.shape) == 3: w_cur = w[:, :, i] else: w_cur = w ax = plt.subplot(nrow, ncol, i + 1) plt.tight_layout() im = ax.pcolormesh(plt_x, plt_y, w_cur, cmap=color_map, vmin=vmin, vmax=vmax, edgecolors="white") ax.grid(False) ax.set_yticklabels([""] + vocab, minor=False) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) ax.set_xticks(np.arange(w_cur.shape[1] + 1)) ax.set_xlim(plt_x.min(), plt_x.max()) ax.set_ylim(plt_y.min(), plt_y.max()) # nice scale location: # http://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) fig.colorbar(im, cax=cax) if plot_name is not None: if n_plots > 0: pln = plot_name + " {0}".format(i) else: pln = plot_name ax.set_title(pln) ax.set_aspect('equal') return fig
python
def heatmap(w, vmin=None, vmax=None, diverge_color=False, ncol=1, plot_name=None, vocab=["A", "C", "G", "T"], figsize=(6, 2)): # Generate y and x values from the dimension lengths assert len(vocab) == w.shape[0] plt_y = np.arange(w.shape[0] + 1) + 0.5 plt_x = np.arange(w.shape[1] + 1) - 0.5 z_min = w.min() z_max = w.max() if vmin is None: vmin = z_min if vmax is None: vmax = z_max if diverge_color: color_map = plt.cm.RdBu else: color_map = plt.cm.Blues fig = plt.figure(figsize=figsize) # multiple axis if len(w.shape) == 3: n_plots = w.shape[2] nrow = math.ceil(n_plots / ncol) else: n_plots = 1 nrow = 1 ncol = 1 for i in range(n_plots): if len(w.shape) == 3: w_cur = w[:, :, i] else: w_cur = w ax = plt.subplot(nrow, ncol, i + 1) plt.tight_layout() im = ax.pcolormesh(plt_x, plt_y, w_cur, cmap=color_map, vmin=vmin, vmax=vmax, edgecolors="white") ax.grid(False) ax.set_yticklabels([""] + vocab, minor=False) ax.yaxis.set_major_locator(MaxNLocator(integer=True)) ax.set_xticks(np.arange(w_cur.shape[1] + 1)) ax.set_xlim(plt_x.min(), plt_x.max()) ax.set_ylim(plt_y.min(), plt_y.max()) # nice scale location: # http://stackoverflow.com/questions/18195758/set-matplotlib-colorbar-size-to-match-graph divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) fig.colorbar(im, cax=cax) if plot_name is not None: if n_plots > 0: pln = plot_name + " {0}".format(i) else: pln = plot_name ax.set_title(pln) ax.set_aspect('equal') return fig
[ "def", "heatmap", "(", "w", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "diverge_color", "=", "False", ",", "ncol", "=", "1", ",", "plot_name", "=", "None", ",", "vocab", "=", "[", "\"A\"", ",", "\"C\"", ",", "\"G\"", ",", "\"T\"", "...
Plot a heatmap from weight matrix w vmin, vmax = z axis range diverge_color = Should we use diverging colors? plot_name = plot_title vocab = vocabulary (corresponds to the first axis)
[ "Plot", "a", "heatmap", "from", "weight", "matrix", "w" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/plot.py#L22-L89
7,050
gagneurlab/concise
concise/utils/plot.py
add_letter_to_axis
def add_letter_to_axis(ax, let, col, x, y, height): """Add 'let' with position x,y and height height to matplotlib axis 'ax'. """ if len(let) == 2: colors = [col, "white"] elif len(let) == 1: colors = [col] else: raise ValueError("3 or more Polygons are not supported") for polygon, color in zip(let, colors): new_polygon = affinity.scale( polygon, yfact=height, origin=(0, 0, 0)) new_polygon = affinity.translate( new_polygon, xoff=x, yoff=y) patch = PolygonPatch( new_polygon, edgecolor=color, facecolor=color) ax.add_patch(patch) return
python
def add_letter_to_axis(ax, let, col, x, y, height): if len(let) == 2: colors = [col, "white"] elif len(let) == 1: colors = [col] else: raise ValueError("3 or more Polygons are not supported") for polygon, color in zip(let, colors): new_polygon = affinity.scale( polygon, yfact=height, origin=(0, 0, 0)) new_polygon = affinity.translate( new_polygon, xoff=x, yoff=y) patch = PolygonPatch( new_polygon, edgecolor=color, facecolor=color) ax.add_patch(patch) return
[ "def", "add_letter_to_axis", "(", "ax", ",", "let", ",", "col", ",", "x", ",", "y", ",", "height", ")", ":", "if", "len", "(", "let", ")", "==", "2", ":", "colors", "=", "[", "col", ",", "\"white\"", "]", "elif", "len", "(", "let", ")", "==", ...
Add 'let' with position x,y and height height to matplotlib axis 'ax'.
[ "Add", "let", "with", "position", "x", "y", "and", "height", "height", "to", "matplotlib", "axis", "ax", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/plot.py#L174-L192
7,051
gagneurlab/concise
concise/utils/plot.py
seqlogo
def seqlogo(letter_heights, vocab="DNA", ax=None): """Make a logo plot # Arguments letter_heights: "motif length" x "vocabulary size" numpy array Can also contain negative values. vocab: str, Vocabulary name. Can be: DNA, RNA, AA, RNAStruct. ax: matplotlib axis """ ax = ax or plt.gca() assert letter_heights.shape[1] == len(VOCABS[vocab]) x_range = [1, letter_heights.shape[0]] pos_heights = np.copy(letter_heights) pos_heights[letter_heights < 0] = 0 neg_heights = np.copy(letter_heights) neg_heights[letter_heights > 0] = 0 for x_pos, heights in enumerate(letter_heights): letters_and_heights = sorted(zip(heights, list(VOCABS[vocab].keys()))) y_pos_pos = 0.0 y_neg_pos = 0.0 for height, letter in letters_and_heights: color = VOCABS[vocab][letter] polygons = letter_polygons[letter] if height > 0: add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_pos_pos, height) y_pos_pos += height else: add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_neg_pos, height) y_neg_pos += height # if add_hline: # ax.axhline(color="black", linewidth=1) ax.set_xlim(x_range[0] - 1, x_range[1] + 1) ax.grid(False) ax.set_xticks(list(range(*x_range)) + [x_range[-1]]) ax.set_aspect(aspect='auto', adjustable='box') ax.autoscale_view()
python
def seqlogo(letter_heights, vocab="DNA", ax=None): ax = ax or plt.gca() assert letter_heights.shape[1] == len(VOCABS[vocab]) x_range = [1, letter_heights.shape[0]] pos_heights = np.copy(letter_heights) pos_heights[letter_heights < 0] = 0 neg_heights = np.copy(letter_heights) neg_heights[letter_heights > 0] = 0 for x_pos, heights in enumerate(letter_heights): letters_and_heights = sorted(zip(heights, list(VOCABS[vocab].keys()))) y_pos_pos = 0.0 y_neg_pos = 0.0 for height, letter in letters_and_heights: color = VOCABS[vocab][letter] polygons = letter_polygons[letter] if height > 0: add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_pos_pos, height) y_pos_pos += height else: add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_neg_pos, height) y_neg_pos += height # if add_hline: # ax.axhline(color="black", linewidth=1) ax.set_xlim(x_range[0] - 1, x_range[1] + 1) ax.grid(False) ax.set_xticks(list(range(*x_range)) + [x_range[-1]]) ax.set_aspect(aspect='auto', adjustable='box') ax.autoscale_view()
[ "def", "seqlogo", "(", "letter_heights", ",", "vocab", "=", "\"DNA\"", ",", "ax", "=", "None", ")", ":", "ax", "=", "ax", "or", "plt", ".", "gca", "(", ")", "assert", "letter_heights", ".", "shape", "[", "1", "]", "==", "len", "(", "VOCABS", "[", ...
Make a logo plot # Arguments letter_heights: "motif length" x "vocabulary size" numpy array Can also contain negative values. vocab: str, Vocabulary name. Can be: DNA, RNA, AA, RNAStruct. ax: matplotlib axis
[ "Make", "a", "logo", "plot" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/plot.py#L196-L234
7,052
gagneurlab/concise
concise/legacy/analyze.py
get_cv_accuracy
def get_cv_accuracy(res): """ Extract the cv accuracy from the model """ ac_list = [(accuracy["train_acc_final"], accuracy["test_acc_final"] ) for accuracy, weights in res] ac = np.array(ac_list) perf = { "mean_train_acc": np.mean(ac[:, 0]), "std_train_acc": np.std(ac[:, 0]), "mean_test_acc": np.mean(ac[:, 1]), "std_test_acc": np.std(ac[:, 1]), } return perf
python
def get_cv_accuracy(res): ac_list = [(accuracy["train_acc_final"], accuracy["test_acc_final"] ) for accuracy, weights in res] ac = np.array(ac_list) perf = { "mean_train_acc": np.mean(ac[:, 0]), "std_train_acc": np.std(ac[:, 0]), "mean_test_acc": np.mean(ac[:, 1]), "std_test_acc": np.std(ac[:, 1]), } return perf
[ "def", "get_cv_accuracy", "(", "res", ")", ":", "ac_list", "=", "[", "(", "accuracy", "[", "\"train_acc_final\"", "]", ",", "accuracy", "[", "\"test_acc_final\"", "]", ")", "for", "accuracy", ",", "weights", "in", "res", "]", "ac", "=", "np", ".", "array...
Extract the cv accuracy from the model
[ "Extract", "the", "cv", "accuracy", "from", "the", "model" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/analyze.py#L9-L26
7,053
gagneurlab/concise
concise/preprocessing/sequence.py
one_hot2string
def one_hot2string(arr, vocab): """Convert a one-hot encoded array back to string """ tokens = one_hot2token(arr) indexToLetter = _get_index_dict(vocab) return [''.join([indexToLetter[x] for x in row]) for row in tokens]
python
def one_hot2string(arr, vocab): tokens = one_hot2token(arr) indexToLetter = _get_index_dict(vocab) return [''.join([indexToLetter[x] for x in row]) for row in tokens]
[ "def", "one_hot2string", "(", "arr", ",", "vocab", ")", ":", "tokens", "=", "one_hot2token", "(", "arr", ")", "indexToLetter", "=", "_get_index_dict", "(", "vocab", ")", "return", "[", "''", ".", "join", "(", "[", "indexToLetter", "[", "x", "]", "for", ...
Convert a one-hot encoded array back to string
[ "Convert", "a", "one", "-", "hot", "encoded", "array", "back", "to", "string" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L32-L38
7,054
gagneurlab/concise
concise/preprocessing/sequence.py
tokenize
def tokenize(seq, vocab, neutral_vocab=[]): """Convert sequence to integers # Arguments seq: Sequence to encode vocab: Vocabulary to use neutral_vocab: Neutral vocabulary -> assign those values to -1 # Returns List of length `len(seq)` with integers from `-1` to `len(vocab) - 1` """ # Req: all vocabs have the same length if isinstance(neutral_vocab, str): neutral_vocab = [neutral_vocab] nchar = len(vocab[0]) for l in vocab + neutral_vocab: assert len(l) == nchar assert len(seq) % nchar == 0 # since we are using striding vocab_dict = _get_vocab_dict(vocab) for l in neutral_vocab: vocab_dict[l] = -1 # current performance bottleneck return [vocab_dict[seq[(i * nchar):((i + 1) * nchar)]] for i in range(len(seq) // nchar)]
python
def tokenize(seq, vocab, neutral_vocab=[]): # Req: all vocabs have the same length if isinstance(neutral_vocab, str): neutral_vocab = [neutral_vocab] nchar = len(vocab[0]) for l in vocab + neutral_vocab: assert len(l) == nchar assert len(seq) % nchar == 0 # since we are using striding vocab_dict = _get_vocab_dict(vocab) for l in neutral_vocab: vocab_dict[l] = -1 # current performance bottleneck return [vocab_dict[seq[(i * nchar):((i + 1) * nchar)]] for i in range(len(seq) // nchar)]
[ "def", "tokenize", "(", "seq", ",", "vocab", ",", "neutral_vocab", "=", "[", "]", ")", ":", "# Req: all vocabs have the same length", "if", "isinstance", "(", "neutral_vocab", ",", "str", ")", ":", "neutral_vocab", "=", "[", "neutral_vocab", "]", "nchar", "=",...
Convert sequence to integers # Arguments seq: Sequence to encode vocab: Vocabulary to use neutral_vocab: Neutral vocabulary -> assign those values to -1 # Returns List of length `len(seq)` with integers from `-1` to `len(vocab) - 1`
[ "Convert", "sequence", "to", "integers" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L41-L66
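`_get_vocab_dict` is a private helper; it presumably maps each vocabulary entry to its index. A self-contained rendition of the striding logic above, covering both single-character and 3-mer vocabularies:

```python
def tokenize_sketch(seq, vocab, neutral_vocab=()):
    # Stand-in for the function above; vocab_dict mirrors _get_vocab_dict.
    nchar = len(vocab[0])
    assert len(seq) % nchar == 0
    vocab_dict = {v: i for i, v in enumerate(vocab)}
    for v in neutral_vocab:
        vocab_dict[v] = -1  # neutral entries map to -1
    return [vocab_dict[seq[i * nchar:(i + 1) * nchar]]
            for i in range(len(seq) // nchar)]

print(tokenize_sketch("ACGTN", ["A", "C", "G", "T"], ["N"]))  # [0, 1, 2, 3, -1]
print(tokenize_sketch("ATGGCT", ["ATG", "GCT"]))              # [0, 1]
```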
7,055
gagneurlab/concise
concise/preprocessing/sequence.py
encodeSequence
def encodeSequence(seq_vec, vocab, neutral_vocab, maxlen=None, seq_align="start", pad_value="N", encode_type="one_hot"): """Convert a list of genetic sequences into one-hot-encoded array. # Arguments seq_vec: list of strings (genetic sequences) vocab: list of chars: List of "words" to use as the vocabulary. Can be strings of length>0, but all need to have the same length. For DNA, this is: ["A", "C", "G", "T"]. neutral_vocab: list of chars: Values used to pad the sequence or represent unknown-values. For DNA, this is: ["N"]. maxlen: int or None, Should we trim (subset) the resulting sequence. If None don't trim. Note that trims wrt the align parameter. It should be smaller than the longest sequence. seq_align: character; 'end' or 'start' To which end should we align sequences? encode_type: "one_hot" or "token". "token" represents each vocab element as a positive integer from 1 to len(vocab) + 1. neutral_vocab is represented with 0. # Returns Array with shape for encode_type: - "one_hot": `(len(seq_vec), maxlen, len(vocab))` - "token": `(len(seq_vec), maxlen)` If `maxlen=None`, it gets the value of the longest sequence length from `seq_vec`. """ if isinstance(neutral_vocab, str): neutral_vocab = [neutral_vocab] if isinstance(seq_vec, str): raise ValueError("seq_vec should be an iterable returning " + "strings not a string itself") assert len(vocab[0]) == len(pad_value) assert pad_value in neutral_vocab assert encode_type in ["one_hot", "token"] seq_vec = pad_sequences(seq_vec, maxlen=maxlen, align=seq_align, value=pad_value) if encode_type == "one_hot": arr_list = [token2one_hot(tokenize(seq, vocab, neutral_vocab), len(vocab)) for i, seq in enumerate(seq_vec)] elif encode_type == "token": arr_list = [1 + np.array(tokenize(seq, vocab, neutral_vocab)) for seq in seq_vec] # we add 1 to be compatible with keras: https://keras.io/layers/embeddings/ # indexes > 0, 0 = padding element return np.stack(arr_list)
python
def encodeSequence(seq_vec, vocab, neutral_vocab, maxlen=None, seq_align="start", pad_value="N", encode_type="one_hot"): if isinstance(neutral_vocab, str): neutral_vocab = [neutral_vocab] if isinstance(seq_vec, str): raise ValueError("seq_vec should be an iterable returning " + "strings not a string itself") assert len(vocab[0]) == len(pad_value) assert pad_value in neutral_vocab assert encode_type in ["one_hot", "token"] seq_vec = pad_sequences(seq_vec, maxlen=maxlen, align=seq_align, value=pad_value) if encode_type == "one_hot": arr_list = [token2one_hot(tokenize(seq, vocab, neutral_vocab), len(vocab)) for i, seq in enumerate(seq_vec)] elif encode_type == "token": arr_list = [1 + np.array(tokenize(seq, vocab, neutral_vocab)) for seq in seq_vec] # we add 1 to be compatible with keras: https://keras.io/layers/embeddings/ # indexes > 0, 0 = padding element return np.stack(arr_list)
[ "def", "encodeSequence", "(", "seq_vec", ",", "vocab", ",", "neutral_vocab", ",", "maxlen", "=", "None", ",", "seq_align", "=", "\"start\"", ",", "pad_value", "=", "\"N\"", ",", "encode_type", "=", "\"one_hot\"", ")", ":", "if", "isinstance", "(", "neutral_v...
Convert a list of genetic sequences into one-hot-encoded array. # Arguments seq_vec: list of strings (genetic sequences) vocab: list of chars: List of "words" to use as the vocabulary. Can be strings of length>0, but all need to have the same length. For DNA, this is: ["A", "C", "G", "T"]. neutral_vocab: list of chars: Values used to pad the sequence or represent unknown-values. For DNA, this is: ["N"]. maxlen: int or None, Should we trim (subset) the resulting sequence. If None don't trim. Note that trims wrt the align parameter. It should be smaller than the longest sequence. seq_align: character; 'end' or 'start' To which end should we align sequences? encode_type: "one_hot" or "token". "token" represents each vocab element as a positive integer from 1 to len(vocab) + 1. neutral_vocab is represented with 0. # Returns Array with shape for encode_type: - "one_hot": `(len(seq_vec), maxlen, len(vocab))` - "token": `(len(seq_vec), maxlen)` If `maxlen=None`, it gets the value of the longest sequence length from `seq_vec`.
[ "Convert", "a", "list", "of", "genetic", "sequences", "into", "one", "-", "hot", "-", "encoded", "array", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L94-L141
7,056
gagneurlab/concise
concise/preprocessing/sequence.py
encodeDNA
def encodeDNA(seq_vec, maxlen=None, seq_align="start"): """Convert the DNA sequence into 1-hot-encoding numpy array # Arguments seq_vec: list of chars List of sequences that can have different lengths maxlen: int or None, Should we trim (subset) the resulting sequence. If None don't trim. Note that trims wrt the align parameter. It should be smaller than the longest sequence. seq_align: character; 'end' or 'start' To which end should we align sequences? # Returns 3D numpy array of shape (len(seq_vec), trim_seq_len(or maximal sequence length if None), 4) # Example ```python >>> sequence_vec = ['CTTACTCAGA', 'TCTTTA'] >>> X_seq = encodeDNA(sequence_vec, seq_align="end", maxlen=8) >>> X_seq.shape (2, 8, 4) >>> print(X_seq) [[[0 0 0 1] [1 0 0 0] [0 1 0 0] [0 0 0 1] [0 1 0 0] [1 0 0 0] [0 0 1 0] [1 0 0 0]] [[0 0 0 0] [0 0 0 0] [0 0 0 1] [0 1 0 0] [0 0 0 1] [0 0 0 1] [0 0 0 1] [1 0 0 0]]] ``` """ return encodeSequence(seq_vec, vocab=DNA, neutral_vocab="N", maxlen=maxlen, seq_align=seq_align, pad_value="N", encode_type="one_hot")
python
def encodeDNA(seq_vec, maxlen=None, seq_align="start"): return encodeSequence(seq_vec, vocab=DNA, neutral_vocab="N", maxlen=maxlen, seq_align=seq_align, pad_value="N", encode_type="one_hot")
[ "def", "encodeDNA", "(", "seq_vec", ",", "maxlen", "=", "None", ",", "seq_align", "=", "\"start\"", ")", ":", "return", "encodeSequence", "(", "seq_vec", ",", "vocab", "=", "DNA", ",", "neutral_vocab", "=", "\"N\"", ",", "maxlen", "=", "maxlen", ",", "se...
Convert the DNA sequence into 1-hot-encoding numpy array # Arguments seq_vec: list of chars List of sequences that can have different lengths maxlen: int or None, Should we trim (subset) the resulting sequence. If None don't trim. Note that trims wrt the align parameter. It should be smaller than the longest sequence. seq_align: character; 'end' or 'start' To which end should we align sequences? # Returns 3D numpy array of shape (len(seq_vec), trim_seq_len(or maximal sequence length if None), 4) # Example ```python >>> sequence_vec = ['CTTACTCAGA', 'TCTTTA'] >>> X_seq = encodeDNA(sequence_vec, seq_align="end", maxlen=8) >>> X_seq.shape (2, 8, 4) >>> print(X_seq) [[[0 0 0 1] [1 0 0 0] [0 1 0 0] [0 0 0 1] [0 1 0 0] [1 0 0 0] [0 0 1 0] [1 0 0 0]] [[0 0 0 0] [0 0 0 0] [0 0 0 1] [0 1 0 0] [0 0 0 1] [0 0 0 1] [0 0 0 1] [1 0 0 0]]] ```
[ "Convert", "the", "DNA", "sequence", "into", "1", "-", "hot", "-", "encoding", "numpy", "array" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L144-L196
7,057
gagneurlab/concise
concise/preprocessing/sequence.py
encodeRNA
def encodeRNA(seq_vec, maxlen=None, seq_align="start"): """Convert the RNA sequence into 1-hot-encoding numpy array as for encodeDNA """ return encodeSequence(seq_vec, vocab=RNA, neutral_vocab="N", maxlen=maxlen, seq_align=seq_align, pad_value="N", encode_type="one_hot")
python
def encodeRNA(seq_vec, maxlen=None, seq_align="start"): return encodeSequence(seq_vec, vocab=RNA, neutral_vocab="N", maxlen=maxlen, seq_align=seq_align, pad_value="N", encode_type="one_hot")
[ "def", "encodeRNA", "(", "seq_vec", ",", "maxlen", "=", "None", ",", "seq_align", "=", "\"start\"", ")", ":", "return", "encodeSequence", "(", "seq_vec", ",", "vocab", "=", "RNA", ",", "neutral_vocab", "=", "\"N\"", ",", "maxlen", "=", "maxlen", ",", "se...
Convert the RNA sequence into 1-hot-encoding numpy array as for encodeDNA
[ "Convert", "the", "RNA", "sequence", "into", "1", "-", "hot", "-", "encoding", "numpy", "array", "as", "for", "encodeDNA" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L199-L208
7,058
gagneurlab/concise
concise/preprocessing/sequence.py
encodeCodon
def encodeCodon(seq_vec, ignore_stop_codons=True, maxlen=None, seq_align="start", encode_type="one_hot"): """Convert the Codon sequence into 1-hot-encoding numpy array # Arguments seq_vec: List of strings/DNA sequences ignore_stop_codons: boolean; if True, STOP_CODONS are omitted from one-hot encoding. maxlen: Maximum sequence length. See `pad_sequences` for more detail seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail encode_type: can be `"one_hot"` or `token` for token encoding of codons (incremental integer ). # Returns numpy.ndarray of shape `(len(seq_vec), maxlen / 3, 61 if ignore_stop_codons else 64)` """ if ignore_stop_codons: vocab = CODONS neutral_vocab = STOP_CODONS + ["NNN"] else: vocab = CODONS + STOP_CODONS neutral_vocab = ["NNN"] # replace all U's with T's seq_vec = [str(seq).replace("U", "T") for seq in seq_vec] return encodeSequence(seq_vec, vocab=vocab, neutral_vocab=neutral_vocab, maxlen=maxlen, seq_align=seq_align, pad_value="NNN", encode_type=encode_type)
python
def encodeCodon(seq_vec, ignore_stop_codons=True, maxlen=None, seq_align="start", encode_type="one_hot"): if ignore_stop_codons: vocab = CODONS neutral_vocab = STOP_CODONS + ["NNN"] else: vocab = CODONS + STOP_CODONS neutral_vocab = ["NNN"] # replace all U's with T's seq_vec = [str(seq).replace("U", "T") for seq in seq_vec] return encodeSequence(seq_vec, vocab=vocab, neutral_vocab=neutral_vocab, maxlen=maxlen, seq_align=seq_align, pad_value="NNN", encode_type=encode_type)
[ "def", "encodeCodon", "(", "seq_vec", ",", "ignore_stop_codons", "=", "True", ",", "maxlen", "=", "None", ",", "seq_align", "=", "\"start\"", ",", "encode_type", "=", "\"one_hot\"", ")", ":", "if", "ignore_stop_codons", ":", "vocab", "=", "CODONS", "neutral_vo...
Convert the Codon sequence into 1-hot-encoding numpy array # Arguments seq_vec: List of strings/DNA sequences ignore_stop_codons: boolean; if True, STOP_CODONS are omitted from one-hot encoding. maxlen: Maximum sequence length. See `pad_sequences` for more detail seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail encode_type: can be `"one_hot"` or `token` for token encoding of codons (incremental integer ). # Returns numpy.ndarray of shape `(len(seq_vec), maxlen / 3, 61 if ignore_stop_codons else 64)`
[ "Convert", "the", "Codon", "sequence", "into", "1", "-", "hot", "-", "encoding", "numpy", "array" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L211-L240
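Two details of `encodeCodon` worth making concrete: RNA input is normalized to DNA by the U-to-T replacement, and a sequence of length L yields L/3 codon positions with 61 channels (sense codons) or 64 when stop codons are kept:

```python
# U -> T normalization, shown standalone: RNA and DNA input give the same result.
seqs = ["AUGGCU", "ATGGCT"]
print([str(s).replace("U", "T") for s in seqs])  # ['ATGGCT', 'ATGGCT']

# Shape arithmetic (channel counts per the docstring above):
seq = "ATGGCTTAA"        # 9 nt -> 3 codons, the last one a stop codon
print(len(seq) // 3)     # 3 codon positions
# one-hot depth: 61 with ignore_stop_codons=True (stops become neutral), else 64
```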
7,059
gagneurlab/concise
concise/preprocessing/sequence.py
encodeAA
def encodeAA(seq_vec, maxlen=None, seq_align="start", encode_type="one_hot"): """Convert the Amino-acid sequence into 1-hot-encoding numpy array # Arguments seq_vec: List of strings/amino-acid sequences maxlen: Maximum sequence length. See `pad_sequences` for more detail seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail encode_type: can be `"one_hot"` or `token` for token encoding of amino acids (incremental integer ). # Returns numpy.ndarray of shape `(len(seq_vec), maxlen, 22)` """ return encodeSequence(seq_vec, vocab=AMINO_ACIDS, neutral_vocab="_", maxlen=maxlen, seq_align=seq_align, pad_value="_", encode_type=encode_type)
python
def encodeAA(seq_vec, maxlen=None, seq_align="start", encode_type="one_hot"): return encodeSequence(seq_vec, vocab=AMINO_ACIDS, neutral_vocab="_", maxlen=maxlen, seq_align=seq_align, pad_value="_", encode_type=encode_type)
[ "def", "encodeAA", "(", "seq_vec", ",", "maxlen", "=", "None", ",", "seq_align", "=", "\"start\"", ",", "encode_type", "=", "\"one_hot\"", ")", ":", "return", "encodeSequence", "(", "seq_vec", ",", "vocab", "=", "AMINO_ACIDS", ",", "neutral_vocab", "=", "\"_...
Convert the Amino-acid sequence into 1-hot-encoding numpy array # Arguments seq_vec: List of strings/amino-acid sequences maxlen: Maximum sequence length. See `pad_sequences` for more detail seq_align: How to align the sequences of variable lengths. See `pad_sequences` for more detail encode_type: can be `"one_hot"` or `token` for token encoding of amino acids (incremental integer ). # Returns numpy.ndarray of shape `(len(seq_vec), maxlen, 22)`
[ "Convert", "the", "Amino", "-", "acid", "sequence", "into", "1", "-", "hot", "-", "encoding", "numpy", "array" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L243-L261
7,060
gagneurlab/concise
concise/utils/position.py
_validate_pos
def _validate_pos(df): """Validates the returned positional object """ assert isinstance(df, pd.DataFrame) assert ["seqname", "position", "strand"] == df.columns.tolist() assert df.position.dtype == np.dtype("int64") assert df.strand.dtype == np.dtype("O") assert df.seqname.dtype == np.dtype("O") return df
python
def _validate_pos(df): assert isinstance(df, pd.DataFrame) assert ["seqname", "position", "strand"] == df.columns.tolist() assert df.position.dtype == np.dtype("int64") assert df.strand.dtype == np.dtype("O") assert df.seqname.dtype == np.dtype("O") return df
[ "def", "_validate_pos", "(", "df", ")", ":", "assert", "isinstance", "(", "df", ",", "pd", ".", "DataFrame", ")", "assert", "[", "\"seqname\"", ",", "\"position\"", ",", "\"strand\"", "]", "==", "df", ".", "columns", ".", "tolist", "(", ")", "assert", ...
Validates the returned positional object
[ "Validates", "the", "returned", "positional", "object" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/position.py#L131-L139
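A standalone illustration of the positional object `_validate_pos` accepts: exactly the columns `seqname`, `position`, `strand`, with `object`, `int64` and `object` dtypes respectively.

```python
# Build a DataFrame that passes every assertion in _validate_pos.
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "seqname": ["chr1", "chr2"],                       # object dtype
    "position": np.array([100, 250], dtype="int64"),   # int64 dtype
    "strand": ["+", "-"],                              # object dtype
})
assert list(df.columns) == ["seqname", "position", "strand"]
assert df.position.dtype == np.dtype("int64")
```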
7,061
gagneurlab/concise
concise/data/attract.py
get_pwm_list
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001): """Get a list of Attract PWMs. # Arguments pwm_id_list: List of ids from the `PWM_id` column in `get_metadata()` table pseudocountProb: Added pseudocount probabilities to the PWM # Returns List of `concise.utils.pwm.PWM` instances. """ l = load_motif_db(ATTRACT_PWM) l = {k.split()[0]: v for k, v in l.items()} pwm_list = [PWM(l[str(m)] + pseudocountProb, name=m) for m in pwm_id_list] return pwm_list
python
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001): l = load_motif_db(ATTRACT_PWM) l = {k.split()[0]: v for k, v in l.items()} pwm_list = [PWM(l[str(m)] + pseudocountProb, name=m) for m in pwm_id_list] return pwm_list
[ "def", "get_pwm_list", "(", "pwm_id_list", ",", "pseudocountProb", "=", "0.0001", ")", ":", "l", "=", "load_motif_db", "(", "ATTRACT_PWM", ")", "l", "=", "{", "k", ".", "split", "(", ")", "[", "0", "]", ":", "v", "for", "k", ",", "v", "in", "l", ...
Get a list of Attract PWMs. # Arguments pwm_id_list: List of ids from the `PWM_id` column in `get_metadata()` table pseudocountProb: Added pseudocount probabilities to the PWM # Returns List of `concise.utils.pwm.PWM` instances.
[ "Get", "a", "list", "of", "Attract", "PWM", "s", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/data/attract.py#L38-L51
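A usage sketch for the Attract loader, assuming `concise` is installed and that `get_metadata()` (referenced in the docstring above) returns a table with a `PWM_id` column; the specific ids are whatever that table contains.

```python
# Fetch two Attract PWMs by id and inspect them.
from concise.data.attract import get_metadata, get_pwm_list

meta = get_metadata()                 # metadata table with a PWM_id column
ids = list(meta.PWM_id.unique()[:2])  # pick any two valid ids
for pwm in get_pwm_list(ids):
    print(pwm)                        # concise.utils.pwm.PWM instances
```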
7,062
gagneurlab/concise
concise/losses.py
mask_loss
def mask_loss(loss, mask_value=MASK_VALUE): """Generates a new loss function that ignores values where `y_true == mask_value`. # Arguments loss: str; name of the keras loss function from `keras.losses` mask_value: int; which values should be masked # Returns function; Masked version of the `loss` # Example ```python categorical_crossentropy_masked = mask_loss("categorical_crossentropy") ``` """ loss_fn = kloss.deserialize(loss) def masked_loss_fn(y_true, y_pred): # currently not supported with NA's: # - there is no K.is_nan implementation in keras.backend # - https://github.com/fchollet/keras/issues/1628 mask = K.cast(K.not_equal(y_true, mask_value), K.floatx()) # divide by the mask mean to correct for the number of loss terms actually evaluated return loss_fn(y_true * mask, y_pred * mask) / K.mean(mask) masked_loss_fn.__name__ = loss + "_masked" return masked_loss_fn
python
def mask_loss(loss, mask_value=MASK_VALUE): loss_fn = kloss.deserialize(loss) def masked_loss_fn(y_true, y_pred): # currently not supported with NA's: # - there is no K.is_nan implementation in keras.backend # - https://github.com/fchollet/keras/issues/1628 mask = K.cast(K.not_equal(y_true, mask_value), K.floatx()) # divide by the mask mean to correct for the number of loss terms actually evaluated return loss_fn(y_true * mask, y_pred * mask) / K.mean(mask) masked_loss_fn.__name__ = loss + "_masked" return masked_loss_fn
[ "def", "mask_loss", "(", "loss", ",", "mask_value", "=", "MASK_VALUE", ")", ":", "loss_fn", "=", "kloss", ".", "deserialize", "(", "loss", ")", "def", "masked_loss_fn", "(", "y_true", ",", "y_pred", ")", ":", "# currently not suppoerd with NA's:", "# - there is...
Generates a new loss function that ignores values where `y_true == mask_value`. # Arguments loss: str; name of the keras loss function from `keras.losses` mask_value: int; which values should be masked # Returns function; Masked version of the `loss` # Example ```python categorical_crossentropy_masked = mask_loss("categorical_crossentropy") ```
[ "Generates", "a", "new", "loss", "function", "that", "ignores", "values", "where", "y_true", "==", "mask_value", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/losses.py#L9-L36
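A small demonstration of the masking semantics, assuming a Keras backend is available; the mask value `-1` is passed explicitly rather than relying on the module's `MASK_VALUE` default.

```python
# Entries where y_true equals the mask value contribute nothing to the
# loss; dividing by the mask mean rescales for the dropped terms.
import numpy as np
from keras import backend as K
from concise.losses import mask_loss

loss_fn = mask_loss("mean_squared_error", mask_value=-1)
y_true = K.constant(np.array([[1.0, -1.0], [0.0, 2.0]]))
y_pred = K.constant(np.array([[0.5, 9.9], [0.0, 2.0]]))
print(K.eval(loss_fn(y_true, y_pred)))  # the 9.9 prediction at the masked slot is ignored
```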
7,063
gagneurlab/concise
concise/data/hocomoco.py
get_pwm_list
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001): """Get a list of HOCOMOCO PWMs. # Arguments pwm_id_list: List of ids from the `PWM_id` column in `get_metadata()` table pseudocountProb: Added pseudocount probabilities to the PWM # Returns List of `concise.utils.pwm.PWM` instances. """ l = load_motif_db(HOCOMOCO_PWM) l = {k.split()[0]: v for k, v in l.items()} pwm_list = [PWM(_normalize_pwm(l[m]) + pseudocountProb, name=m) for m in pwm_id_list] return pwm_list
python
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001): l = load_motif_db(HOCOMOCO_PWM) l = {k.split()[0]: v for k, v in l.items()} pwm_list = [PWM(_normalize_pwm(l[m]) + pseudocountProb, name=m) for m in pwm_id_list] return pwm_list
[ "def", "get_pwm_list", "(", "pwm_id_list", ",", "pseudocountProb", "=", "0.0001", ")", ":", "l", "=", "load_motif_db", "(", "HOCOMOCO_PWM", ")", "l", "=", "{", "k", ".", "split", "(", ")", "[", "0", "]", ":", "v", "for", "k", ",", "v", "in", "l", ...
Get a list of HOCOMOCO PWMs. # Arguments pwm_id_list: List of ids from the `PWM_id` column in `get_metadata()` table pseudocountProb: Added pseudocount probabilities to the PWM # Returns List of `concise.utils.pwm.PWM` instances.
[ "Get", "a", "list", "of", "HOCOMOCO", "PWM", "s", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/data/hocomoco.py#L43-L56
7,064
gagneurlab/concise
concise/legacy/concise.py
Concise._var_res_to_weights
def _var_res_to_weights(self, var_res): """ Get model weights """ # transform the weights into our form motif_base_weights_raw = var_res["motif_base_weights"][0] motif_base_weights = np.swapaxes(motif_base_weights_raw, 0, 2) # get weights motif_weights = var_res["motif_weights"] motif_bias = var_res["motif_bias"] final_bias = var_res["final_bias"] feature_weights = var_res["feature_weights"] # get the GAM prediction: spline_pred = None spline_weights = None if self._param["n_splines"] is not None: spline_pred = self._splines["X_spline"].dot(var_res["spline_weights"]) if self._param["spline_exp"] is True: spline_pred = np.exp(spline_pred) else: spline_pred = (spline_pred + 1) spline_pred.reshape([-1]) spline_weights = var_res["spline_weights"] weights = {"motif_base_weights": motif_base_weights, "motif_weights": motif_weights, "motif_bias": motif_bias, "final_bias": final_bias, "feature_weights": feature_weights, "spline_pred": spline_pred, "spline_weights": spline_weights } return weights
python
def _var_res_to_weights(self, var_res): # transform the weights into our form motif_base_weights_raw = var_res["motif_base_weights"][0] motif_base_weights = np.swapaxes(motif_base_weights_raw, 0, 2) # get weights motif_weights = var_res["motif_weights"] motif_bias = var_res["motif_bias"] final_bias = var_res["final_bias"] feature_weights = var_res["feature_weights"] # get the GAM prediction: spline_pred = None spline_weights = None if self._param["n_splines"] is not None: spline_pred = self._splines["X_spline"].dot(var_res["spline_weights"]) if self._param["spline_exp"] is True: spline_pred = np.exp(spline_pred) else: spline_pred = (spline_pred + 1) spline_pred.reshape([-1]) spline_weights = var_res["spline_weights"] weights = {"motif_base_weights": motif_base_weights, "motif_weights": motif_weights, "motif_bias": motif_bias, "final_bias": final_bias, "feature_weights": feature_weights, "spline_pred": spline_pred, "spline_weights": spline_weights } return weights
[ "def", "_var_res_to_weights", "(", "self", ",", "var_res", ")", ":", "# transform the weights into our form", "motif_base_weights_raw", "=", "var_res", "[", "\"motif_base_weights\"", "]", "[", "0", "]", "motif_base_weights", "=", "np", ".", "swapaxes", "(", "motif_bas...
Get model weights
[ "Get", "model", "weights" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L436-L472
7,065
gagneurlab/concise
concise/legacy/concise.py
Concise._get_var_res
def _get_var_res(self, graph, var, other_var): """ Get the weights from our graph """ with tf.Session(graph=graph) as sess: sess.run(other_var["init"]) # all_vars = tf.all_variables() # print("All variable names") # print([var.name for var in all_vars]) # print("All variable values") # print(sess.run(all_vars)) var_res = self._get_var_res_sess(sess, var) return var_res
python
def _get_var_res(self, graph, var, other_var): with tf.Session(graph=graph) as sess: sess.run(other_var["init"]) # all_vars = tf.all_variables() # print("All variable names") # print([var.name for var in all_vars]) # print("All variable values") # print(sess.run(all_vars)) var_res = self._get_var_res_sess(sess, var) return var_res
[ "def", "_get_var_res", "(", "self", ",", "graph", ",", "var", ",", "other_var", ")", ":", "with", "tf", ".", "Session", "(", "graph", "=", "graph", ")", "as", "sess", ":", "sess", ".", "run", "(", "other_var", "[", "\"init\"", "]", ")", "# all_vars =...
Get the weights from our graph
[ "Get", "the", "weights", "from", "our", "graph" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L519-L532
7,066
gagneurlab/concise
concise/legacy/concise.py
Concise._convert_to_var
def _convert_to_var(self, graph, var_res): """ Create tf.Variables from a dictionary of numpy arrays var_res: dictionary of numpy arrays with the key names corresponding to var """ with graph.as_default(): var = {} for key, value in var_res.items(): if value is not None: var[key] = tf.Variable(value, name="tf_%s" % key) else: var[key] = None return var
python
def _convert_to_var(self, graph, var_res): with graph.as_default(): var = {} for key, value in var_res.items(): if value is not None: var[key] = tf.Variable(value, name="tf_%s" % key) else: var[key] = None return var
[ "def", "_convert_to_var", "(", "self", ",", "graph", ",", "var_res", ")", ":", "with", "graph", ".", "as_default", "(", ")", ":", "var", "=", "{", "}", "for", "key", ",", "value", "in", "var_res", ".", "items", "(", ")", ":", "if", "value", "is", ...
Create tf.Variables from a dictionary of numpy arrays var_res: dictionary of numpy arrays with the key names corresponding to var
[ "Create", "tf", ".", "Variables", "from", "a", "list", "of", "numpy", "arrays" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L534-L547
7,067
gagneurlab/concise
concise/legacy/concise.py
Concise.train
def train(self, X_feat, X_seq, y, X_feat_valid=None, X_seq_valid=None, y_valid=None, n_cores=3): """Train the CONCISE model :py:attr:`X_feat`, :py:attr:`X_seq`, :py:attr:`y` are preferably returned by the :py:func:`concise.prepare_data` function. Args: X_feat: Numpy (float) array of shape :code:`(N, D)`. Feature design matrix storing :code:`N` training samples and :code:`D` features X_seq: Numpy (float) array of shape :code:`(N, N_seq, 4)`. It represents 1-hot encoding of the DNA/RNA sequence (:code:`N` sequences of length :code:`N_seq`); a singleton dimension is inserted internally for backward compatibility. y: Numpy (float) array of shape :code:`(N, 1)`. Response variable. X_feat_valid: :py:attr:`X_feat` used for model validation. X_seq_valid: :py:attr:`X_seq` used for model validation. y_valid: :py:attr:`y` used for model validation. n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored. """ if X_feat_valid is None and X_seq_valid is None and y_valid is None: X_feat_valid = X_feat X_seq_valid = X_seq y_valid = y print("Using training samples also for validation ") # insert one dimension - backward compatibility X_seq = np.expand_dims(X_seq, axis=1) X_seq_valid = np.expand_dims(X_seq_valid, axis=1) # TODO: implement the re-training feature if self.is_trained() is True: print("Model already fitted. Re-training feature not implemented yet") return # input check assert X_seq.shape[0] == X_feat.shape[0] == y.shape[0] assert y.shape == (X_feat.shape[0], self._num_tasks) # extract data specific parameters self._param["seq_length"] = X_seq.shape[2] self._param["n_add_features"] = X_feat.shape[1] # more input check if not self._param["seq_length"] == X_seq_valid.shape[2]: raise Exception("sequence lengths don't match") # setup splines if self._param["n_splines"] is not None: padd_loss = self._param["motif_length"] - 1 # how much shorter is our sequence, since we don't use padding X_spline, S, _ = splines.get_gam_splines(start=0, end=self._param["seq_length"] - padd_loss - 1, # -1 due to zero-indexing n_bases=self._param["n_splines"], spline_order=3, add_intercept=False) self._splines = {"X_spline": X_spline, "S": S } # setup graph and variables self._graph = tf.Graph() self._var = self._get_var_initialization(self._graph, X_feat_train=X_feat, y_train=y) self._other_var = self._build_graph(self._graph, self._var) # TODO: save the initialized parameters var_res_init = self._get_var_res(self._graph, self._var, self._other_var) self.init_weights = self._var_res_to_weights(var_res=var_res_init) # finally train the model # - it saves the accuracy if self._param["optimizer"] == "adam": _train = self._train_adam elif self._param["optimizer"] == "lbfgs": _train = self._train_lbfgs else: raise Exception("Optimizer {} not implemented".format(self._param["optimizer"])) self._var_res = _train(X_feat, X_seq, y, X_feat_valid, X_seq_valid, y_valid, graph=self._graph, var=self._var, other_var=self._other_var, early_stop_patience=self._param["early_stop_patience"], n_cores=n_cores) self._model_fitted = True # TODO: maybe: # - add y_train_accuracy # - y_train return True
python
def train(self, X_feat, X_seq, y, X_feat_valid=None, X_seq_valid=None, y_valid=None, n_cores=3): if X_feat_valid is None and X_seq_valid is None and y_valid is None: X_feat_valid = X_feat X_seq_valid = X_seq y_valid = y print("Using training samples also for validation ") # insert one dimension - backward compatibility X_seq = np.expand_dims(X_seq, axis=1) X_seq_valid = np.expand_dims(X_seq_valid, axis=1) # TODO: implement the re-training feature if self.is_trained() is True: print("Model already fitted. Re-training feature not implemented yet") return # input check assert X_seq.shape[0] == X_feat.shape[0] == y.shape[0] assert y.shape == (X_feat.shape[0], self._num_tasks) # extract data specific parameters self._param["seq_length"] = X_seq.shape[2] self._param["n_add_features"] = X_feat.shape[1] # more input check if not self._param["seq_length"] == X_seq_valid.shape[2]: raise Exception("sequence lengths don't match") # setup splines if self._param["n_splines"] is not None: padd_loss = self._param["motif_length"] - 1 # how much shorter is our sequence, since we don't use padding X_spline, S, _ = splines.get_gam_splines(start=0, end=self._param["seq_length"] - padd_loss - 1, # -1 due to zero-indexing n_bases=self._param["n_splines"], spline_order=3, add_intercept=False) self._splines = {"X_spline": X_spline, "S": S } # setup graph and variables self._graph = tf.Graph() self._var = self._get_var_initialization(self._graph, X_feat_train=X_feat, y_train=y) self._other_var = self._build_graph(self._graph, self._var) # TODO: save the initialized parameters var_res_init = self._get_var_res(self._graph, self._var, self._other_var) self.init_weights = self._var_res_to_weights(var_res=var_res_init) # finally train the model # - it saves the accuracy if self._param["optimizer"] == "adam": _train = self._train_adam elif self._param["optimizer"] == "lbfgs": _train = self._train_lbfgs else: raise Exception("Optimizer {} not implemented".format(self._param["optimizer"])) self._var_res = _train(X_feat, X_seq, y, X_feat_valid, X_seq_valid, y_valid, graph=self._graph, var=self._var, other_var=self._other_var, early_stop_patience=self._param["early_stop_patience"], n_cores=n_cores) self._model_fitted = True # TODO: maybe: # - add y_train_accuracy # - y_train return True
[ "def", "train", "(", "self", ",", "X_feat", ",", "X_seq", ",", "y", ",", "X_feat_valid", "=", "None", ",", "X_seq_valid", "=", "None", ",", "y_valid", "=", "None", ",", "n_cores", "=", "3", ")", ":", "if", "X_feat_valid", "is", "None", "and", "X_seq_...
Train the CONCISE model :py:attr:`X_feat`, :py:attr:`X_seq`, :py:attr:`y` are preferably returned by the :py:func:`concise.prepare_data` function. Args: X_feat: Numpy (float) array of shape :code:`(N, D)`. Feature design matrix storing :code:`N` training samples and :code:`D` features X_seq: Numpy (float) array of shape :code:`(N, N_seq, 4)`. It represents 1-hot encoding of the DNA/RNA sequence (:code:`N` sequences of length :code:`N_seq`); a singleton dimension is inserted internally for backward compatibility. y: Numpy (float) array of shape :code:`(N, 1)`. Response variable. X_feat_valid: :py:attr:`X_feat` used for model validation. X_seq_valid: :py:attr:`X_seq` used for model validation. y_valid: :py:attr:`y` used for model validation. n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored.
[ "Train", "the", "CONCISE", "model" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L554-L640
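A hypothetical end-to-end sketch based on the shapes documented above; random arrays stand in for a real dataset, and the bare `Concise()` constructor call is an assumption (its arguments are not shown in this record).

```python
# Train a Concise model on random data (illustration only).
import numpy as np
from concise.legacy.concise import Concise  # module path from the record's URL

N, D, N_seq = 100, 3, 50
X_feat = np.random.rand(N, D).astype("float32")
X_seq = np.random.rand(N, N_seq, 4).astype("float32")  # train() adds the singleton dim
y = np.random.rand(N, 1).astype("float32")

co = Concise()          # assumed default constructor
co.train(X_feat, X_seq, y, n_cores=1)
```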
7,068
gagneurlab/concise
concise/legacy/concise.py
Concise._accuracy_in_session
def _accuracy_in_session(self, sess, other_var, X_feat, X_seq, y): """ Compute the accuracy from inside the tf session """ y_pred = self._predict_in_session(sess, other_var, X_feat, X_seq) return ce.mse(y_pred, y)
python
def _accuracy_in_session(self, sess, other_var, X_feat, X_seq, y): y_pred = self._predict_in_session(sess, other_var, X_feat, X_seq) return ce.mse(y_pred, y)
[ "def", "_accuracy_in_session", "(", "self", ",", "sess", ",", "other_var", ",", "X_feat", ",", "X_seq", ",", "y", ")", ":", "y_pred", "=", "self", ".", "_predict_in_session", "(", "sess", ",", "other_var", ",", "X_feat", ",", "X_seq", ")", "return", "ce"...
Compute the accuracy from inside the tf session
[ "Compute", "the", "accuracy", "from", "inside", "the", "tf", "session" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L653-L658
7,069
gagneurlab/concise
concise/legacy/concise.py
Concise._set_var_res
def _set_var_res(self, weights): """ Transform the weights to var_res """ if weights is None: return # layer 1 motif_base_weights_raw = np.swapaxes(weights["motif_base_weights"], 2, 0) motif_base_weights = motif_base_weights_raw[np.newaxis] motif_bias = weights["motif_bias"] feature_weights = weights["feature_weights"] spline_weights = weights["spline_weights"] # filter motif_weights = weights["motif_weights"] final_bias = weights["final_bias"] var_res = { "motif_base_weights": motif_base_weights, "motif_bias": motif_bias, "spline_weights": spline_weights, "feature_weights": feature_weights, "motif_weights": motif_weights, "final_bias": final_bias } # cast everything to float32 var_res = {key: value.astype(np.float32) if value is not None else None for key, value in var_res.items()} self._var_res = var_res
python
def _set_var_res(self, weights): if weights is None: return # layer 1 motif_base_weights_raw = np.swapaxes(weights["motif_base_weights"], 2, 0) motif_base_weights = motif_base_weights_raw[np.newaxis] motif_bias = weights["motif_bias"] feature_weights = weights["feature_weights"] spline_weights = weights["spline_weights"] # filter motif_weights = weights["motif_weights"] final_bias = weights["final_bias"] var_res = { "motif_base_weights": motif_base_weights, "motif_bias": motif_bias, "spline_weights": spline_weights, "feature_weights": feature_weights, "motif_weights": motif_weights, "final_bias": final_bias } # cast everything to float32 var_res = {key: value.astype(np.float32) if value is not None else None for key, value in var_res.items()} self._var_res = var_res
[ "def", "_set_var_res", "(", "self", ",", "weights", ")", ":", "if", "weights", "is", "None", ":", "return", "# layer 1", "motif_base_weights_raw", "=", "np", ".", "swapaxes", "(", "weights", "[", "\"motif_base_weights\"", "]", ",", "2", ",", "0", ")", "mot...
Transform the weights to var_res
[ "Transform", "the", "weights", "to", "var_res" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1028-L1059
7,070
gagneurlab/concise
concise/legacy/concise.py
ConciseCV._get_folds
def _get_folds(n_rows, n_folds, use_stored): """ Get the used CV folds """ # n_folds = self._n_folds # use_stored = self._use_stored_folds # n_rows = self._n_rows if use_stored is not None: # path = '~/concise/data-offline/lw-pombe/cv_folds_5.json' with open(os.path.expanduser(use_stored)) as json_file: json_data = json.load(json_file) # check if we have the same number of rows and folds: if json_data['N_rows'] != n_rows: raise Exception("N_rows from folds doesn't match the number of rows of X_seq, X_feat, y") if json_data['N_folds'] != n_folds: raise Exception("n_folds don't match", json_data['N_folds'], n_folds) kf = [(np.array(train), np.array(test)) for (train, test) in json_data['folds']] else: kf = KFold(n_splits=n_folds).split(np.zeros((n_rows, 1))) # store in a list i = 1 folds = [] for train, test in kf: fold = "fold_" + str(i) folds.append((fold, train, test)) i = i + 1 return folds
python
def _get_folds(n_rows, n_folds, use_stored): # n_folds = self._n_folds # use_stored = self._use_stored_folds # n_rows = self._n_rows if use_stored is not None: # path = '~/concise/data-offline/lw-pombe/cv_folds_5.json' with open(os.path.expanduser(use_stored)) as json_file: json_data = json.load(json_file) # check if we have the same number of rows and folds: if json_data['N_rows'] != n_rows: raise Exception("N_rows from folds doesn't match the number of rows of X_seq, X_feat, y") if json_data['N_folds'] != n_folds: raise Exception("n_folds don't match", json_data['N_folds'], n_folds) kf = [(np.array(train), np.array(test)) for (train, test) in json_data['folds']] else: kf = KFold(n_splits=n_folds).split(np.zeros((n_rows, 1))) # store in a list i = 1 folds = [] for train, test in kf: fold = "fold_" + str(i) folds.append((fold, train, test)) i = i + 1 return folds
[ "def", "_get_folds", "(", "n_rows", ",", "n_folds", ",", "use_stored", ")", ":", "# n_folds = self._n_folds", "# use_stored = self._use_stored_folds", "# n_rows = self._n_rows", "if", "use_stored", "is", "not", "None", ":", "# path = '~/concise/data-offline/lw-pombe/cv_folds_5....
Get the used CV folds
[ "Get", "the", "used", "CV", "folds" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1149-L1180
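The stored-folds JSON layout implied by `_get_folds` above (keys `N_rows`, `N_folds`, and `folds` as train/test index pairs), written and read back as a self-contained sketch:

```python
# Round-trip the fold specification format _get_folds can consume.
import json, os, tempfile

folds_spec = {
    "N_rows": 6,
    "N_folds": 2,
    "folds": [
        [[0, 1, 2], [3, 4, 5]],   # (train_indices, test_indices) for fold 1
        [[3, 4, 5], [0, 1, 2]],   # ... and fold 2
    ],
}
path = os.path.join(tempfile.gettempdir(), "cv_folds_2.json")
with open(path, "w") as f:
    json.dump(folds_spec, f)
with open(path) as f:
    assert json.load(f)["N_folds"] == 2
```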
7,071
gagneurlab/concise
concise/legacy/concise.py
ConciseCV.train
def train(self, X_feat, X_seq, y, id_vec=None, n_folds=10, use_stored_folds=None, n_cores=1, train_global_model=False): """Train the Concise model in cross-validation. Args: X_feat: See :py:func:`concise.Concise.train` X_seq: See :py:func:`concise.Concise.train` y: See :py:func:`concise.Concise.train` id_vec: List of character ids used to differentiate the training samples. Returned by :py:func:`concise.prepare_data`. n_folds (int): Number of CV-folds to use. use_stored_folds (str or None): File path to a .json file containing the fold information (as returned by :py:func:`concise.ConciseCV.get_folds`). If None, the folds are generated. n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored. train_global_model (bool): In addition to training the model in cross-validation, should the global model be fitted (using all the samples from :code:`(X_feat, X_seq, y)`). """ # TODO: input check - dimensions self._use_stored_folds = use_stored_folds self._n_folds = n_folds self._n_rows = X_feat.shape[0] # TODO: - fix the get_cv_accuracy # save: # - each model # - each model's performance # - each model's predictions # - globally: # - mean performance # - sd performance # - predictions self._kf = self._get_folds(self._n_rows, self._n_folds, self._use_stored_folds) cv_obj = {} if id_vec is None: id_vec = np.arange(1, self._n_rows + 1) best_val_acc_epoch_l = [] for fold, train, test in self._kf: X_feat_train = X_feat[train] X_seq_train = X_seq[train] y_train = y[train] X_feat_test = X_feat[test] X_seq_test = X_seq[test] y_test = y[test] id_vec_test = id_vec[test] print(fold, "/", n_folds) # copy the object dc = copy.deepcopy(self._concise_model) dc.train(X_feat_train, X_seq_train, y_train, X_feat_test, X_seq_test, y_test, n_cores=n_cores ) dc._test(X_feat_test, X_seq_test, y_test, id_vec_test) cv_obj[fold] = dc best_val_acc_epoch_l.append(dc.get_accuracy()["best_val_acc_epoch"]) self._cv_model = cv_obj # additionally train the global model if train_global_model: dc = copy.deepcopy(self._concise_model) # overwrite n_epochs with the best average number of best epochs dc._param["n_epochs"] = int(np.array(best_val_acc_epoch_l).mean()) print("training global model with n_epochs = " + str(dc._param["n_epochs"])) dc.train(X_feat, X_seq, y, n_cores=n_cores ) dc._test(X_feat, X_seq, y, id_vec) self._concise_global_model = dc
python
def train(self, X_feat, X_seq, y, id_vec=None, n_folds=10, use_stored_folds=None, n_cores=1, train_global_model=False): # TODO: input check - dimensions self._use_stored_folds = use_stored_folds self._n_folds = n_folds self._n_rows = X_feat.shape[0] # TODO: - fix the get_cv_accuracy # save: # - each model # - each model's performance # - each model's predictions # - globally: # - mean performance # - sd performance # - predictions self._kf = self._get_folds(self._n_rows, self._n_folds, self._use_stored_folds) cv_obj = {} if id_vec is None: id_vec = np.arange(1, self._n_rows + 1) best_val_acc_epoch_l = [] for fold, train, test in self._kf: X_feat_train = X_feat[train] X_seq_train = X_seq[train] y_train = y[train] X_feat_test = X_feat[test] X_seq_test = X_seq[test] y_test = y[test] id_vec_test = id_vec[test] print(fold, "/", n_folds) # copy the object dc = copy.deepcopy(self._concise_model) dc.train(X_feat_train, X_seq_train, y_train, X_feat_test, X_seq_test, y_test, n_cores=n_cores ) dc._test(X_feat_test, X_seq_test, y_test, id_vec_test) cv_obj[fold] = dc best_val_acc_epoch_l.append(dc.get_accuracy()["best_val_acc_epoch"]) self._cv_model = cv_obj # additionally train the global model if train_global_model: dc = copy.deepcopy(self._concise_model) # overwrite n_epochs with the best average number of best epochs dc._param["n_epochs"] = int(np.array(best_val_acc_epoch_l).mean()) print("training global model with n_epochs = " + str(dc._param["n_epochs"])) dc.train(X_feat, X_seq, y, n_cores=n_cores ) dc._test(X_feat, X_seq, y, id_vec) self._concise_global_model = dc
[ "def", "train", "(", "self", ",", "X_feat", ",", "X_seq", ",", "y", ",", "id_vec", "=", "None", ",", "n_folds", "=", "10", ",", "use_stored_folds", "=", "None", ",", "n_cores", "=", "1", ",", "train_global_model", "=", "False", ")", ":", "# TODO: input...
Train the Concise model in cross-validation. Args: X_feat: See :py:func:`concise.Concise.train` X_seq: See :py:func:`concise.Concise.train` y: See :py:func:`concise.Concise.train` id_vec: List of character ids used to differentiate the training samples. Returned by :py:func:`concise.prepare_data`. n_folds (int): Number of CV-folds to use. use_stored_folds (str or None): File path to a .json file containing the fold information (as returned by :py:func:`concise.ConciseCV.get_folds`). If None, the folds are generated. n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored. train_global_model (bool): In addition to training the model in cross-validation, should the global model be fitted (using all the samples from :code:`(X_feat, X_seq, y)`).
[ "Train", "the", "Concise", "model", "in", "cross", "-", "validation", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1193-L1265
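A hypothetical cross-validation sketch mirroring the signature above; that `ConciseCV` wraps a template `Concise` model via its constructor is an assumption inferred from `self._concise_model` in the code.

```python
# Run 3-fold CV on random data (illustration only).
import numpy as np
from concise.legacy.concise import Concise, ConciseCV

N = 60
X_feat = np.random.rand(N, 2).astype("float32")
X_seq = np.random.rand(N, 30, 4).astype("float32")
y = np.random.rand(N, 1).astype("float32")

cv = ConciseCV(Concise())   # assumed: constructor takes the template model
cv.train(X_feat, X_seq, y, n_folds=3, n_cores=1, train_global_model=True)
```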
7,072
gagneurlab/concise
concise/legacy/concise.py
ConciseCV._from_dict
def _from_dict(self, obj_dict): """ Initialize a model from the dictionary """ self._n_folds = obj_dict["param"]["n_folds"] self._n_rows = obj_dict["param"]["n_rows"] self._use_stored_folds = obj_dict["param"]["use_stored_folds"] self._concise_model = Concise.from_dict(obj_dict["init_model"]) if obj_dict["trained_global_model"] is None: self._concise_global_model = None else: self._concise_global_model = Concise.from_dict(obj_dict["trained_global_model"]) self._kf = [(fold, np.asarray(train), np.asarray(test)) for fold, train, test in obj_dict["folds"]] self._cv_model = {fold: Concise.from_dict(model_dict) for fold, model_dict in obj_dict["output"].items()}
python
def _from_dict(self, obj_dict): self._n_folds = obj_dict["param"]["n_folds"] self._n_rows = obj_dict["param"]["n_rows"] self._use_stored_folds = obj_dict["param"]["use_stored_folds"] self._concise_model = Concise.from_dict(obj_dict["init_model"]) if obj_dict["trained_global_model"] is None: self._concise_global_model = None else: self._concise_global_model = Concise.from_dict(obj_dict["trained_global_model"]) self._kf = [(fold, np.asarray(train), np.asarray(test)) for fold, train, test in obj_dict["folds"]] self._cv_model = {fold: Concise.from_dict(model_dict) for fold, model_dict in obj_dict["output"].items()}
[ "def", "_from_dict", "(", "self", ",", "obj_dict", ")", ":", "self", ".", "_n_folds", "=", "obj_dict", "[", "\"param\"", "]", "[", "\"n_folds\"", "]", "self", ".", "_n_rows", "=", "obj_dict", "[", "\"param\"", "]", "[", "\"n_rows\"", "]", "self", ".", ...
Initialize a model from the dictionary
[ "Initialize", "a", "model", "from", "the", "dictionary" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1342-L1358
7,073
gagneurlab/concise
concise/utils/pwm.py
pwm_array2pssm_array
def pwm_array2pssm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND): """Convert pwm array to pssm array """ b = background_probs2array(background_probs) b = b.reshape([1, 4, 1]) return np.log(arr / b).astype(arr.dtype)
python
def pwm_array2pssm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND): b = background_probs2array(background_probs) b = b.reshape([1, 4, 1]) return np.log(arr / b).astype(arr.dtype)
[ "def", "pwm_array2pssm_array", "(", "arr", ",", "background_probs", "=", "DEFAULT_BASE_BACKGROUND", ")", ":", "b", "=", "background_probs2array", "(", "background_probs", ")", "b", "=", "b", ".", "reshape", "(", "[", "1", ",", "4", ",", "1", "]", ")", "ret...
Convert pwm array to pssm array
[ "Convert", "pwm", "array", "to", "pssm", "array" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/pwm.py#L239-L244
7,074
gagneurlab/concise
concise/utils/pwm.py
pssm_array2pwm_array
def pssm_array2pwm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND): """Convert pssm array to pwm array """ b = background_probs2array(background_probs) b = b.reshape([1, 4, 1]) return (np.exp(arr) * b).astype(arr.dtype)
python
def pssm_array2pwm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND): b = background_probs2array(background_probs) b = b.reshape([1, 4, 1]) return (np.exp(arr) * b).astype(arr.dtype)
[ "def", "pssm_array2pwm_array", "(", "arr", ",", "background_probs", "=", "DEFAULT_BASE_BACKGROUND", ")", ":", "b", "=", "background_probs2array", "(", "background_probs", ")", "b", "=", "b", ".", "reshape", "(", "[", "1", ",", "4", ",", "1", "]", ")", "ret...
Convert pssm array to pwm array
[ "Convert", "pssm", "array", "to", "pwm", "array" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/pwm.py#L247-L252
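A round-trip sketch for the two converters above: with the same background probabilities, `pwm -> pssm -> pwm` recovers the input, since the transforms are `log(p/b)` and `exp(s)*b`.

```python
# PWM <-> PSSM round trip; axis 1 must hold the four bases because the
# background is reshaped to [1, 4, 1] inside both converters.
import numpy as np
from concise.utils.pwm import pwm_array2pssm_array, pssm_array2pwm_array

pwm = np.full((1, 4, 3), 0.25, dtype="float32")   # uniform toy PWM
pssm = pwm_array2pssm_array(pwm)                  # log(p / background)
restored = pssm_array2pwm_array(pssm)             # exp(s) * background
assert np.allclose(pwm, restored, atol=1e-6)
```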
7,075
gagneurlab/concise
concise/utils/pwm.py
load_motif_db
def load_motif_db(filename, skipn_matrix=0): """Read the motif file in the following format ``` >motif_name <skip n>0.1<delim>0.2<delim>0.5<delim>0.6 ... >motif_name2 .... ``` Delim can be anything supported by np.loadtxt # Arguments filename: str, file path skipn_matrix: integer, number of characters to skip when reading the numeric matrix (for ENCODE files, use 2) # Returns Dictionary of numpy arrays """ # read-lines if filename.endswith(".gz"): f = gzip.open(filename, 'rt', encoding='utf-8') else: f = open(filename, 'r') lines = f.readlines() f.close() motifs_dict = {} motif_lines = "" motif_name = None def lines2matrix(lines): return np.loadtxt(StringIO(lines)) for line in lines: if line.startswith(">"): if motif_lines: # lines -> matrix motifs_dict[motif_name] = lines2matrix(motif_lines) motif_name = line[1:].strip() motif_lines = "" else: motif_lines += line[skipn_matrix:] if motif_lines and motif_name is not None: motifs_dict[motif_name] = lines2matrix(motif_lines) return motifs_dict
python
def load_motif_db(filename, skipn_matrix=0): # read-lines if filename.endswith(".gz"): f = gzip.open(filename, 'rt', encoding='utf-8') else: f = open(filename, 'r') lines = f.readlines() f.close() motifs_dict = {} motif_lines = "" motif_name = None def lines2matrix(lines): return np.loadtxt(StringIO(lines)) for line in lines: if line.startswith(">"): if motif_lines: # lines -> matrix motifs_dict[motif_name] = lines2matrix(motif_lines) motif_name = line[1:].strip() motif_lines = "" else: motif_lines += line[skipn_matrix:] if motif_lines and motif_name is not None: motifs_dict[motif_name] = lines2matrix(motif_lines) return motifs_dict
[ "def", "load_motif_db", "(", "filename", ",", "skipn_matrix", "=", "0", ")", ":", "# read-lines", "if", "filename", ".", "endswith", "(", "\".gz\"", ")", ":", "f", "=", "gzip", ".", "open", "(", "filename", ",", "'rt'", ",", "encoding", "=", "'utf-8'", ...
Read the motif file in the following format ``` >motif_name <skip n>0.1<delim>0.2<delim>0.5<delim>0.6 ... >motif_name2 .... ``` Delim can be anything supported by np.loadtxt # Arguments filename: str, file path skipn_matrix: integer, number of characters to skip when reading the numeric matrix (for ENCODE files, use 2) # Returns Dictionary of numpy arrays
[ "Read", "the", "motif", "file", "in", "the", "following", "format" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/pwm.py#L255-L306
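A self-contained sketch of the file format `load_motif_db` parses: `>` headers followed by whitespace-delimited numeric rows, here written to a temp file and read back (assuming `concise` is installed).

```python
# Write a tiny motif file and parse it with load_motif_db.
import os, tempfile
from concise.utils.pwm import load_motif_db

text = """>motif_A
0.1 0.2 0.3 0.4
0.4 0.3 0.2 0.1
>motif_B
0.25 0.25 0.25 0.25
"""
path = os.path.join(tempfile.gettempdir(), "motifs.txt")
with open(path, "w") as f:
    f.write(text)
motifs = load_motif_db(path)
print(sorted(motifs))             # ['motif_A', 'motif_B']
print(motifs["motif_A"].shape)    # (2, 4); single-row motifs come back 1-D
```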
7,076
gagneurlab/concise
concise/utils/fasta.py
iter_fasta
def iter_fasta(file_path): """Returns an iterator over the fasta file Given a fasta file, yield tuples of (header, sequence). Code modified from Brent Pedersen's: "Correct Way To Parse A Fasta File In Python" # Example ```python fasta = iter_fasta("hg19.fa") for header, seq in fasta: print(header) ``` """ fh = open(file_path) # ditch the boolean (x[0]) and just keep the header or sequence since # we know they alternate. faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">")) for header in faiter: # drop the ">" headerStr = header.__next__()[1:].strip() # join all sequence lines to one. seq = "".join(s.strip() for s in faiter.__next__()) yield (headerStr, seq)
python
def iter_fasta(file_path): fh = open(file_path) # ditch the boolean (x[0]) and just keep the header or sequence since # we know they alternate. faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">")) for header in faiter: # drop the ">" headerStr = header.__next__()[1:].strip() # join all sequence lines to one. seq = "".join(s.strip() for s in faiter.__next__()) yield (headerStr, seq)
[ "def", "iter_fasta", "(", "file_path", ")", ":", "fh", "=", "open", "(", "file_path", ")", "# ditch the boolean (x[0]) and just keep the header or sequence since", "# we know they alternate.", "faiter", "=", "(", "x", "[", "1", "]", "for", "x", "in", "groupby", "(",...
Returns an iterator over the fasta file Given a fasta file, yield tuples of (header, sequence). Code modified from Brent Pedersen's: "Correct Way To Parse A Fasta File In Python" # Example ```python fasta = iter_fasta("hg19.fa") for header, seq in fasta: print(header) ```
[ "Returns", "an", "iterator", "over", "the", "fasta", "file" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/fasta.py#L11-L39
7,077
gagneurlab/concise
concise/utils/fasta.py
write_fasta
def write_fasta(file_path, seq_list, name_list=None): """Write a fasta file # Arguments file_path: file path seq_list: List of strings name_list: List of names corresponding to the sequences. If not None, it should have the same length as `seq_list` """ if name_list is None: name_list = [str(i) for i in range(len(seq_list))] # needs to be dict or seq with open(file_path, "w") as f: for i in range(len(seq_list)): f.write(">" + name_list[i] + "\n" + seq_list[i] + "\n")
python
def write_fasta(file_path, seq_list, name_list=None): if name_list is None: name_list = [str(i) for i in range(len(seq_list))] # needs to be dict or seq with open(file_path, "w") as f: for i in range(len(seq_list)): f.write(">" + name_list[i] + "\n" + seq_list[i] + "\n")
[ "def", "write_fasta", "(", "file_path", ",", "seq_list", ",", "name_list", "=", "None", ")", ":", "if", "name_list", "is", "None", ":", "name_list", "=", "[", "str", "(", "i", ")", "for", "i", "in", "range", "(", "len", "(", "seq_list", ")", ")", "...
Write a fasta file # Arguments file_path: file path seq_list: List of strings name_list: List of names corresponding to the sequences. If not None, it should have the same length as `seq_list`
[ "Write", "a", "fasta", "file" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/fasta.py#L42-L57
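A round-trip sketch combining `write_fasta` with `iter_fasta` from the earlier record, assuming `concise` is installed:

```python
# Write two sequences to FASTA, then stream them back.
import os, tempfile
from concise.utils.fasta import write_fasta, iter_fasta

path = os.path.join(tempfile.gettempdir(), "toy.fa")
write_fasta(path, ["ACGT", "GGGA"], name_list=["seq1", "seq2"])
for header, seq in iter_fasta(path):
    print(header, seq)   # seq1 ACGT, then seq2 GGGA
```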
7,078
gagneurlab/concise
concise/preprocessing/structure.py
read_RNAplfold
def read_RNAplfold(tmpdir, maxlen=None, seq_align="start", pad_with="E"): """ pad_with: which secondary structure class should be used to pad the sequence? """ assert pad_with in {"P", "H", "I", "M", "E"} def read_profile(tmpdir, P): return [values.strip().split("\t") for seq_name, values in iter_fasta("{tmp}/{P}_profile.fa".format(tmp=tmpdir, P=P))] def nelem(P, pad_with): """get the right neutral element """ return 1 if P == pad_with else 0 arr_hime = np.array([pad_sequences(read_profile(tmpdir, P), value=[nelem(P, pad_with)], align=seq_align, maxlen=maxlen) for P in RNAplfold_PROFILES_EXECUTE], dtype="float32") # add the pairedness column arr_p = 1 - arr_hime.sum(axis=0)[np.newaxis] arr = np.concatenate((arr_p, arr_hime)) # reshape to: seq, seq_length, num_channels arr = np.moveaxis(arr, 0, 2) return arr
python
def read_RNAplfold(tmpdir, maxlen=None, seq_align="start", pad_with="E"): assert pad_with in {"P", "H", "I", "M", "E"} def read_profile(tmpdir, P): return [values.strip().split("\t") for seq_name, values in iter_fasta("{tmp}/{P}_profile.fa".format(tmp=tmpdir, P=P))] def nelem(P, pad_with): """get the right neutral element """ return 1 if P == pad_with else 0 arr_hime = np.array([pad_sequences(read_profile(tmpdir, P), value=[nelem(P, pad_with)], align=seq_align, maxlen=maxlen) for P in RNAplfold_PROFILES_EXECUTE], dtype="float32") # add the pairedness column arr_p = 1 - arr_hime.sum(axis=0)[np.newaxis] arr = np.concatenate((arr_p, arr_hime)) # reshape to: seq, seq_length, num_channels arr = np.moveaxis(arr, 0, 2) return arr
[ "def", "read_RNAplfold", "(", "tmpdir", ",", "maxlen", "=", "None", ",", "seq_align", "=", "\"start\"", ",", "pad_with", "=", "\"E\"", ")", ":", "assert", "pad_with", "in", "{", "\"P\"", ",", "\"H\"", ",", "\"I\"", ",", "\"M\"", ",", "\"E\"", "}", "def...
pad_with: which secondary structure class should be used to pad the sequence?
[ "pad_with", "=", "with", "which", "2ndary", "structure", "should", "we", "pad", "the", "sequence?" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/structure.py#L42-L69
7,079
gagneurlab/concise
concise/effects/ism.py
ism
def ism(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs, output_filter_mask=None, out_annotation=None, diff_type="log_odds", rc_handling="maximum"): """In-silico mutagenesis Using ISM with diff_type 'log_odds' and rc_handling 'maximum' will produce predictions as used in [DeepSEA](http://www.nature.com/nmeth/journal/v12/n10/full/nmeth.3547.html). ISM offers two ways to calculate the difference between the outputs created by reference and alternative sequence and two different methods to select whether to use the output generated from the forward or from the reverse-complement sequences. To calculate "e-values" as mentioned in DeepSEA, the same ISM prediction has to be performed on a randomised set of 1 million 1000genomes, MAF-matched variants to get a background of predicted effects of random SNPs. # Arguments model: Keras model ref: Input sequence with the reference genotype in the mutation position ref_rc: Reverse complement of the 'ref' argument alt: Input sequence with the alternative genotype in the mutation position alt_rc: Reverse complement of the 'alt' argument mutation_positions: Position on which the mutation was placed in the forward sequences out_annotation_all_outputs: Output labels of the model. output_filter_mask: Mask of boolean values indicating which model outputs should be used. Use this or 'out_annotation' out_annotation: List of output labels for which of the outputs (in case of a multi-task model) the predictions should be calculated. diff_type: "log_odds" or "diff". When set to 'log_odds' calculate scores based on log_odds, which assumes the model output is a probability. When set to 'diff' the model output for 'ref' is subtracted from 'alt'. When using 'log_odds' with outputs that are not in the range [0,1], NaN will be returned. rc_handling: "average" or "maximum". Either average over the predictions derived from forward and reverse-complement predictions ('average') or pick the prediction with the bigger absolute value ('maximum'). # Returns Dictionary with the key `ism` which contains a pandas DataFrame containing the calculated values for each (selected) model output and input sequence """ seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc} assert diff_type in ["log_odds", "diff"] assert rc_handling in ["average", "maximum"] assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"]) assert get_seq_len(ref)[0] == mutation_positions.shape[0] assert len(mutation_positions.shape) == 1 # determine which outputs should be selected if output_filter_mask is None: if out_annotation is None: output_filter_mask = np.arange(out_annotation_all_outputs.shape[0]) else: output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0] # make sure the labels are assigned correctly out_annotation = out_annotation_all_outputs[output_filter_mask] preds = {} for k in seqs: # preds[k] = model.predict(seqs[k]) preds[k] = np.array(model.predict(seqs[k])[..., output_filter_mask]) if diff_type == "log_odds": if np.any([(preds[k].min() < 0 or preds[k].max() > 1) for k in preds]): warnings.warn("Using log_odds on model outputs that are not bounded in [0,1]") diffs = np.log(preds["alt"] / (1 - preds["alt"])) - np.log(preds["ref"] / (1 - preds["ref"])) diffs_rc = np.log(preds["alt_rc"] / (1 - preds["alt_rc"])) - np.log(preds["ref_rc"] / (1 - preds["ref_rc"])) elif diff_type == "diff": diffs = preds["alt"] - preds["ref"] diffs_rc = preds["alt_rc"] - preds["ref_rc"] if rc_handling == "average": diffs = np.mean([diffs, diffs_rc], axis=0) elif rc_handling == "maximum": replace_filt = np.abs(diffs) < np.abs(diffs_rc) diffs[replace_filt] = diffs_rc[replace_filt] diffs = pd.DataFrame(diffs, columns=out_annotation) return {"ism": diffs}
python
def ism(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs, output_filter_mask=None, out_annotation=None, diff_type="log_odds", rc_handling="maximum"): seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc} assert diff_type in ["log_odds", "diff"] assert rc_handling in ["average", "maximum"] assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"]) assert get_seq_len(ref)[0] == mutation_positions.shape[0] assert len(mutation_positions.shape) == 1 # determine which outputs should be selected if output_filter_mask is None: if out_annotation is None: output_filter_mask = np.arange(out_annotation_all_outputs.shape[0]) else: output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0] # make sure the labels are assigned correctly out_annotation = out_annotation_all_outputs[output_filter_mask] preds = {} for k in seqs: # preds[k] = model.predict(seqs[k]) preds[k] = np.array(model.predict(seqs[k])[..., output_filter_mask]) if diff_type == "log_odds": if np.any([(preds[k].min() < 0 or preds[k].max() > 1) for k in preds]): warnings.warn("Using log_odds on model outputs that are not bounded in [0,1]") diffs = np.log(preds["alt"] / (1 - preds["alt"])) - np.log(preds["ref"] / (1 - preds["ref"])) diffs_rc = np.log(preds["alt_rc"] / (1 - preds["alt_rc"])) - np.log(preds["ref_rc"] / (1 - preds["ref_rc"])) elif diff_type == "diff": diffs = preds["alt"] - preds["ref"] diffs_rc = preds["alt_rc"] - preds["ref_rc"] if rc_handling == "average": diffs = np.mean([diffs, diffs_rc], axis=0) elif rc_handling == "maximum": replace_filt = np.abs(diffs) < np.abs(diffs_rc) diffs[replace_filt] = diffs_rc[replace_filt] diffs = pd.DataFrame(diffs, columns=out_annotation) return {"ism": diffs}
[ "def", "ism", "(", "model", ",", "ref", ",", "ref_rc", ",", "alt", ",", "alt_rc", ",", "mutation_positions", ",", "out_annotation_all_outputs", ",", "output_filter_mask", "=", "None", ",", "out_annotation", "=", "None", ",", "diff_type", "=", "\"log_odds\"", "...
In-silico mutagenesis Using ISM with diff_type 'log_odds' and rc_handling 'maximum' will produce predictions as used in [DeepSEA](http://www.nature.com/nmeth/journal/v12/n10/full/nmeth.3547.html). ISM offers two ways to calculate the difference between the outputs created by reference and alternative sequence and two different methods to select whether to use the output generated from the forward or from the reverse-complement sequences. To calculate "e-values" as mentioned in DeepSEA, the same ISM prediction has to be performed on a randomised set of 1 million 1000genomes, MAF-matched variants to get a background of predicted effects of random SNPs. # Arguments model: Keras model ref: Input sequence with the reference genotype in the mutation position ref_rc: Reverse complement of the 'ref' argument alt: Input sequence with the alternative genotype in the mutation position alt_rc: Reverse complement of the 'alt' argument mutation_positions: Position on which the mutation was placed in the forward sequences out_annotation_all_outputs: Output labels of the model. output_filter_mask: Mask of boolean values indicating which model outputs should be used. Use this or 'out_annotation' out_annotation: List of output labels for which of the outputs (in case of a multi-task model) the predictions should be calculated. diff_type: "log_odds" or "diff". When set to 'log_odds' calculate scores based on log_odds, which assumes the model output is a probability. When set to 'diff' the model output for 'ref' is subtracted from 'alt'. When using 'log_odds' with outputs that are not in the range [0,1], NaN will be returned. rc_handling: "average" or "maximum". Either average over the predictions derived from forward and reverse-complement predictions ('average') or pick the prediction with the bigger absolute value ('maximum'). # Returns Dictionary with the key `ism` which contains a pandas DataFrame containing the calculated values for each (selected) model output and input sequence
[ "In", "-", "silico", "mutagenesis" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/effects/ism.py#L9-L84
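The core arithmetic of `ism` reproduced on dummy prediction arrays, so no Keras model is needed: log-odds differences plus `'maximum'` reverse-complement handling.

```python
# diff_type="log_odds" with rc_handling="maximum", on fake predictions.
import numpy as np

preds = {
    "ref":    np.array([[0.10, 0.60]]),
    "alt":    np.array([[0.30, 0.55]]),
    "ref_rc": np.array([[0.12, 0.58]]),
    "alt_rc": np.array([[0.11, 0.90]]),
}
logit = lambda p: np.log(p / (1 - p))
diffs = logit(preds["alt"]) - logit(preds["ref"])
diffs_rc = logit(preds["alt_rc"]) - logit(preds["ref_rc"])
# 'maximum': keep whichever strand shows the larger absolute effect
replace = np.abs(diffs) < np.abs(diffs_rc)
diffs[replace] = diffs_rc[replace]
print(diffs)
```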
7,080
gagneurlab/concise
concise/hyopt.py
_train_and_eval_single
def _train_and_eval_single(train, valid, model, batch_size=32, epochs=300, use_weight=False, callbacks=[], eval_best=False, add_eval_metrics={}): """Fit and evaluate a keras model eval_best: if True, load the checkpointed model for evaluation """ def _format_keras_history(history): """nicely format keras history """ return {"params": history.params, "loss": merge_dicts({"epoch": history.epoch}, history.history), } if use_weight: sample_weight = train[2] else: sample_weight = None # train the model logger.info("Fit...") history = History() model.fit(train[0], train[1], batch_size=batch_size, validation_data=valid[:2], epochs=epochs, sample_weight=sample_weight, verbose=2, callbacks=[history] + callbacks) # get history hist = _format_keras_history(history) # load and eval the best model if eval_best: mcp = [x for x in callbacks if isinstance(x, ModelCheckpoint)] assert len(mcp) == 1 model = load_model(mcp[0].filepath) return eval_model(model, valid, add_eval_metrics), hist
python
def _train_and_eval_single(train, valid, model, batch_size=32, epochs=300, use_weight=False, callbacks=[], eval_best=False, add_eval_metrics={}): def _format_keras_history(history): """nicely format keras history """ return {"params": history.params, "loss": merge_dicts({"epoch": history.epoch}, history.history), } if use_weight: sample_weight = train[2] else: sample_weight = None # train the model logger.info("Fit...") history = History() model.fit(train[0], train[1], batch_size=batch_size, validation_data=valid[:2], epochs=epochs, sample_weight=sample_weight, verbose=2, callbacks=[history] + callbacks) # get history hist = _format_keras_history(history) # load and eval the best model if eval_best: mcp = [x for x in callbacks if isinstance(x, ModelCheckpoint)] assert len(mcp) == 1 model = load_model(mcp[0].filepath) return eval_model(model, valid, add_eval_metrics), hist
[ "def", "_train_and_eval_single", "(", "train", ",", "valid", ",", "model", ",", "batch_size", "=", "32", ",", "epochs", "=", "300", ",", "use_weight", "=", "False", ",", "callbacks", "=", "[", "]", ",", "eval_best", "=", "False", ",", "add_eval_metrics", ...
Fit and evaluate a keras model eval_best: if True, load the checkpointed model for evaluation
[ "Fit", "and", "evaluate", "a", "keras", "model" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L315-L351
7,081
gagneurlab/concise
concise/hyopt.py
eval_model
def eval_model(model, test, add_eval_metrics={}): """Evaluate model's performance on the test-set. # Arguments model: Keras model test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`. add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from the `concise.eval_metrics` module. # Returns dictionary with evaluation metrics """ # evaluate the model logger.info("Evaluate...") # - model_metrics model_metrics_values = model.evaluate(test[0], test[1], verbose=0, batch_size=test[1].shape[0]) # evaluation is done in a single pass to have more precise metrics model_metrics = dict(zip(_listify(model.metrics_names), _listify(model_metrics_values))) # - eval_metrics y_true = test[1] y_pred = model.predict(test[0], verbose=0) eval_metrics = {k: v(y_true, y_pred) for k, v in add_eval_metrics.items()} # handle the case where the two metrics names intersect # - omit duplicates from eval_metrics intersected_keys = set(model_metrics).intersection(set(eval_metrics)) if len(intersected_keys) > 0: logger.warning("Some metric names intersect: {0}. Ignoring the add_eval_metrics ones". format(intersected_keys)) eval_metrics = _delete_keys(eval_metrics, intersected_keys) return merge_dicts(model_metrics, eval_metrics)
python
def eval_model(model, test, add_eval_metrics={}): # evaluate the model logger.info("Evaluate...") # - model_metrics model_metrics_values = model.evaluate(test[0], test[1], verbose=0, batch_size=test[1].shape[0]) # evaluation is done in a single pass to have more precise metrics model_metrics = dict(zip(_listify(model.metrics_names), _listify(model_metrics_values))) # - eval_metrics y_true = test[1] y_pred = model.predict(test[0], verbose=0) eval_metrics = {k: v(y_true, y_pred) for k, v in add_eval_metrics.items()} # handle the case where the two metrics names intersect # - omit duplicates from eval_metrics intersected_keys = set(model_metrics).intersection(set(eval_metrics)) if len(intersected_keys) > 0: logger.warning("Some metric names intersect: {0}. Ignoring the add_eval_metrics ones". format(intersected_keys)) eval_metrics = _delete_keys(eval_metrics, intersected_keys) return merge_dicts(model_metrics, eval_metrics)
[ "def", "eval_model", "(", "model", ",", "test", ",", "add_eval_metrics", "=", "{", "}", ")", ":", "# evaluate the model", "logger", ".", "info", "(", "\"Evaluate...\"", ")", "# - model_metrics", "model_metrics_values", "=", "model", ".", "evaluate", "(", "test",...
Evaluate model's performance on the test-set. # Arguments model: Keras model test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`. add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from the `concise.eval_metrics` module. # Returns dictionary with evaluation metrics
[ "Evaluate", "model", "s", "performance", "on", "the", "test", "-", "set", "." ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L354-L389
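A hedged sketch of the `add_eval_metrics` mechanism: extra metrics are plain callables taking `(y_true, y_pred)`. The tiny Keras model is only a stand-in, and the exact Keras API may differ by version.

```python
# Evaluate a toy model with one built-in and one custom metric.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from concise.hyopt import eval_model

model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer="sgd", loss="mse")
x, y = np.random.rand(8, 4), np.random.rand(8, 1)
model.fit(x, y, epochs=1, verbose=0)

metrics = eval_model(
    model, (x, y),
    add_eval_metrics={"mad": lambda t, p: float(np.mean(np.abs(t - p)))})
print(metrics)   # the model's own 'loss' plus the custom 'mad'
```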
7,082
gagneurlab/concise
concise/hyopt.py
get_model
def get_model(model_fn, train_data, param): """Feed model_fn with train_data and param """ model_param = merge_dicts({"train_data": train_data}, param["model"], param.get("shared", {})) return model_fn(**model_param)
python
def get_model(model_fn, train_data, param): model_param = merge_dicts({"train_data": train_data}, param["model"], param.get("shared", {})) return model_fn(**model_param)
[ "def", "get_model", "(", "model_fn", ",", "train_data", ",", "param", ")", ":", "model_param", "=", "merge_dicts", "(", "{", "\"train_data\"", ":", "train_data", "}", ",", "param", "[", "\"model\"", "]", ",", "param", ".", "get", "(", "\"shared\"", ",", ...
Feed model_fn with train_data and param
[ "Feed", "model_fn", "with", "train_data", "and", "param" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L392-L396
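A standalone illustration of the parameter merging `get_model` performs: `train_data`, `param["model"]` and the optional `param["shared"]` end up as one kwargs dict for `model_fn` (here `merge_dicts` is assumed to behave like a plain dict union).

```python
# Reproduce get_model's kwargs merging with a dummy model_fn.
def model_fn(train_data, n_filters, lr):
    return ("built", len(train_data), n_filters, lr)

param = {"model": {"n_filters": 16}, "shared": {"lr": 0.01}}
merged = {"train_data": [1, 2, 3], **param["model"], **param.get("shared", {})}
print(model_fn(**merged))   # ('built', 3, 16, 0.01)
```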
7,083
gagneurlab/concise
concise/hyopt.py
_delete_keys
def _delete_keys(dct, keys): """Returns a copy of dct without `keys` keys """ c = deepcopy(dct) assert isinstance(keys, list) for k in keys: c.pop(k) return c
python
def _delete_keys(dct, keys): c = deepcopy(dct) assert isinstance(keys, list) for k in keys: c.pop(k) return c
[ "def", "_delete_keys", "(", "dct", ",", "keys", ")", ":", "c", "=", "deepcopy", "(", "dct", ")", "assert", "isinstance", "(", "keys", ",", "list", ")", "for", "k", "in", "keys", ":", "c", ".", "pop", "(", "k", ")", "return", "c" ]
Returns a copy of dct without `keys` keys
[ "Returns", "a", "copy", "of", "dct", "without", "keys", "keys" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L701-L708
7,084
gagneurlab/concise
concise/hyopt.py
_mean_dict
def _mean_dict(dict_list): """Compute the mean value across a list of dictionaries """ return {k: np.array([d[k] for d in dict_list]).mean() for k in dict_list[0].keys()}
python
def _mean_dict(dict_list): return {k: np.array([d[k] for d in dict_list]).mean() for k in dict_list[0].keys()}
[ "def", "_mean_dict", "(", "dict_list", ")", ":", "return", "{", "k", ":", "np", ".", "array", "(", "[", "d", "[", "k", "]", "for", "d", "in", "dict_list", "]", ")", ".", "mean", "(", ")", "for", "k", "in", "dict_list", "[", "0", "]", ".", "ke...
Compute the mean value across a list of dictionaries
[ "Compute", "the", "mean", "value", "across", "a", "list", "of", "dictionaries" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L711-L715
7,085
gagneurlab/concise
concise/hyopt.py
CMongoTrials.get_trial
def get_trial(self, tid): """Retrieve trial by tid """ lid = np.where(np.array(self.tids) == tid)[0][0] return self.trials[lid]
python
def get_trial(self, tid): lid = np.where(np.array(self.tids) == tid)[0][0] return self.trials[lid]
[ "def", "get_trial", "(", "self", ",", "tid", ")", ":", "lid", "=", "np", ".", "where", "(", "np", ".", "array", "(", "self", ".", "tids", ")", "==", "tid", ")", "[", "0", "]", "[", "0", "]", "return", "self", ".", "trials", "[", "lid", "]" ]
Retrieve trial by tid
[ "Retrieve", "trial", "by", "tid" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L116-L120
7,086
gagneurlab/concise
concise/hyopt.py
CMongoTrials.delete_running
def delete_running(self, timeout_last_refresh=0, dry_run=False): """Delete jobs stalled in the running state for too long. timeout_last_refresh, int: number of seconds """ running_all = self.handle.jobs_running() running_timeout = [job for job in running_all if coarse_utcnow() > job["refresh_time"] + timedelta(seconds=timeout_last_refresh)] if len(running_timeout) == 0: # Nothing to stop self.refresh_tids(None) return None if dry_run: logger.warning("Dry run. Not removing anything.") logger.info("Removing {0}/{1} running jobs. # all jobs: {2} ". format(len(running_timeout), len(running_all), len(self))) now = coarse_utcnow() logger.info("Current utc time: {0}".format(now)) logger.info("Time horizon: {0}".format(now - timedelta(seconds=timeout_last_refresh))) for job in running_timeout: logger.info("Removing job: ") pjob = job.to_dict() del pjob["misc"] # ignore misc when printing logger.info(pprint.pformat(pjob)) if not dry_run: self.handle.delete(job) logger.info("Job deleted") self.refresh_tids(None)
python
def delete_running(self, timeout_last_refresh=0, dry_run=False): running_all = self.handle.jobs_running() running_timeout = [job for job in running_all if coarse_utcnow() > job["refresh_time"] + timedelta(seconds=timeout_last_refresh)] if len(running_timeout) == 0: # Nothing to stop self.refresh_tids(None) return None if dry_run: logger.warning("Dry run. Not removing anything.") logger.info("Removing {0}/{1} running jobs. # all jobs: {2} ". format(len(running_timeout), len(running_all), len(self))) now = coarse_utcnow() logger.info("Current utc time: {0}".format(now)) logger.info("Time horizon: {0}".format(now - timedelta(seconds=timeout_last_refresh))) for job in running_timeout: logger.info("Removing job: ") pjob = job.to_dict() del pjob["misc"] # ignore misc when printing logger.info(pprint.pformat(pjob)) if not dry_run: self.handle.delete(job) logger.info("Job deleted") self.refresh_tids(None)
[ "def", "delete_running", "(", "self", ",", "timeout_last_refresh", "=", "0", ",", "dry_run", "=", "False", ")", ":", "running_all", "=", "self", ".", "handle", ".", "jobs_running", "(", ")", "running_timeout", "=", "[", "job", "for", "job", "in", "running_...
Delete jobs stalled in the running state for too long. timeout_last_refresh, int: number of seconds
[ "Delete", "jobs", "stalled", "in", "the", "running", "state", "for", "too", "long" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L174-L205
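The staleness filter at the heart of the method can be restated as a tiny hypothetical helper (is_stalled is not part of concise); the one-hour timeout is illustrative.

from datetime import datetime, timedelta

def is_stalled(refresh_time, timeout_last_refresh, now=None):
    # Mirrors the list-comprehension condition above: a running job is
    # removed when "now" lies past its last refresh plus the timeout.
    now = now or datetime.utcnow()
    return now > refresh_time + timedelta(seconds=timeout_last_refresh)

last_refresh = datetime.utcnow() - timedelta(hours=2)
print(is_stalled(last_refresh, 3600))  # True: silent for 2h > 1h window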
7,087
gagneurlab/concise
concise/hyopt.py
CMongoTrials.train_history
def train_history(self, tid=None): """Get train history as pd.DataFrame """ def result2history(result): if isinstance(result["history"], list): return pd.concat([pd.DataFrame(hist["loss"]).assign(fold=i) for i, hist in enumerate(result["history"])]) else: return pd.DataFrame(result["history"]["loss"]) # use all if tid is None: tid = self.valid_tid() res = [result2history(t["result"]).assign(tid=t["tid"]) for t in self.trials if t["tid"] in _listify(tid)] df = pd.concat(res) # reorder columns fold_name = ["fold"] if "fold" in df else [] df = _put_first(df, ["tid"] + fold_name + ["epoch"]) return df
python
def train_history(self, tid=None): def result2history(result): if isinstance(result["history"], list): return pd.concat([pd.DataFrame(hist["loss"]).assign(fold=i) for i, hist in enumerate(result["history"])]) else: return pd.DataFrame(result["history"]["loss"]) # use all if tid is None: tid = self.valid_tid() res = [result2history(t["result"]).assign(tid=t["tid"]) for t in self.trials if t["tid"] in _listify(tid)] df = pd.concat(res) # reorder columns fold_name = ["fold"] if "fold" in df else [] df = _put_first(df, ["tid"] + fold_name + ["epoch"]) return df
[ "def", "train_history", "(", "self", ",", "tid", "=", "None", ")", ":", "def", "result2history", "(", "result", ")", ":", "if", "isinstance", "(", "result", "[", "\"history\"", "]", ",", "list", ")", ":", "return", "pd", ".", "concat", "(", "[", "pd"...
Get train history as pd.DataFrame
[ "Get", "train", "history", "as", "pd", ".", "DataFrame" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L216-L238
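An illustrative run of the single-fold branch of result2history on a fabricated result dict; the epoch and loss values are made up.

import pandas as pd

# Fabricated result mirroring the expected layout: result["history"]["loss"]
# maps column names to per-epoch lists.
result = {"history": {"loss": {"epoch": [0, 1], "loss": [0.9, 0.5], "val_loss": [1.0, 0.6]}}}
df = pd.DataFrame(result["history"]["loss"]).assign(tid=7)
print(df)
#    epoch  loss  val_loss  tid
# 0      0   0.9       1.0    7
# 1      1   0.5       0.6    7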
7,088
gagneurlab/concise
concise/hyopt.py
CMongoTrials.as_df
def as_df(self, ignore_vals=["history"], separator=".", verbose=True): """Return a pd.DataFrame view of the whole experiment """ def add_eval(res): if "eval" not in res: if isinstance(res["history"], list): # take the average across all folds eval_names = list(res["history"][0]["loss"].keys()) eval_metrics = np.array([[v[-1] for k, v in hist["loss"].items()] for hist in res["history"]]).mean(axis=0).tolist() res["eval"] = {eval_names[i]: eval_metrics[i] for i in range(len(eval_metrics))} else: res["eval"] = {k: v[-1] for k, v in res["history"]["loss"].items()} return res def add_n_epoch(df): df_epoch = self.train_history().groupby("tid")["epoch"].max().reset_index() df_epoch.rename(columns={"epoch": "n_epoch"}, inplace=True) return pd.merge(df, df_epoch, on="tid", how="left") results = self.get_ok_results(verbose=verbose) rp = [_flatten_dict(_delete_keys(add_eval(x), ignore_vals), separator) for x in results] df = pd.DataFrame.from_records(rp) df = add_n_epoch(df) first = ["tid", "loss", "status"] return _put_first(df, first)
python
def as_df(self, ignore_vals=["history"], separator=".", verbose=True): def add_eval(res): if "eval" not in res: if isinstance(res["history"], list): # take the average across all folds eval_names = list(res["history"][0]["loss"].keys()) eval_metrics = np.array([[v[-1] for k, v in hist["loss"].items()] for hist in res["history"]]).mean(axis=0).tolist() res["eval"] = {eval_names[i]: eval_metrics[i] for i in range(len(eval_metrics))} else: res["eval"] = {k: v[-1] for k, v in res["history"]["loss"].items()} return res def add_n_epoch(df): df_epoch = self.train_history().groupby("tid")["epoch"].max().reset_index() df_epoch.rename(columns={"epoch": "n_epoch"}, inplace=True) return pd.merge(df, df_epoch, on="tid", how="left") results = self.get_ok_results(verbose=verbose) rp = [_flatten_dict(_delete_keys(add_eval(x), ignore_vals), separator) for x in results] df = pd.DataFrame.from_records(rp) df = add_n_epoch(df) first = ["tid", "loss", "status"] return _put_first(df, first)
[ "def", "as_df", "(", "self", ",", "ignore_vals", "=", "[", "\"history\"", "]", ",", "separator", "=", "\".\"", ",", "verbose", "=", "True", ")", ":", "def", "add_eval", "(", "res", ")", ":", "if", "\"eval\"", "not", "in", "res", ":", "if", "isinstanc...
Return a pd.DataFrame view of the whole experiment
[ "Return", "a", "pd", ".", "DataFrame", "view", "of", "the", "whole", "experiment" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L283-L311
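The multi-fold branch of the add_eval closure, in isolation on fabricated history data: it averages the last epoch of each metric across folds.

import numpy as np

history = [{"loss": {"loss": [0.9, 0.5], "acc": [0.6, 0.8]}},
           {"loss": {"loss": [0.8, 0.3], "acc": [0.7, 0.9]}}]
eval_names = list(history[0]["loss"].keys())
eval_metrics = np.array([[v[-1] for k, v in hist["loss"].items()]
                         for hist in history]).mean(axis=0).tolist()
print(dict(zip(eval_names, eval_metrics)))  # {'loss': 0.4, 'acc': 0.85} (approx.)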
7,089
gagneurlab/concise
concise/effects/snp_effects.py
effect_from_model
def effect_from_model(model, ref, ref_rc, alt, alt_rc, methods, mutation_positions, out_annotation_all_outputs, extra_args=None, **argv): """Convenience function to execute multiple effect predictions in one call # Arguments model: Keras model ref: Input sequence with the reference genotype in the mutation position ref_rc: Reverse complement of the 'ref' argument alt: Input sequence with the alternative genotype in the mutation position alt_rc: Reverse complement of the 'alt' argument methods: A list of prediction functions to be executed, e.g.: from concise.effects.ism.ism. Using the same function more than once (even with different parameters) will overwrite the results of the previous calculation of that function. mutation_positions: Positions at which the mutations were placed in the forward sequences out_annotation_all_outputs: Output labels of the model. extra_args: None or a list of the same length as 'methods'. The elements of the list are dictionaries with additional arguments that should be passed on to the respective functions in 'methods'. Arguments defined here will overwrite arguments that are passed to all methods. **argv: Additional arguments to be passed on to all methods, e.g.: out_annotation. # Returns Dictionary containing the results of the individual calculations, the keys are the names of the executed functions """ assert isinstance(methods, list) if isinstance(extra_args, list): assert(len(extra_args) == len(methods)) else: extra_args = [None] * len(methods) main_args = {"model": model, "ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc, "mutation_positions": mutation_positions, "out_annotation_all_outputs": out_annotation_all_outputs} pred_results = {} for method, xargs in zip(methods, extra_args): if xargs is not None: if isinstance(xargs, dict): for k in argv: if k not in xargs: xargs[k] = argv[k] else: xargs = argv for k in main_args: xargs[k] = main_args[k] res = method(**xargs) pred_results[method.__name__] = res return pred_results
python
def effect_from_model(model, ref, ref_rc, alt, alt_rc, methods, mutation_positions, out_annotation_all_outputs, extra_args=None, **argv): assert isinstance(methods, list) if isinstance(extra_args, list): assert(len(extra_args) == len(methods)) else: extra_args = [None] * len(methods) main_args = {"model": model, "ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc, "mutation_positions": mutation_positions, "out_annotation_all_outputs": out_annotation_all_outputs} pred_results = {} for method, xargs in zip(methods, extra_args): if xargs is not None: if isinstance(xargs, dict): for k in argv: if k not in xargs: xargs[k] = argv[k] else: xargs = argv for k in main_args: xargs[k] = main_args[k] res = method(**xargs) pred_results[method.__name__] = res return pred_results
[ "def", "effect_from_model", "(", "model", ",", "ref", ",", "ref_rc", ",", "alt", ",", "alt_rc", ",", "methods", ",", "mutation_positions", ",", "out_annotation_all_outputs", ",", "extra_args", "=", "None", ",", "*", "*", "argv", ")", ":", "assert", "isinstan...
Convenience function to execute multiple effect predictions in one call # Arguments model: Keras model ref: Input sequence with the reference genotype in the mutation position ref_rc: Reverse complement of the 'ref' argument alt: Input sequence with the alternative genotype in the mutation position alt_rc: Reverse complement of the 'alt' argument methods: A list of prediction functions to be executed, e.g.: from concise.effects.ism.ism. Using the same function more than once (even with different parameters) will overwrite the results of the previous calculation of that function. mutation_positions: Positions at which the mutations were placed in the forward sequences out_annotation_all_outputs: Output labels of the model. extra_args: None or a list of the same length as 'methods'. The elements of the list are dictionaries with additional arguments that should be passed on to the respective functions in 'methods'. Arguments defined here will overwrite arguments that are passed to all methods. **argv: Additional arguments to be passed on to all methods, e.g.: out_annotation. # Returns Dictionary containing the results of the individual calculations, the keys are the names of the executed functions
[ "Convenience", "function", "to", "execute", "multiple", "effect", "predictions", "in", "one", "call" ]
d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/effects/snp_effects.py#L5-L53
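The precedence rules for extra_args versus **argv, distilled into a runnable fragment mirroring the merging loop above; the keys and values are illustrative.

# Shared kwargs (**argv) fill in gaps; the per-method dict from
# extra_args keeps precedence where both define a key.
shared_argv = {"out_annotation": ["out1"], "diff_type": "log_odds"}
xargs = {"diff_type": "deepsea_effect"}  # method-specific override
for k in shared_argv:
    if k not in xargs:
        xargs[k] = shared_argv[k]
print(xargs)  # {'diff_type': 'deepsea_effect', 'out_annotation': ['out1']}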
7,090
bitshares/uptick
uptick/markets.py
trades
def trades(ctx, market, limit, start, stop): """ List trades in a market """ market = Market(market, bitshares_instance=ctx.bitshares) t = [["time", "quote", "base", "price"]] for trade in market.trades(limit, start=start, stop=stop): t.append( [ str(trade["time"]), str(trade["quote"]), str(trade["base"]), "{:f} {}/{}".format( trade["price"], trade["base"]["asset"]["symbol"], trade["quote"]["asset"]["symbol"], ), ] ) print_table(t)
python
def trades(ctx, market, limit, start, stop): market = Market(market, bitshares_instance=ctx.bitshares) t = [["time", "quote", "base", "price"]] for trade in market.trades(limit, start=start, stop=stop): t.append( [ str(trade["time"]), str(trade["quote"]), str(trade["base"]), "{:f} {}/{}".format( trade["price"], trade["base"]["asset"]["symbol"], trade["quote"]["asset"]["symbol"], ), ] ) print_table(t)
[ "def", "trades", "(", "ctx", ",", "market", ",", "limit", ",", "start", ",", "stop", ")", ":", "market", "=", "Market", "(", "market", ",", "bitshares_instance", "=", "ctx", ".", "bitshares", ")", "t", "=", "[", "[", "\"time\"", ",", "\"quote\"", ","...
List trades in a market
[ "List", "trades", "in", "a", "market" ]
66c102200fdbf96cef4fd55cc69d00e690f62001
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L31-L49
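A hedged sketch of the same query outside the click CLI, using python-bitshares directly; node configuration is assumed to come from the library defaults, and the market pair is illustrative.

from bitshares import BitShares
from bitshares.market import Market

bitshares = BitShares()  # relies on the library's configured node
market = Market("USD:BTS", bitshares_instance=bitshares)
# Each trade carries "time", "quote", "base" and "price", as the
# table-building loop above assumes.
for trade in market.trades(limit=10):
    print(trade["time"], trade["price"])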
7,091
bitshares/uptick
uptick/markets.py
ticker
def ticker(ctx, market): """ Show ticker of a market """ market = Market(market, bitshares_instance=ctx.bitshares) ticker = market.ticker() t = [["key", "value"]] for key in ticker: t.append([key, str(ticker[key])]) print_table(t)
python
def ticker(ctx, market): market = Market(market, bitshares_instance=ctx.bitshares) ticker = market.ticker() t = [["key", "value"]] for key in ticker: t.append([key, str(ticker[key])]) print_table(t)
[ "def", "ticker", "(", "ctx", ",", "market", ")", ":", "market", "=", "Market", "(", "market", ",", "bitshares_instance", "=", "ctx", ".", "bitshares", ")", "ticker", "=", "market", ".", "ticker", "(", ")", "t", "=", "[", "[", "\"key\"", ",", "\"value...
Show ticker of a market
[ "Show", "ticker", "of", "a", "market" ]
66c102200fdbf96cef4fd55cc69d00e690f62001
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L56-L64
7,092
bitshares/uptick
uptick/markets.py
cancel
def cancel(ctx, orders, account): """ Cancel one or multiple orders """ print_tx(ctx.bitshares.cancel(orders, account=account))
python
def cancel(ctx, orders, account): print_tx(ctx.bitshares.cancel(orders, account=account))
[ "def", "cancel", "(", "ctx", ",", "orders", ",", "account", ")", ":", "print_tx", "(", "ctx", ".", "bitshares", ".", "cancel", "(", "orders", ",", "account", "=", "account", ")", ")" ]
Cancel one or multiple orders
[ "Cancel", "one", "or", "multiple", "orders" ]
66c102200fdbf96cef4fd55cc69d00e690f62001
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L78-L81
7,093
bitshares/uptick
uptick/markets.py
orderbook
def orderbook(ctx, market): """ Show the orderbook of a particular market """ market = Market(market, bitshares_instance=ctx.bitshares) orderbook = market.orderbook() ta = {} ta["bids"] = [["quote", "sum quote", "base", "sum base", "price"]] cumsumquote = Amount(0, market["quote"]) cumsumbase = Amount(0, market["base"]) for order in orderbook["bids"]: cumsumbase += order["base"] cumsumquote += order["quote"] ta["bids"].append( [ str(order["quote"]), str(cumsumquote), str(order["base"]), str(cumsumbase), "{:f} {}/{}".format( order["price"], order["base"]["asset"]["symbol"], order["quote"]["asset"]["symbol"], ), ] ) ta["asks"] = [["price", "base", "sum base", "quote", "sum quote"]] cumsumquote = Amount(0, market["quote"]) cumsumbase = Amount(0, market["base"]) for order in orderbook["asks"]: cumsumbase += order["base"] cumsumquote += order["quote"] ta["asks"].append( [ "{:f} {}/{}".format( order["price"], order["base"]["asset"]["symbol"], order["quote"]["asset"]["symbol"], ), str(order["base"]), str(cumsumbase), str(order["quote"]), str(cumsumquote), ] ) t = [["bids", "asks"]] t.append([format_table(ta["bids"]), format_table(ta["asks"])]) print_table(t)
python
def orderbook(ctx, market): market = Market(market, bitshares_instance=ctx.bitshares) orderbook = market.orderbook() ta = {} ta["bids"] = [["quote", "sum quote", "base", "sum base", "price"]] cumsumquote = Amount(0, market["quote"]) cumsumbase = Amount(0, market["base"]) for order in orderbook["bids"]: cumsumbase += order["base"] cumsumquote += order["quote"] ta["bids"].append( [ str(order["quote"]), str(cumsumquote), str(order["base"]), str(cumsumbase), "{:f} {}/{}".format( order["price"], order["base"]["asset"]["symbol"], order["quote"]["asset"]["symbol"], ), ] ) ta["asks"] = [["price", "base", "sum base", "quote", "sum quote"]] cumsumquote = Amount(0, market["quote"]) cumsumbase = Amount(0, market["base"]) for order in orderbook["asks"]: cumsumbase += order["base"] cumsumquote += order["quote"] ta["asks"].append( [ "{:f} {}/{}".format( order["price"], order["base"]["asset"]["symbol"], order["quote"]["asset"]["symbol"], ), str(order["base"]), str(cumsumbase), str(order["quote"]), str(cumsumquote), ] ) t = [["bids", "asks"]] t.append([format_table(ta["bids"]), format_table(ta["asks"])]) print_table(t)
[ "def", "orderbook", "(", "ctx", ",", "market", ")", ":", "market", "=", "Market", "(", "market", ",", "bitshares_instance", "=", "ctx", ".", "bitshares", ")", "orderbook", "=", "market", ".", "orderbook", "(", ")", "ta", "=", "{", "}", "ta", "[", "\"...
Show the orderbook of a particular market
[ "Show", "the", "orderbook", "of", "a", "particular", "market" ]
66c102200fdbf96cef4fd55cc69d00e690f62001
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L88-L135
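The cumulative-sum columns are ordinary running totals; here is the same arithmetic with plain floats in place of bitshares Amount objects, on made-up bids.

bids = [{"quote": 2.0, "base": 10.0}, {"quote": 1.0, "base": 5.0}]
cumsumquote, cumsumbase = 0.0, 0.0
rows = [["quote", "sum quote", "base", "sum base"]]
for order in bids:
    cumsumbase += order["base"]
    cumsumquote += order["quote"]
    rows.append([order["quote"], cumsumquote, order["base"], cumsumbase])
print(rows)  # ..., [2.0, 2.0, 10.0, 10.0], [1.0, 3.0, 5.0, 15.0]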
7,094
bitshares/uptick
uptick/markets.py
buy
def buy(ctx, buy_amount, buy_asset, price, sell_asset, order_expiration, account): """ Buy a specific asset at a certain rate against a base asset """ amount = Amount(buy_amount, buy_asset) price = Price( price, base=sell_asset, quote=buy_asset, bitshares_instance=ctx.bitshares ) print_tx( price.market.buy(price, amount, account=account, expiration=order_expiration) )
python
def buy(ctx, buy_amount, buy_asset, price, sell_asset, order_expiration, account): amount = Amount(buy_amount, buy_asset) price = Price( price, base=sell_asset, quote=buy_asset, bitshares_instance=ctx.bitshares ) print_tx( price.market.buy(price, amount, account=account, expiration=order_expiration) )
[ "def", "buy", "(", "ctx", ",", "buy_amount", ",", "buy_asset", ",", "price", ",", "sell_asset", ",", "order_expiration", ",", "account", ")", ":", "amount", "=", "Amount", "(", "buy_amount", ",", "buy_asset", ")", "price", "=", "Price", "(", "price", ","...
Buy a specific asset at a certain rate against a base asset
[ "Buy", "a", "specific", "asset", "at", "a", "certain", "rate", "against", "a", "base", "asset" ]
66c102200fdbf96cef4fd55cc69d00e690f62001
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L153-L162
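A hedged sketch of the Price/Amount construction the command performs; symbols and numbers are illustrative, and actually broadcasting would additionally need a funded account and an unlocked wallet, so the final call is left commented.

from bitshares import BitShares
from bitshares.amount import Amount
from bitshares.price import Price

bitshares = BitShares()
amount = Amount(10, "USD")  # buy 10 USD ...
price = Price(0.05, base="BTS", quote="USD",  # ... at 0.05 BTS per USD
              bitshares_instance=bitshares)
# price.market is the implied market; placing the order would be:
# price.market.buy(price, amount, account="my-account", expiration=3600)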
7,095
bitshares/uptick
uptick/markets.py
openorders
def openorders(ctx, account): """ List open orders of an account """ account = Account( account or config["default_account"], bitshares_instance=ctx.bitshares ) t = [["Price", "Quote", "Base", "ID"]] for o in account.openorders: t.append( [ "{:f} {}/{}".format( o["price"], o["base"]["asset"]["symbol"], o["quote"]["asset"]["symbol"], ), str(o["quote"]), str(o["base"]), o["id"], ] ) print_table(t)
python
def openorders(ctx, account): account = Account( account or config["default_account"], bitshares_instance=ctx.bitshares ) t = [["Price", "Quote", "Base", "ID"]] for o in account.openorders: t.append( [ "{:f} {}/{}".format( o["price"], o["base"]["asset"]["symbol"], o["quote"]["asset"]["symbol"], ), str(o["quote"]), str(o["base"]), o["id"], ] ) print_table(t)
[ "def", "openorders", "(", "ctx", ",", "account", ")", ":", "account", "=", "Account", "(", "account", "or", "config", "[", "\"default_account\"", "]", ",", "bitshares_instance", "=", "ctx", ".", "bitshares", ")", "t", "=", "[", "[", "\"Price\"", ",", "\"...
List open orders of an account
[ "List", "open", "orders", "of", "an", "account" ]
66c102200fdbf96cef4fd55cc69d00e690f62001
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L196-L216
7,096
bitshares/uptick
uptick/markets.py
cancelall
def cancelall(ctx, market, account): """ Cancel all orders of an account in a market """ market = Market(market) ctx.bitshares.bundle = True market.cancel([x["id"] for x in market.accountopenorders(account)], account=account) print_tx(ctx.bitshares.txbuffer.broadcast())
python
def cancelall(ctx, market, account): market = Market(market) ctx.bitshares.bundle = True market.cancel([x["id"] for x in market.accountopenorders(account)], account=account) print_tx(ctx.bitshares.txbuffer.broadcast())
[ "def", "cancelall", "(", "ctx", ",", "market", ",", "account", ")", ":", "market", "=", "Market", "(", "market", ")", "ctx", ".", "bitshares", ".", "bundle", "=", "True", "market", ".", "cancel", "(", "[", "x", "[", "\"id\"", "]", "for", "x", "in",...
Cancel all orders of an account in a market
[ "Cancel", "all", "orders", "of", "an", "account", "in", "a", "market" ]
66c102200fdbf96cef4fd55cc69d00e690f62001
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L225-L231
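The bundle/broadcast pattern above queues every cancel into one transaction instead of broadcasting each call separately; a hedged sketch of the same sequence, with the account name a placeholder and the network-touching calls left commented.

from bitshares import BitShares
from bitshares.market import Market

bitshares = BitShares()
bitshares.bundle = True  # queue operations instead of broadcasting each one
market = Market("USD:BTS", bitshares_instance=bitshares)
# One cancel op covering all open orders, then a single broadcast:
# market.cancel([o["id"] for o in market.accountopenorders("my-account")],
#               account="my-account")
# print(bitshares.txbuffer.broadcast())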
7,097
bitshares/uptick
uptick/markets.py
spread
def spread(ctx, market, side, min, max, num, total, order_expiration, account): """ Place multiple orders \b :param str market: Market pair quote:base (e.g. USD:BTS) :param str side: ``buy`` or ``sell`` quote :param float min: minimum price to place order at :param float max: maximum price to place order at :param int num: Number of orders to place :param float total: Total amount of quote to use for all orders :param int order_expiration: Number of seconds until the order expires from the books """ from tqdm import tqdm from numpy import linspace market = Market(market) ctx.bitshares.bundle = True if min < max: space = linspace(min, max, num) else: space = linspace(max, min, num) func = getattr(market, side) for p in tqdm(space): func(p, total / float(num), account=account, expiration=order_expiration) print_tx(ctx.bitshares.txbuffer.broadcast())
python
def spread(ctx, market, side, min, max, num, total, order_expiration, account): from tqdm import tqdm from numpy import linspace market = Market(market) ctx.bitshares.bundle = True if min < max: space = linspace(min, max, num) else: space = linspace(max, min, num) func = getattr(market, side) for p in tqdm(space): func(p, total / float(num), account=account, expiration=order_expiration) print_tx(ctx.bitshares.txbuffer.broadcast())
[ "def", "spread", "(", "ctx", ",", "market", ",", "side", ",", "min", ",", "max", ",", "num", ",", "total", ",", "order_expiration", ",", "account", ")", ":", "from", "tqdm", "import", "tqdm", "from", "numpy", "import", "linspace", "market", "=", "Marke...
Place multiple orders \b :param str market: Market pair quote:base (e.g. USD:BTS) :param str side: ``buy`` or ``sell`` quote :param float min: minimum price to place order at :param float max: maximum price to place order at :param int num: Number of orders to place :param float total: Total amount of quote to use for all orders :param int order_expiration: Number of seconds until the order expires from the books
[ "Place", "multiple", "orders" ]
66c102200fdbf96cef4fd55cc69d00e690f62001
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L246-L273
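The ladder itself is plain arithmetic: an evenly spaced price grid with the quote total split equally across orders. A runnable sketch with made-up numbers:

from numpy import linspace

low, high, num, total = 0.10, 0.20, 5, 100.0
space = linspace(low, high, num)   # ascending price grid, as when min < max
per_order = total / float(num)     # equal slice of quote per rung
for p in space:
    print("order: {:g} @ {:.3f}".format(per_order, p))
# five orders of 20 each, at 0.100, 0.125, 0.150, 0.175, 0.200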
7,098
bitshares/uptick
uptick/markets.py
updateratio
def updateratio(ctx, symbol, ratio, account): """ Update the collateral ratio of a call position """ from bitshares.dex import Dex dex = Dex(bitshares_instance=ctx.bitshares) print_tx(dex.adjust_collateral_ratio(symbol, ratio, account=account))
python
def updateratio(ctx, symbol, ratio, account): from bitshares.dex import Dex dex = Dex(bitshares_instance=ctx.bitshares) print_tx(dex.adjust_collateral_ratio(symbol, ratio, account=account))
[ "def", "updateratio", "(", "ctx", ",", "symbol", ",", "ratio", ",", "account", ")", ":", "from", "bitshares", ".", "dex", "import", "Dex", "dex", "=", "Dex", "(", "bitshares_instance", "=", "ctx", ".", "bitshares", ")", "print_tx", "(", "dex", ".", "ad...
Update the collateral ratio of a call position
[ "Update", "the", "collateral", "ratio", "of", "a", "call", "position" ]
66c102200fdbf96cef4fd55cc69d00e690f62001
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L312-L318
7,099
bitshares/uptick
uptick/markets.py
bidcollateral
def bidcollateral( ctx, collateral_symbol, collateral_amount, debt_symbol, debt_amount, account ): """ Bid for collateral in the settlement fund """ print_tx( ctx.bitshares.bid_collateral( Amount(collateral_amount, collateral_symbol), Amount(debt_amount, debt_symbol), account=account, ) )
python
def bidcollateral( ctx, collateral_symbol, collateral_amount, debt_symbol, debt_amount, account ): print_tx( ctx.bitshares.bid_collateral( Amount(collateral_amount, collateral_symbol), Amount(debt_amount, debt_symbol), account=account, ) )
[ "def", "bidcollateral", "(", "ctx", ",", "collateral_symbol", ",", "collateral_amount", ",", "debt_symbol", ",", "debt_amount", ",", "account", ")", ":", "print_tx", "(", "ctx", ".", "bitshares", ".", "bid_collateral", "(", "Amount", "(", "collateral_amount", ",...
Bid for collateral in the settlement fund
[ "Bid", "for", "collateral", "in", "the", "settlement", "fund" ]
66c102200fdbf96cef4fd55cc69d00e690f62001
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L353-L364