function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def __init__(self, mat):
    # Banded solver for a symmetric matrix. The band storage is usable
    # directly by inner_solve, so it is cached as the inner argument.
    BandedMatrixSolver.__init__(self, mat)
    self.issymmetric = True
    self._inner_arg = self._lu.data
148,
38,
148,
24,
1485264542
] |
def apply_constraints(self, b, constraints, axis=0):
    """Apply (row, value) constraints to the right-hand side ``b``.

    Only a single constraint on the first row is supported: the matrix
    diagonal is idented at row 0 and ``b`` is pinned to the value along
    ``axis``. Returns ``b`` (modified in place).
    """
    if not constraints:
        return b
    assert len(constraints) == 1
    assert constraints[0][0] == 0, 'Can only fix first row'
    # Ident the first row of the factored matrix
    self._lu.diagonal(0)[0] = 1
    sl = [slice(None)] * len(b.shape)
    sl[axis] = 0
    b[tuple(sl)] = constraints[0][1]
    return b
148,
38,
148,
24,
1485264542
] |
def inner_solve(u, lu):
    """Diagonal solve: divide the leading entries of ``u`` in place by
    the stored diagonal ``lu[0]``."""
    diag = lu[0]
    u[:diag.shape[0]] /= diag
148,
38,
148,
24,
1485264542
] |
def Solve(u, data, axis=0):
    # No pure-Python fallback exists; only the optimized (compiled)
    # routine is provided for this solver.
    raise NotImplementedError('Only optimized version')
148,
38,
148,
24,
1485264542
] |
def __init__(self, mat):
    # Delegate banded setup; symmetry is inherited from the assembled matrix.
    BandedMatrixSolver.__init__(self, mat)
    self.issymmetric = self.mat.issymmetric
148,
38,
148,
24,
1485264542
] |
def LU(data):
    """In-place LU factorization for a matrix with bands at offsets
    (-2, 0, 2). The lower band is overwritten by the elimination
    multipliers and the diagonal by the entries of U."""
    lower = data[0, :-2]
    diag = data[1, :]
    upper = data[2, 2:]
    for i in range(2, diag.shape[0]):
        j = i - 2
        lam = lower[j] / diag[j]
        lower[j] = lam
        diag[i] = diag[i] - lam * upper[j]
148,
38,
148,
24,
1485264542
] |
def perform_lu(self):
    """Factorize on the first call; later calls return the cached factors."""
    if self._inner_arg is not None:
        return self._lu
    self.LU(self._lu.data)
    self._inner_arg = self._lu.data
    return self._lu
148,
38,
148,
24,
1485264542
] |
def inner_solve(u, data):
    """Solve the factored system (bands -2, 0, 2) in place."""
    ld = data[0, :-2]   # stored multipliers
    d = data[1, :]      # diagonal of U
    ud = data[2, 2:]    # upper band of U
    n = d.shape[0]
    # forward substitution with the stored multipliers
    for i in range(2, n):
        u[i] -= ld[i-2]*u[i-2]
    # back substitution; last two rows have no upper-band coupling
    u[n-1] = u[n-1]/d[n-1]
    u[n-2] = u[n-2]/d[n-2]
    for i in reversed(range(n-2)):
        u[i] = (u[i] - ud[i]*u[i+2])/d[i]
148,
38,
148,
24,
1485264542
] |
def Solve(u, data, axis=0):
    # No pure-Python fallback exists; only the optimized (compiled)
    # routine is provided for this solver.
    raise NotImplementedError('Only optimized version')
148,
38,
148,
24,
1485264542
] |
def __init__(self, mat):
    # Plain banded solver; all setup happens in the base class.
    BandedMatrixSolver.__init__(self, mat)
148,
38,
148,
24,
1485264542
] |
def LU(data):
    """In-place LU factorization of a tridiagonal matrix (bands -1, 0, 1).
    The lower band is overwritten by multipliers, the diagonal by U."""
    lower = data[0, :-1]
    diag = data[1, :]
    upper = data[2, 1:]
    for i in range(1, diag.shape[0]):
        lam = lower[i-1] / diag[i-1]
        lower[i-1] = lam
        diag[i] -= lam * upper[i-1]
148,
38,
148,
24,
1485264542
] |
def inner_solve(u, data):
    """Solve the factored tridiagonal system (bands -1, 0, 1) in place."""
    ld = data[0, :-1]   # stored multipliers
    d = data[1, :]      # diagonal of U
    ud = data[2, 1:]    # upper band of U
    n = d.shape[0]
    # forward substitution
    for i in range(1, n):
        u[i] -= ld[i-1]*u[i-1]
    # back substitution
    u[n-1] = u[n-1]/d[n-1]
    for i in reversed(range(n-1)):
        u[i] = (u[i] - ud[i]*u[i+1])/d[i]
148,
38,
148,
24,
1485264542
] |
def Solve(u, data, axis=0):
    # No pure-Python fallback exists; only the optimized (compiled)
    # routine is provided for this solver.
    raise NotImplementedError('Only optimized version')
148,
38,
148,
24,
1485264542
] |
def __init__(self, mat):
    # Pentadiagonal solver: the matrix must carry exactly five bands.
    BandedMatrixSolver.__init__(self, mat)
    assert len(self.mat) == 5
148,
38,
148,
24,
1485264542
] |
def LU(data): # pragma: no cover
    """In-place LU decomposition of a pentadiagonal matrix with bands at
    offsets (-4, -2, 0, 2, 4), stored row-wise in ``data``. The lower
    bands are overwritten with the multipliers, the diagonal and upper
    bands with the entries of U."""
    a = data[0, :-4]   # second lower band (offset -4)
    b = data[1, :-2]   # first lower band (offset -2)
    d = data[2, :]     # main diagonal
    e = data[3, 2:]    # first upper band (offset 2)
    f = data[4, 4:]    # second upper band (offset 4)
    n = d.shape[0]
    m = e.shape[0]
    k = n - m          # band offset (2 for this layout)
    for i in range(n-2*k):
        # eliminate the first lower band against row i
        lam = b[i]/d[i]
        d[i+k] -= lam*e[i]
        e[i+k] -= lam*f[i]
        b[i] = lam     # store multiplier
        # eliminate the second lower band against row i
        lam = a[i]/d[i]
        b[i+k] -= lam*e[i]
        d[i+2*k] -= lam*f[i]
        a[i] = lam     # store multiplier
    # the last two rows only have the first lower band left
    i = n-4
    lam = b[i]/d[i]
    d[i+k] -= lam*e[i]
    b[i] = lam
    i = n-3
    lam = b[i]/d[i]
    d[i+k] -= lam*e[i]
    b[i] = lam
148,
38,
148,
24,
1485264542
] |
def inner_solve(u, data):
    """Forward/backward substitution with the in-place LU factors of a
    five-band matrix (offsets -4, -2, 0, 2, 4); overwrites ``u``."""
    a = data[0, :-4]   # multipliers, offset -4
    b = data[1, :-2]   # multipliers, offset -2
    d = data[2, :]     # diagonal of U
    e = data[3, 2:]    # first upper band of U
    f = data[4, 4:]    # second upper band of U
    n = d.shape[0]
    # forward substitution (L has unit diagonal)
    u[2] -= b[0]*u[0]
    u[3] -= b[1]*u[1]
    for k in range(4, n):
        u[k] -= (b[k-2]*u[k-2] + a[k-4]*u[k-4])
    # back substitution; the last four rows couple to fewer bands
    u[n-1] /= d[n-1]
    u[n-2] /= d[n-2]
    u[n-3] = (u[n-3]-e[n-3]*u[n-1])/d[n-3]
    u[n-4] = (u[n-4]-e[n-4]*u[n-2])/d[n-4]
    for k in range(n-5, -1, -1):
        u[k] = (u[k]-e[k]*u[k+2]-f[k]*u[k+4])/d[k]
148,
38,
148,
24,
1485264542
] |
def Solve(u, data, axis=0):
    # No pure-Python fallback exists; only the optimized (compiled)
    # routine is provided for this solver.
    raise NotImplementedError('Only optimized version')
148,
38,
148,
24,
1485264542
] |
def __init__(self, mat):
    # Plain banded solver; all setup happens in the base class.
    BandedMatrixSolver.__init__(self, mat)
148,
38,
148,
24,
1485264542
] |
def LU(data):
    """In-place LU factorization of a matrix with bands at offsets
    (-2, 0, 2, 4). The lower band is overwritten with multipliers; the
    diagonal and first upper band with the entries of U."""
    lower = data[0, :-2]
    diag = data[1, :]
    up1 = data[2, 2:]
    up2 = data[3, 4:]
    n = diag.shape[0]
    for i in range(2, n):
        j = i - 2
        lam = lower[j] / diag[j]
        lower[j] = lam
        diag[i] = diag[i] - lam * up1[j]
        # the first upper band only extends to row n-3
        if i < n - 2:
            up1[i] = up1[i] - lam * up2[j]
148,
38,
148,
24,
1485264542
] |
def inner_solve(u, data):
    """Forward/backward substitution with the in-place LU factors of a
    four-band matrix (offsets -2, 0, 2, 4); overwrites ``u``."""
    ld = data[0, :-2]   # stored multipliers (offset -2)
    d = data[1, :]      # diagonal of U
    u1 = data[2, 2:]    # first upper band of U
    u2 = data[3, 4:]    # second upper band of U
    n = d.shape[0]
    # forward substitution (L has unit diagonal)
    for i in range(2, n):
        u[i] -= ld[i-2]*u[i-2]
    # back substitution; last four rows couple to fewer bands
    u[n-1] = u[n-1]/d[n-1]
    u[n-2] = u[n-2]/d[n-2]
    u[n-3] = (u[n-3] - u1[n-3]*u[n-1])/d[n-3]
    u[n-4] = (u[n-4] - u1[n-4]*u[n-2])/d[n-4]
    for i in range(n - 5, -1, -1):
        u[i] = (u[i] - u1[i]*u[i+2] - u2[i]*u[i+4])/d[i]
148,
38,
148,
24,
1485264542
] |
def Solve(u, data, axis=0):
    # No pure-Python fallback exists; only the optimized (compiled)
    # routine is provided for this solver.
    raise NotImplementedError('Only optimized version')
148,
38,
148,
24,
1485264542
] |
def __init__(self, mat):
    # Upper-triangular matrix: no factorization needed, so the raw band
    # storage is passed straight to inner_solve.
    BandedMatrixSolver.__init__(self, mat)
    self._inner_arg = self._lu.data
148,
38,
148,
24,
1485264542
] |
def perform_lu(self):
    # Matrix is already triangular; nothing to factorize.
    return self._lu
148,
38,
148,
24,
1485264542
] |
def inner_solve(u, data):
    """Pure back substitution for an upper-triangular matrix with bands
    at offsets (0, 2); overwrites ``u``."""
    d = data[0, :]
    u1 = data[1, 2:]
    n = d.shape[0]
    # last two rows have no upper-band coupling
    u[n-1] = u[n-1]/d[n-1]
    u[n-2] = u[n-2]/d[n-2]
    for i in reversed(range(n-2)):
        u[i] = (u[i] - u1[i]*u[i+2])/d[i]
148,
38,
148,
24,
1485264542
] |
def Solve(u, data, axis=0):
    # No pure-Python fallback exists; only the optimized (compiled)
    # routine is provided for this solver.
    raise NotImplementedError('Only optimized version')
148,
38,
148,
24,
1485264542
] |
def __init__(self, mat):
    # Upper-triangular matrix: no factorization needed, so the raw band
    # storage is passed straight to inner_solve.
    BandedMatrixSolver.__init__(self, mat)
    self._inner_arg = self._lu.data
148,
38,
148,
24,
1485264542
] |
def perform_lu(self):
    # Matrix is already triangular; nothing to factorize.
    return self._lu
148,
38,
148,
24,
1485264542
] |
def inner_solve(u, data):
    """Pure back substitution for an upper-triangular matrix with bands
    at offsets (0, 2, 4), stored in rows 0, 1 and 2 of ``data``;
    overwrites ``u``.

    Fix: the second upper band lives in row 2 of ``data``. Reading it
    from row 1 (``data[1, 4:]``, as before) aliased it with a shifted
    copy of the first band and produced wrong solutions.
    """
    d = data[0, :]
    u1 = data[1, 2:]
    u2 = data[2, 4:]   # was data[1, 4:], which aliased u1
    n = d.shape[0]
    # last four rows couple to fewer bands
    u[n-1] = u[n-1]/d[n-1]
    u[n-2] = u[n-2]/d[n-2]
    u[n-3] = (u[n-3]-u1[n-3]*u[n-1])/d[n-3]
    u[n-4] = (u[n-4]-u1[n-4]*u[n-2])/d[n-4]
    for i in range(n - 5, -1, -1):
        u[i] = (u[i] - u1[i]*u[i+2] - u2[i]*u[i+4])/d[i]
148,
38,
148,
24,
1485264542
] |
def Solve(u, data, axis=0):
    # No pure-Python fallback exists; only the optimized (compiled)
    # routine is provided for this solver.
    raise NotImplementedError('Only optimized version')
148,
38,
148,
24,
1485264542
] |
def __init__(self, mat, format=None):
    # Sparse storage format comes from the global config unless overridden.
    format = config['matrix']['sparse']['solve'] if format is None else format
    SparseMatrixSolver.__init__(self, mat)
    # Assemble the scipy sparse matrix once in the requested format.
    self.mat = self.mat.diags(format)
148,
38,
148,
24,
1485264542
] |
def __init__(self, tpmats):
    # Simplify/combine the tensor-product matrices where possible, then
    # split off any boundary-condition matrices from the system.
    tpmats = get_simplified_tpmatrices(tpmats)
    bc_mats = extract_bc_matrices([tpmats])
    self.tpmats = tpmats
    self.bc_mats = bc_mats
    self.T = tpmats[0].space
    self.mats2D = {}   # assembled 2D matrices, filled lazily
    self._lu = None    # cached LU factorization(s)
148,
38,
148,
24,
1485264542
] |
def get_diagonal_axis(self):
    """Return the single axis along which the matrices are diagonal
    (the complement of the non-diagonal axes within (0, 1, 2))."""
    nondiag = self.T.get_nondiagonal_axes()
    diag = np.setxor1d([0, 1, 2], nondiag)
    assert len(diag) == 1
    return diag[0]
148,
38,
148,
24,
1485264542
] |
def apply_constraints(self, b, constraints):
    """Apply constraints to matrix and rhs vector `b`

    Parameters
    ----------
    b : array
    constraints : tuple of 2-tuples
        The 2-tuples represent (row, val)
        The constraint indents the matrix row and sets b[row] = val
    """
    if len(constraints) > 0:
        if self._lu is None:
            # Matrix not yet factorized: ident the constrained rows in
            # the assembled matrix (LIL is efficient for row edits).
            A = self.mats2D[0]
            A = A.tolil()
            for (row, val) in constraints:
                _, zerorow = A[row].nonzero()
                A[(row, zerorow)] = 0
                A[row, row] = 1
                b[row] = val
            self.mats2D[0] = A.tocsc()
        else:
            # Already factorized: the matrix rows were idented earlier,
            # only the rhs needs pinning.
            for (row, val) in constraints:
                b[row] = val
    return b
148,
38,
148,
24,
1485264542
] |
def perform_lu(self):
    """Compute (and cache) SuperLU factorizations of the assembled
    matrices: a single one in 2D, or one per index along the diagonal
    axis in 3D. Returns the dict of factorizations."""
    if self._lu is not None:
        return self._lu
    ndim = self.tpmats[0].dimensions
    self._lu = {}
    if ndim == 2:
        self._lu[0] = splu(self.mats2D[0], permc_spec=config['matrix']['sparse']['permc_spec'])
    else:
        diagonal_axis = self.get_diagonal_axis()
        for i in range(self.T.shape(True)[diagonal_axis]):
            self._lu[i] = splu(self.mats2D[i], permc_spec=config['matrix']['sparse']['permc_spec'])
    return self._lu
148,
38,
148,
24,
1485264542
] |
def __init__(self, tpmats):
    # This solver handles exactly one (simplified) tensor-product matrix.
    tpmats = get_simplified_tpmatrices(tpmats)
    assert len(tpmats) == 1
    self.mat = tpmats[0]
148,
38,
148,
24,
1485264542
] |
def __init__(self, tpmats):
    # Split off boundary-condition matrices, then sum the remaining
    # matrices into one global scipy sparse matrix in CSC format.
    bc_mats = extract_bc_matrices([tpmats])
    self.tpmats = tpmats
    self.bc_mats = bc_mats
    self._lu = None
    m = tpmats[0]
    self.T = T = m.space
    assert m._issimplified is False, "Cannot use simplified matrices with this solver"
    mat = m.diags(format='csc')
    # NOTE(review): the loop variable reuses/shadows `m` above.
    for m in tpmats[1:]:
        mat = mat + m.diags('csc')
    self.mat = mat
148,
38,
148,
24,
1485264542
] |
def apply_constraints(A, b, constraints):
    """Apply constraints to matrix `A` and rhs vector `b`

    Parameters
    ----------
    A : Sparse matrix
    b : array
    constraints : tuple of 2-tuples
        The 2-tuples represent (row, val)
        The constraint indents the matrix row and sets b[row] = val
    """
    if not len(constraints):
        return A, b
    # LIL format is efficient for row-wise edits
    A = A.tolil()
    for row, val in constraints:
        _, cols = A[row].nonzero()
        A[(row, cols)] = 0
        A[row, row] = 1
        b[row] = val
    return A.tocsc(), b
148,
38,
148,
24,
1485264542
] |
def __init__(self, tpmats):
    # Same setup as the generic 2D solver.
    Solver2D.__init__(self, tpmats)
148,
38,
148,
24,
1485264542
] |
def __init__(self, tpmats):
    # Same setup as the generic 2D solver.
    Solver2D.__init__(self, tpmats)
148,
38,
148,
24,
1485264542
] |
def __init__(self, mats):
    # Solver for matrices that are diagonal in all but one axis.
    assert isinstance(mats, list)
    mats = get_simplified_tpmatrices(mats)
    assert len(mats[0].naxes) == 1   # exactly one non-diagonal axis
    self.naxes = mats[0].naxes[0]
    bc_mats = extract_bc_matrices([mats])
    self.mats = mats
    self.bc_mats = bc_mats
    self.solvers1D = None
    self.assemble()
    self._lu = False   # flips to True once perform_lu has run
    self._data = None
148,
38,
148,
24,
1485264542
] |
def assemble(self):
    """Build one 1D solver per point along the diagonal axes by summing
    each matrix' 1D component weighted with its (broadcast) scale."""
    ndim = self.mats[0].dimensions
    shape = self.mats[0].space.shape(True)
    self.solvers1D = []
    if ndim == 2:
        # Iterate over the single diagonal axis
        zi = np.ndindex((1, shape[1])) if self.naxes == 0 else np.ndindex((shape[0], 1))
        other_axis = (self.naxes+1) % 2
        for i in zi:
            sol = None
            for mat in self.mats:
                # Per-index scale if it varies along the diagonal axis,
                # otherwise the constant scale
                sc = mat.scale[i] if mat.scale.shape[other_axis] > 1 else mat.scale[0, 0]
                if sol:
                    sol += mat.mats[self.naxes]*sc
                else:
                    sol = mat.mats[self.naxes]*sc
            self.solvers1D.append(Solver(sol))
    elif ndim == 3:
        s = [0, 0, 0]
        # The two diagonal axes
        n0, n1 = np.setxor1d((0, 1, 2), self.naxes)
        for i in range(shape[n0]):
            self.solvers1D.append([])
            s[n0] = i
            for j in range(shape[n1]):
                sol = None
                s[n1] = j
                for mat in self.mats:
                    sc = np.broadcast_to(mat.scale, shape)[tuple(s)]
                    if sol:
                        sol += mat.mats[self.naxes]*sc
                    else:
                        sol = mat.mats[self.naxes]*sc
                self.solvers1D[-1].append(Solver(sol))
148,
38,
148,
24,
1485264542
] |
def perform_lu(self):
    """Trigger LU factorization of every 1D solver. 2D problems keep a
    flat list of solvers, 3D problems a nested list. Idempotent."""
    if self._lu is True:
        return
    if isinstance(self.solvers1D[0], SparseMatrixSolver):
        # 2D: one solver per transverse index
        for solver in self.solvers1D:
            solver.perform_lu()
    else:
        # 3D: one list of solvers per transverse plane
        for row in self.solvers1D:
            for solver in row:
                solver.perform_lu()
    self._lu = True
148,
38,
148,
24,
1485264542
] |
def solve_data(u, data, sol, naxes, is_rank_zero):
s = [0]*u.ndim
s[naxes] = slice(None)
paxes = np.setxor1d(range(u.ndim), naxes)
if u.ndim == 2:
for i in range(u.shape[paxes[0]]):
if i == 0 and is_rank_zero:
continue
s[paxes[0]] = i
s0 = tuple(s)
sol(u[s0], data[i])
elif u.ndim == 3:
for i in range(u.shape[paxes[0]]):
s[paxes[0]] = i
for j in range(u.shape[paxes[1]]):
if i == 0 and j == 0 and is_rank_zero:
continue
s[paxes[1]] = j
s0 = tuple(s)
sol(u[s0], data[i, j])
return u | spectralDNS/shenfun | [
148,
38,
148,
24,
1485264542
] |
def solve(self, u, b, solvers1D, naxes):
    """Copy ``b`` into ``u`` (when distinct) and run each cached 1D
    solver in place along axis ``naxes``."""
    if u is not b:
        u[:] = b
    sl = [0]*u.ndim
    sl[naxes] = slice(None)
    paxes = np.setxor1d(range(u.ndim), naxes)
    if u.ndim == 2:
        p0 = paxes[0]
        for i, sol in enumerate(solvers1D):
            sl[p0] = i
            sol.inner_solve(u[tuple(sl)], sol._inner_arg)
    elif u.ndim == 3:
        p0, p1 = paxes
        for i, row in enumerate(solvers1D):
            sl[p0] = i
            for j, sol in enumerate(row):
                sl[p1] = j
                sol.inner_solve(u[tuple(sl)], sol._inner_arg)
148,
38,
148,
24,
1485264542
] |
def __init__(self, mats):
    # Accept either an assembled BlockMatrix or a raw list of matrices;
    # boundary-condition matrices are split into their own BlockMatrix.
    assert isinstance(mats, (BlockMatrix, list))
    self.bc_mat = None
    self._lu = None
    if isinstance(mats, BlockMatrix):
        mats = mats.get_mats()
    bc_mats = extract_bc_matrices([mats])
    assert len(mats) > 0
    self.mat = BlockMatrix(mats)
    if len(bc_mats) > 0:
        self.bc_mat = BlockMatrix(bc_mats)
148,
38,
148,
24,
1485264542
] |
def apply_constraint(A, b, offset, i, constraint):
    """Pin one global degree of freedom: set b[row] to the constraint
    value and ident the corresponding matrix row.

    Only acts on MPI rank 0 and for the zeroth block index ``i`` (int
    or tuple); otherwise (A, b) are returned untouched. ``constraint``
    is a 3-tuple — presumably (block, row, value); TODO confirm.
    ``offset`` is the block's first row in the assembled system.
    """
    if constraint is None or comm.Get_rank() > 0:
        return A, b
    if isinstance(i, int):
        if i > 0:
            return A, b
    if isinstance(i, tuple):
        if np.sum(np.array(i)) > 0:
            return A, b
    row = offset + constraint[1]
    assert isinstance(constraint, tuple)
    assert len(constraint) == 3
    val = constraint[2]
    b[row] = val
    if A is not None:
        # Zero the existing row and put 1 on the diagonal (LIL is
        # efficient for row edits; convert back to CSC afterwards)
        A = A.tolil()
        r = A.getrow(row).nonzero()
        A[(row, r[1])] = 0
        A[row, row] = 1
        A = A.tocsc()
    return A, b
148,
38,
148,
24,
1485264542
] |
def custom_import_install():
    # Install the custom importer exactly once: only swap it in while
    # the builtin __import__ is still the native one (Python 2 code).
    if __builtin__.__import__ == NATIVE_IMPORTER:
        # Modules imported before installation must never be resolved
        # through the application-relative scheme.
        INVALID_MODULES.update(sys.modules.keys())
        __builtin__.__import__ = custom_importer
105,
24,
105,
279,
1361218666
] |
def is_tracking_changes():
    # Per-request flag deciding whether the change-tracking importer is
    # used instead of the native one (read by custom_importer).
    return current.request._custom_import_track_changes
105,
24,
105,
279,
1361218666
] |
def custom_importer(name, globals=None, locals=None, fromlist=None, level=-1):
    """
    The web2py custom importer. Like the standard Python importer but it
    tries to transform import statements as something like
    "import applications.app_name.modules.x".
    If the import failed, fall back on naive_importer

    NOTE: Python 2 only (``except X, e`` / ``raise E, v, tb`` syntax).
    """
    globals = globals or {}
    locals = locals or {}
    fromlist = fromlist or []
    try:
        if current.request._custom_import_track_changes:
            base_importer = TRACK_IMPORTER
        else:
            base_importer = NATIVE_IMPORTER
    except: # there is no current.request (should never happen)
        base_importer = NATIVE_IMPORTER
    # if not relative and not from applications:
    if hasattr(current, 'request') \
            and level <= 0 \
            and not name.partition('.')[0] in INVALID_MODULES \
            and isinstance(globals, dict):
        import_tb = None
        try:
            try:
                oname = name if not name.startswith('.') else '.'+name
                return NATIVE_IMPORTER(oname, globals, locals, fromlist, level)
            except ImportError:
                # Fall back to importing relative to the application's
                # modules package, e.g. applications.<app>.modules.<name>
                items = current.request.folder.split(os.path.sep)
                if not items[-1]:
                    items = items[:-1]
                modules_prefix = '.'.join(items[-2:]) + '.modules'
                if not fromlist:
                    # import like "import x" or "import x.y"
                    result = None
                    for itemname in name.split("."):
                        itemname = itemname.encode('utf-8')
                        new_mod = base_importer(
                            modules_prefix, globals, locals, [itemname], level)
                        try:
                            result = result or new_mod.__dict__[itemname]
                        except KeyError, e:
                            raise ImportError, 'Cannot import module %s' % str(e)
                        modules_prefix += "." + itemname
                    return result
                else:
                    # import like "from x import a, b, ..."
                    pname = modules_prefix + "." + name
                    return base_importer(pname, globals, locals, fromlist, level)
        except ImportError, e1:
            import_tb = sys.exc_info()[2]
            try:
                return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
            except ImportError, e3:
                raise ImportError, e1, import_tb # there an import error in the module
        except Exception, e2:
            raise e2 # there is an error in the module
        finally:
            if import_tb:
                import_tb = None
    return NATIVE_IMPORTER(name, globals, locals, fromlist, level)
105,
24,
105,
279,
1361218666
] |
def __init__(self):
    """Track per-file import dates so changed modules can be reloaded."""
    self._import_dates = {} # Import dates of the files of the modules
105,
24,
105,
279,
1361218666
] |
def _update_dates(self, name, globals, locals, fromlist, level):
    """
    Update all the dates associated to the statement import. A single
    import statement may import many modules.
    """
    self._reload_check(name, globals, locals, level)
    # "from x import a, b" also touches each imported submodule
    for fromlist_name in fromlist or []:
        pname = "%s.%s" % (name, fromlist_name)
        self._reload_check(pname, globals, locals, level)
105,
24,
105,
279,
1361218666
] |
def _get_module_file(self, module):
    """
    Get the absolute path file associated to the module or None.
    """
    # Builtins and C extensions may have no __file__ attribute
    file = getattr(module, "__file__", None)
    if file:
        # Make path absolute if not:
        file = os.path.splitext(file)[0] + ".py" # Change .pyc for .py
        if file.endswith(self.PACKAGE_PATH_SUFFIX):
            file = os.path.dirname(file) # Track dir for packages
    return file
105,
24,
105,
279,
1361218666
] |
def __init__(
self,
tool_name,
work_root,
test_name=None,
param_types=["plusarg", "vlogdefine", "vlogparam"],
files=None,
tool_options={},
ref_dir=".",
use_vpi=False,
toplevel="top_module", | SymbiFlow/edalize | [
5,
3,
5,
5,
1567527171
] |
def compare_files(self, files, ref_subdir="."):
    """Check some files in the work root match those in the ref directory

    The files argument gives the list of files to check. These are
    interpreted as paths relative to the work directory and relative to
    self.ref_dir / ref_subdir.

    This is a wrapper around edalize_common.compare_files: see its
    documentation for how to use the :envvar:`GOLDEN_RUN` environment
    variable to copy across a golden reference.
    """
    reference = os.path.normpath(os.path.join(self.ref_dir, ref_subdir))
    return compare_files(reference, self.work_root, files)
5,
3,
5,
5,
1567527171
] |
def make_edalize_test(monkeypatch, tmpdir):
    """A factory fixture to make an edalize backend with work_root directory

    The returned factory method takes a `tool_name` (the name of the tool) and
    the keyword arguments supported by :class:`TestFixture`. It returns a
    :class:`TestFixture` object, whose `work_root` is a temporary directory.
    """
    # Prepend directory `mock_commands` to PATH so tool invocations hit mocks
    monkeypatch.setenv("PATH", os.path.join(tests_dir, "mock_commands"), ":")
    fixtures = []

    def _fun(tool_name, **kwargs):
        # Each created fixture gets its own numbered work directory
        workdir = tmpdir / str(len(fixtures))
        workdir.mkdir()
        fixture = TestFixture(tool_name, str(workdir), **kwargs)
        fixtures.append(fixture)
        return fixture

    return _fun
5,
3,
5,
5,
1567527171
] |
def param_gen(paramtypes):
    """Generate dictionary of definitions in *paramtypes* list."""
    defaults = {"bool": True, "int": 42, "str": "hello"}
    defs = OrderedDict()
    for ptype in paramtypes:
        for dtype in ["bool", "int", "str"]:
            defs["%s_%s" % (ptype, dtype)] = {
                "datatype": dtype,
                "default": defaults[dtype],
                "description": "",
                "paramtype": ptype,
            }
    return defs
5,
3,
5,
5,
1567527171
] |
def api_call(self, *args, **kwargs):
    """Translate an ``ids`` iterable into the API's comma-separated
    ``group_ids`` string before delegating to the parent manager."""
    if 'ids' in kwargs:
        kwargs['group_ids'] = ','.join(str(pk) for pk in kwargs.pop('ids'))
    return super(GroupRemoteManager, self).api_call(*args, **kwargs)
2,
6,
2,
1,
1355991753
] |
def fetch(self, *args, **kwargs):
    """
    Add additional fields to parent fetch request
    """
    # Always request members_count unless the caller chose其 own fields
    kwargs.setdefault('fields', 'members_count')
    return super(GroupRemoteManager, self).fetch(*args, **kwargs)
2,
6,
2,
1,
1355991753
] |
def check_members_count(self, group, count):
    """Sanity-check that *count* fetched members agrees with the group's
    reported members_count to within 1%; raise otherwise."""
    if not (group.members_count and count > 0):
        return
    division = float(group.members_count) / count
    if 0.99 > division or 1.01 < division:
        raise CheckMembersCountFailed("Suspicious ammount of members fetched for group %s. "
                                      "Actual ammount is %d, fetched %d, division is %s" % (
                                      group, group.members_count, count, division))
2,
6,
2,
1,
1355991753
] |
def __str__(self):
    # Human-readable representation: the group's name.
    return self.name
2,
6,
2,
1,
1355991753
] |
def refresh_kwargs(self):
    """Kwargs used to re-fetch this instance from the remote API."""
    return dict(ids=[self.remote_id])
2,
6,
2,
1,
1355991753
] |
def wall_comments(self):
    """QuerySet of wall comments belonging to this group (requires the
    vkontakte_wall app)."""
    if 'vkontakte_wall' not in settings.INSTALLED_APPS:
        raise ImproperlyConfigured("Application 'vkontakte_wall' not in INSTALLED_APPS")
    from vkontakte_wall.models import Comment
    # TODO: improve schema and queries with using owner_id field
    return Comment.objects.filter(remote_id__startswith='-%s_' % self.remote_id)
2,
6,
2,
1,
1355991753
] |
def topics_comments(self):
    """QuerySet of board topic comments belonging to this group
    (requires the vkontakte_board app)."""
    if 'vkontakte_board' not in settings.INSTALLED_APPS:
        raise ImproperlyConfigured("Application 'vkontakte_board' not in INSTALLED_APPS")
    from vkontakte_board.models import Comment
    # TODO: improve schema and queries with using owner_id field
    return Comment.objects.filter(remote_id__startswith='-%s_' % self.remote_id)
2,
6,
2,
1,
1355991753
] |
def fetch_topics(self, *args, **kwargs):
    """Fetch this group's board topics (requires the vkontakte_board app)."""
    if 'vkontakte_board' not in settings.INSTALLED_APPS:
        raise ImproperlyConfigured("Application 'vkontakte_board' not in INSTALLED_APPS")
    from vkontakte_board.models import Topic
    return Topic.remote.fetch(group=self, *args, **kwargs)
2,
6,
2,
1,
1355991753
] |
def profile_edit(request): | eldarion/pycon | [
105,
22,
105,
4,
1277157390
] |
def scurve(x, A, mu, sigma):
    """Error-function S-curve: rises from 0 to A, centred at mu, with
    transition width set by sigma."""
    z = (x - mu) / (np.sqrt(2) * sigma)
    return 0.5 * A * erf(z) + 0.5 * A
9,
17,
9,
3,
1422005052
] |
def __init__(self, raw_data_file=None, analyzed_data_file=None, create_pdf=True, scan_parameter_name=None):
    '''Initialize the AnalyzeRawData object:
    - The c++ objects (Interpreter, Histogrammer, Clusterizer) are constructed
    - Create one scan parameter table from all provided raw data files
    - Create PdfPages object if needed

    Parameters
    ----------
    raw_data_file : string or list, tuple, set of strings
        Filename or a list of filenames of the raw data file(s) and analyzed_data_file will be overwritten.
        If None and if analyzed_data_file is not None, any existing analyzed_data_file will be opened, otherwise created.
        Filename extension (.h5) does not need to be provided.
    analyzed_data_file : string
        The file name of the output analyzed data file.
        If None and if raw_data_file is not None, the filename will be generated from the raw_data_file.
        Filename extension (.h5) does not need to be provided.
    create_pdf : boolean
        If True, plots will be written into a PDF file. Will be set to False, if raw_data_file is None.
    scan_parameter_name : string or iterable
        The name/names of scan parameter(s) to be used during analysis. If None, the scan parameter
        table is used to extract the scan parameters. Otherwise no scan parameter is set.
    '''
    self.interpreter = PyDataInterpreter()
    self.histogram = PyDataHistograming()
    # --- resolve the list of raw data input files (Python 2: basestring) ---
    raw_data_files = []
    if isinstance(raw_data_file, basestring):
        # normalize path
        raw_data_file = os.path.abspath(raw_data_file)
        f_list = analysis_utils.get_data_file_names_from_scan_base(raw_data_file, sort_by_time=True, meta_data_v2=self.interpreter.meta_table_v2)
        if f_list:
            raw_data_files = f_list
        else:
            if os.path.splitext(raw_data_file)[1].lower() != ".h5":
                raw_data_files.append(os.path.splitext(raw_data_file)[0] + ".h5")
            else:
                raw_data_files.append(raw_data_file)
    elif isinstance(raw_data_file, (list, tuple, set)): # iterable of raw data files
        for one_raw_data_file in raw_data_file:
            # normalize path
            one_raw_data_file = os.path.abspath(one_raw_data_file)
            if os.path.splitext(one_raw_data_file)[1].lower() != ".h5":
                raw_data_files.append(os.path.splitext(one_raw_data_file)[0] + ".h5")
            else:
                raw_data_files.append(one_raw_data_file)
    else:
        raw_data_files = None
    # --- derive the output (analyzed) file name ---
    if analyzed_data_file is not None:
        # normalize path
        analyzed_data_file = os.path.abspath(analyzed_data_file)
        if os.path.splitext(analyzed_data_file)[1].lower() != ".h5":
            self._analyzed_data_file = os.path.splitext(analyzed_data_file)[0] + ".h5"
        else: # iterable of raw data files
            self._analyzed_data_file = analyzed_data_file
    else:
        if raw_data_file is not None:
            if isinstance(raw_data_file, basestring):
                self._analyzed_data_file = os.path.splitext(raw_data_file)[0] + '_interpreted.h5'
            else: # iterable of raw data files
                commonprefix = os.path.commonprefix(raw_data_files)
                if commonprefix:
                    # use common string for output filename
                    one_raw_data_file = os.path.abspath(commonprefix)
                else:
                    # take 1st filename for output filename
                    one_raw_data_file = os.path.abspath(raw_data_files[0])
                self._analyzed_data_file = os.path.splitext(one_raw_data_file)[0] + '_interpreted.h5'
        else:
            self._analyzed_data_file = None
    # create a scan parameter table from all raw data files
    if raw_data_files is not None:
        self.files_dict = analysis_utils.get_parameter_from_files(raw_data_files, parameters=scan_parameter_name)
        if not analysis_utils.check_parameter_similarity(self.files_dict):
            raise analysis_utils.NotSupportedError('Different scan parameters in multiple files are not supported.')
        self.scan_parameters = analysis_utils.create_parameter_table(self.files_dict)
        scan_parameter_names = analysis_utils.get_scan_parameter_names(self.scan_parameters)
        logging.info('Scan parameter(s) from raw data file(s): %s', (', ').join(scan_parameter_names) if scan_parameter_names else 'None',)
    else:
        self.files_dict = None
        self.scan_parameters = None
    self.out_file_h5 = None
    self.set_standard_settings()
    # --- open output HDF5 file ---
    if self._analyzed_data_file is not None:
        if raw_data_file is None:
            # assume that output file already exists containing analyzed raw data
            self.out_file_h5 = tb.open_file(self._analyzed_data_file, mode="a", title="Interpreted FE-I4 raw data")
        else:
            # raw data files are given, overwrite any existing file
            self.out_file_h5 = tb.open_file(self._analyzed_data_file, mode="w", title="Interpreted FE-I4 raw data")
    # --- optional PDF output for plots ---
    if raw_data_file is not None and create_pdf:
        if isinstance(raw_data_file, basestring):
            output_pdf_filename = os.path.splitext(raw_data_file)[0] + ".pdf"
        else: # iterable of raw data files
            one_raw_data_file = os.path.abspath(raw_data_files[0])
            output_pdf_filename = os.path.splitext(one_raw_data_file)[0] + ".pdf"
        logging.info('Opening output PDF file: %s', output_pdf_filename)
        self.output_pdf = PdfPages(output_pdf_filename)
    else:
        self.output_pdf = None
    self._scan_parameter_name = scan_parameter_name
    self._settings_from_file_set = False # the scan settings are in a list of files only in the first one, thus set this flag to suppress warning for other files
9,
17,
9,
3,
1422005052
] |
def __exit__(self, *exc_info):
    # Context-manager exit: always release resources; exceptions propagate.
    self.close()
9,
17,
9,
3,
1422005052
] |
def end_of_cluster_function(hits, clusters, cluster_size, cluster_hit_indices, cluster_index, cluster_id, charge_correction, noisy_pixels, disabled_pixels, seed_hit_index):
    # Clusterizer hook: copy the seed hit's event_status word onto the
    # finished cluster record.
    clusters[cluster_index].event_status = hits[seed_hit_index].event_status
9,
17,
9,
3,
1422005052
] |
def close(self):
    # Release the C++ helper objects first, then close any open HDF5
    # and PDF outputs.
    del self.interpreter
    del self.histogram
    del self.clusterizer
    self._close_h5()
    self._close_pdf()
9,
17,
9,
3,
1422005052
] |
def _close_pdf(self):
    # Close and forget the PdfPages object, if one was opened. Safe to
    # call repeatedly.
    if self.output_pdf is not None:
        logging.info('Closing output PDF file: %s', str(self.output_pdf._file.fh.name))
        self.output_pdf.close()
        self.output_pdf = None
9,
17,
9,
3,
1422005052
] |
def reset(self):
    '''Reset the c++ libraries for new analysis.
    '''
    self.interpreter.reset()
    self.histogram.reset()
9,
17,
9,
3,
1422005052
] |
def chunk_size(self):
    # Getter: number of raw data words processed per read chunk.
    return self._chunk_size
9,
17,
9,
3,
1422005052
] |
def chunk_size(self, value):
    # Setter: size the C++ hit buffer for the worst case before storing.
    self.interpreter.set_hit_array_size(2 * value) # worst case: one raw data word becoming 2 hit words
    self._chunk_size = value
9,
17,
9,
3,
1422005052
] |
def create_hit_table(self):
    # Getter: whether a hit table is written during interpretation.
    return self._create_hit_table
9,
17,
9,
3,
1422005052
] |
def create_hit_table(self, value):
    # Setter: store the flag (evaluated by the Python-side analysis).
    self._create_hit_table = value
9,
17,
9,
3,
1422005052
] |
def create_empty_event_hits(self):
    # Getter: whether hits are created for empty events.
    return self._create_empty_event_hits
9,
17,
9,
3,
1422005052
] |
def create_empty_event_hits(self, value):
    # Setter: store the flag and forward it to the C++ interpreter.
    self._create_empty_event_hits = value
    self.interpreter.create_empty_event_hits(value)
9,
17,
9,
3,
1422005052
] |
def create_occupancy_hist(self):
    # Getter: whether the occupancy histogram is filled.
    return self._create_occupancy_hist
9,
17,
9,
3,
1422005052
] |
def create_occupancy_hist(self, value):
    # Setter: store the flag and forward it to the C++ histogrammer.
    self._create_occupancy_hist = value
    self.histogram.create_occupancy_hist(value)
9,
17,
9,
3,
1422005052
] |
def create_mean_tot_hist(self):
    # Getter: whether the mean-ToT histogram is filled.
    return self._create_mean_tot_hist
9,
17,
9,
3,
1422005052
] |
def create_mean_tot_hist(self, value):
    # Setter: store the flag and forward it to the C++ histogrammer.
    self._create_mean_tot_hist = value
    self.histogram.create_mean_tot_hist(value)
9,
17,
9,
3,
1422005052
] |
def create_source_scan_hist(self):
    # Getter: whether source-scan histograms are produced.
    return self._create_source_scan_hist
9,
17,
9,
3,
1422005052
] |
def create_source_scan_hist(self, value):
    # Setter: store the flag (evaluated by the Python-side analysis).
    self._create_source_scan_hist = value
9,
17,
9,
3,
1422005052
] |
def create_tot_hist(self):
    # Getter: whether the ToT histogram is filled.
    #
    # Fix: this previously returned ``self.create_tot_hist`` — the
    # property itself — causing infinite recursion instead of returning
    # the backing attribute (every sibling getter returns ``self._create_*``).
    return self._create_tot_hist
9,
17,
9,
3,
1422005052
] |
def create_tot_hist(self, value):
    # Setter: store the flag and forward it to the C++ histogrammer.
    self._create_tot_hist = value
    self.histogram.create_tot_hist(value)
9,
17,
9,
3,
1422005052
] |
def create_tdc_hist(self):
    # Getter: whether the TDC histogram is filled.
    return self._create_tdc_hist
9,
17,
9,
3,
1422005052
] |
def create_tdc_hist(self, value):
    # Setter: store the flag and forward it to the C++ histogrammer.
    self._create_tdc_hist = value
    self.histogram.create_tdc_hist(value)
9,
17,
9,
3,
1422005052
] |
def create_tdc_pixel_hist(self):
    # Getter: whether the per-pixel TDC histogram is filled.
    return self._create_tdc_pixel_hist
9,
17,
9,
3,
1422005052
] |
def create_tdc_pixel_hist(self, value):
    # Setter: store the flag and forward it to the C++ histogrammer.
    self._create_tdc_pixel_hist = value
    self.histogram.create_tdc_pixel_hist(value)
9,
17,
9,
3,
1422005052
] |
def create_tot_pixel_hist(self):
    # Getter: whether the per-pixel ToT histogram is filled.
    return self._create_tot_pixel_hist
9,
17,
9,
3,
1422005052
] |
def create_tot_pixel_hist(self, value):
    # Setter: store the flag and forward it to the C++ histogrammer.
    self._create_tot_pixel_hist = value
    self.histogram.create_tot_pixel_hist(value)
9,
17,
9,
3,
1422005052
] |
def create_rel_bcid_hist(self):
    # Getter: whether the relative-BCID histogram is filled.
    return self._create_rel_bcid_hist
9,
17,
9,
3,
1422005052
] |
def create_rel_bcid_hist(self, value):
    # Setter: store the flag and forward it to the C++ histogrammer.
    self._create_rel_bcid_hist = value
    self.histogram.create_rel_bcid_hist(value)
9,
17,
9,
3,
1422005052
] |
def create_threshold_hists(self):
    # Getter: whether threshold/noise histograms are produced.
    return self._create_threshold_hists
9,
17,
9,
3,
1422005052
] |
def create_threshold_hists(self, value):
    # Setter: store the flag (evaluated by the Python-side analysis).
    self._create_threshold_hists = value
9,
17,
9,
3,
1422005052
] |
def create_threshold_mask(self):
    # Getter: whether a mask is generated during threshold analysis.
    return self._create_threshold_mask
9,
17,
9,
3,
1422005052
] |
def create_threshold_mask(self, value):
    # Setter: store the flag (evaluated by the Python-side analysis).
    self._create_threshold_mask = value
9,
17,
9,
3,
1422005052
] |
def create_fitted_threshold_mask(self):
    # Getter: whether a mask is generated during fitted-threshold analysis.
    return self._create_fitted_threshold_mask
9,
17,
9,
3,
1422005052
] |
def create_fitted_threshold_mask(self, value):
    # Setter: store the flag (evaluated by the Python-side analysis).
    self._create_fitted_threshold_mask = value
9,
17,
9,
3,
1422005052
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.