_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q258000 | merge_DA_ph_times | validation | def merge_DA_ph_times(ph_times_d, ph_times_a):
"""Returns a merged timestamp array for Donor+Accept. and bool mask for A.
"""
ph_times = np.hstack([ph_times_d, ph_times_a])
a_em = np.hstack([np.zeros(ph_times_d.size, dtype=np.bool),
np.ones(ph_times_a.size, dtype=np.bool)])
index_sort = ph_times.argsort()
return ph_times[index_sort], a_em[index_sort] | python | {
"resource": ""
} |
q258001 | load_PSFLab_file | validation | def load_PSFLab_file(fname):
"""Load the array `data` in the .mat file `fname`."""
if os.path.exists(fname) or os.path.exists(fname + '.mat'):
return loadmat(fname)['data']
else:
raise IOError("Can't find PSF file '%s'" % fname) | python | {
"resource": ""
} |
q258002 | NumericPSF.hash | validation | def hash(self):
"""Return an hash string computed on the PSF data."""
hash_list = []
for key, value in sorted(self.__dict__.items()):
if not callable(value):
if isinstance(value, np.ndarray):
hash_list.append(value.tostring())
else:
hash_list.append(str(value))
return hashlib.md5(repr(hash_list).encode()).hexdigest() | python | {
"resource": ""
} |
q258003 | git_path_valid | validation | def git_path_valid(git_path=None):
"""
Check whether the git executable is found.
"""
if git_path is None and GIT_PATH is None:
return False
if git_path is None: git_path = GIT_PATH
try:
call([git_path, '--version'])
return True
except OSError:
return False | python | {
"resource": ""
} |
q258004 | get_git_version | validation | def get_git_version(git_path=None):
"""
Get the Git version.
"""
if git_path is None: git_path = GIT_PATH
git_version = check_output([git_path, "--version"]).split()[2]
return git_version | python | {
"resource": ""
} |
q258005 | check_clean_status | validation | def check_clean_status(git_path=None):
"""
Returns whether there are uncommitted changes in the working dir.
"""
output = get_status(git_path)
is_unmodified = (len(output.strip()) == 0)
return is_unmodified | python | {
"resource": ""
} |
q258006 | get_last_commit_line | validation | def get_last_commit_line(git_path=None):
"""
Get one-line description of HEAD commit for repository in current dir.
"""
if git_path is None: git_path = GIT_PATH
output = check_output([git_path, "log", "--pretty=format:'%ad %h %s'",
"--date=short", "-n1"])
return output.strip()[1:-1] | python | {
"resource": ""
} |
q258007 | get_last_commit | validation | def get_last_commit(git_path=None):
"""
Get the HEAD commit SHA1 of repository in current dir.
"""
if git_path is None: git_path = GIT_PATH
line = get_last_commit_line(git_path)
revision_id = line.split()[1]
return revision_id | python | {
"resource": ""
} |
q258008 | print_summary | validation | def print_summary(string='Repository', git_path=None):
"""
Print the last commit line and eventual uncommitted changes.
"""
if git_path is None: git_path = GIT_PATH
# If git is available, check fretbursts version
if not git_path_valid():
print('\n%s revision unknown (git not found).' % string)
else:
last_commit = get_last_commit_line()
print('\n{} revision:\n {}\n'.format(string, last_commit))
if not check_clean_status():
print('\nWARNING -> Uncommitted changes:')
print(get_status()) | python | {
"resource": ""
} |
q258009 | get_bromo_fnames_da | validation | def get_bromo_fnames_da(d_em_kHz, d_bg_kHz, a_em_kHz, a_bg_kHz,
ID='1+2+3+4+5+6', t_tot='480', num_p='30', pM='64',
t_step=0.5e-6, D=1.2e-11, dir_=''):
"""Get filenames for donor and acceptor timestamps for the given parameters
"""
clk_p = t_step/32. # with t_step=0.5us -> 156.25 ns
E_sim = 1.*a_em_kHz/(a_em_kHz + d_em_kHz)
FRET_val = 100.*E_sim
print("Simulated FRET value: %.1f%%" % FRET_val)
d_em_kHz_str = "%04d" % d_em_kHz
a_em_kHz_str = "%04d" % a_em_kHz
d_bg_kHz_str = "%04.1f" % d_bg_kHz
a_bg_kHz_str = "%04.1f" % a_bg_kHz
print("D: EM %s BG %s " % (d_em_kHz_str, d_bg_kHz_str))
print("A: EM %s BG %s " % (a_em_kHz_str, a_bg_kHz_str))
fname_d = ('ph_times_{t_tot}s_D{D}_{np}P_{pM}pM_'
'step{ts_us}us_ID{ID}_EM{em}kHz_BG{bg}kHz.npy').format(
em=d_em_kHz_str, bg=d_bg_kHz_str, t_tot=t_tot, pM=pM,
np=num_p, ID=ID, ts_us=t_step*1e6, D=D)
fname_a = ('ph_times_{t_tot}s_D{D}_{np}P_{pM}pM_'
'step{ts_us}us_ID{ID}_EM{em}kHz_BG{bg}kHz.npy').format(
em=a_em_kHz_str, bg=a_bg_kHz_str, t_tot=t_tot, pM=pM,
np=num_p, ID=ID, ts_us=t_step*1e6, D=D)
print(fname_d)
print(fname_a)
name = ('BroSim_E{:.1f}_dBG{:.1f}k_aBG{:.1f}k_'
'dEM{:.0f}k').format(FRET_val, d_bg_kHz, a_bg_kHz, d_em_kHz)
return dir_+fname_d, dir_+fname_a, name, clk_p, E_sim | python | {
"resource": ""
} |
q258010 | BaseStore.set_sim_params | validation | def set_sim_params(self, nparams, attr_params):
"""Store parameters in `params` in `h5file.root.parameters`.
`nparams` (dict)
A dict as returned by `get_params()` in `ParticlesSimulation()`
The format is:
keys:
used as parameter name
values: (2-elements tuple)
first element is the parameter value
second element is a string used as "title" (description)
`attr_params` (dict)
A dict whole items are stored as attributes in '/parameters'
"""
for name, value in nparams.items():
val = value[0] if value[0] is not None else 'none'
self.h5file.create_array('/parameters', name, obj=val,
title=value[1])
for name, value in attr_params.items():
self.h5file.set_node_attr('/parameters', name, value) | python | {
"resource": ""
} |
q258011 | Box.volume | validation | def volume(self):
"""Box volume in m^3."""
return (self.x2 - self.x1) * (self.y2 - self.y1) * (self.z2 - self.z1) | python | {
"resource": ""
} |
q258012 | Particles._generate | validation | def _generate(num_particles, D, box, rs):
"""Generate a list of `Particle` objects."""
X0 = rs.rand(num_particles) * (box.x2 - box.x1) + box.x1
Y0 = rs.rand(num_particles) * (box.y2 - box.y1) + box.y1
Z0 = rs.rand(num_particles) * (box.z2 - box.z1) + box.z1
return [Particle(D=D, x0=x0, y0=y0, z0=z0)
for x0, y0, z0 in zip(X0, Y0, Z0)] | python | {
"resource": ""
} |
q258013 | Particles.add | validation | def add(self, num_particles, D):
"""Add particles with diffusion coefficient `D` at random positions.
"""
self._plist += self._generate(num_particles, D, box=self.box,
rs=self.rs) | python | {
"resource": ""
} |
q258014 | ParticlesSimulation.datafile_from_hash | validation | def datafile_from_hash(hash_, prefix, path):
"""Return pathlib.Path for a data-file with given hash and prefix.
"""
pattern = '%s_%s*.h*' % (prefix, hash_)
datafiles = list(path.glob(pattern))
if len(datafiles) == 0:
raise NoMatchError('No matches for "%s"' % pattern)
if len(datafiles) > 1:
raise MultipleMatchesError('More than 1 match for "%s"' % pattern)
return datafiles[0] | python | {
"resource": ""
} |
q258015 | ParticlesSimulation._get_group_randomstate | validation | def _get_group_randomstate(rs, seed, group):
"""Return a RandomState, equal to the input unless rs is None.
When rs is None, try to get the random state from the
'last_random_state' attribute in `group`. When not available,
use `seed` to generate a random state. When seed is None the returned
random state will have a random seed.
"""
if rs is None:
rs = np.random.RandomState(seed=seed)
# Try to set the random state from the last session to preserve
# a single random stream when simulating timestamps multiple times
if 'last_random_state' in group._v_attrs:
rs.set_state(group._v_attrs['last_random_state'])
print("INFO: Random state set to last saved state in '%s'." %
group._v_name)
else:
print("INFO: Random state initialized from seed (%d)." % seed)
return rs | python | {
"resource": ""
} |
q258016 | ParticlesSimulation.compact_name | validation | def compact_name(self, hashsize=6):
"""Compact representation of all simulation parameters
"""
# this can be made more robust for ID > 9 (double digit)
s = self.compact_name_core(hashsize, t_max=True)
s += "_ID%d-%d" % (self.ID, self.EID)
return s | python | {
"resource": ""
} |
q258017 | ParticlesSimulation.numeric_params | validation | def numeric_params(self):
"""A dict containing all the simulation numeric-parameters.
The values are 2-element tuples: first element is the value and
second element is a string describing the parameter (metadata).
"""
nparams = dict(
D = (self.diffusion_coeff.mean(), 'Diffusion coefficient (m^2/s)'),
np = (self.num_particles, 'Number of simulated particles'),
t_step = (self.t_step, 'Simulation time-step (s)'),
t_max = (self.t_max, 'Simulation total time (s)'),
ID = (self.ID, 'Simulation ID (int)'),
EID = (self.EID, 'IPython Engine ID (int)'),
pico_mol = (self.concentration() * 1e12,
'Particles concentration (pM)'))
return nparams | python | {
"resource": ""
} |
q258018 | ParticlesSimulation.print_sizes | validation | def print_sizes(self):
"""Print on-disk array sizes required for current set of parameters."""
float_size = 4
MB = 1024 * 1024
size_ = self.n_samples * float_size
em_size = size_ * self.num_particles / MB
pos_size = 3 * size_ * self.num_particles / MB
print(" Number of particles:", self.num_particles)
print(" Number of time steps:", self.n_samples)
print(" Emission array - 1 particle (float32): %.1f MB" % (size_ / MB))
print(" Emission array (float32): %.1f MB" % em_size)
print(" Position array (float32): %.1f MB " % pos_size) | python | {
"resource": ""
} |
q258019 | ParticlesSimulation.simulate_diffusion | validation | def simulate_diffusion(self, save_pos=False, total_emission=True,
radial=False, rs=None, seed=1, path='./',
wrap_func=wrap_periodic,
chunksize=2**19, chunkslice='times', verbose=True):
"""Simulate Brownian motion trajectories and emission rates.
This method performs the Brownian motion simulation using the current
set of parameters. Before running this method you can check the
disk-space requirements using :method:`print_sizes`.
Results are stored to disk in HDF5 format and are accessible in
in `self.emission`, `self.emission_tot` and `self.position` as
pytables arrays.
Arguments:
save_pos (bool): if True, save the particles 3D trajectories
total_emission (bool): if True, store only the total emission array
containing the sum of emission of all the particles.
rs (RandomState object): random state object used as random number
generator. If None, use a random state initialized from seed.
seed (uint): when `rs` is None, `seed` is used to initialize the
random state, otherwise is ignored.
wrap_func (function): the function used to apply the boundary
condition (use :func:`wrap_periodic` or :func:`wrap_mirror`).
path (string): a folder where simulation data is saved.
verbose (bool): if False, prints no output.
"""
if rs is None:
rs = np.random.RandomState(seed=seed)
self.open_store_traj(chunksize=chunksize, chunkslice=chunkslice,
radial=radial, path=path)
# Save current random state for reproducibility
self.traj_group._v_attrs['init_random_state'] = rs.get_state()
em_store = self.emission_tot if total_emission else self.emission
print('- Start trajectories simulation - %s' % ctime(), flush=True)
if verbose:
print('[PID %d] Diffusion time:' % os.getpid(), end='')
i_chunk = 0
t_chunk_size = self.emission.chunkshape[1]
chunk_duration = t_chunk_size * self.t_step
par_start_pos = self.particles.positions
prev_time = 0
for time_size in iter_chunksize(self.n_samples, t_chunk_size):
if verbose:
curr_time = int(chunk_duration * (i_chunk + 1))
if curr_time > prev_time:
print(' %ds' % curr_time, end='', flush=True)
prev_time = curr_time
POS, em = self._sim_trajectories(time_size, par_start_pos, rs,
total_emission=total_emission,
save_pos=save_pos, radial=radial,
wrap_func=wrap_func)
## Append em to the permanent storage
# if total_emission, data is just a linear array
# otherwise is a 2-D array (self.num_particles, c_size)
em_store.append(em)
if save_pos:
self.position.append(np.vstack(POS).astype('float32'))
i_chunk += 1
self.store.h5file.flush()
# Save current random state
self.traj_group._v_attrs['last_random_state'] = rs.get_state()
self.store.h5file.flush()
print('\n- End trajectories simulation - %s' % ctime(), flush=True) | python | {
"resource": ""
} |
q258020 | ParticlesSimulation._sim_timestamps | validation | def _sim_timestamps(self, max_rate, bg_rate, emission, i_start, rs,
ip_start=0, scale=10, sort=True):
"""Simulate timestamps from emission trajectories.
Uses attributes: `.t_step`.
Returns:
A tuple of two arrays: timestamps and particles.
"""
counts_chunk = sim_timetrace_bg(emission, max_rate, bg_rate,
self.t_step, rs=rs)
nrows = emission.shape[0]
if bg_rate is not None:
nrows += 1
assert counts_chunk.shape == (nrows, emission.shape[1])
max_counts = counts_chunk.max()
if max_counts == 0:
return np.array([], dtype=np.int64), np.array([], dtype=np.int64)
time_start = i_start * scale
time_stop = time_start + counts_chunk.shape[1] * scale
ts_range = np.arange(time_start, time_stop, scale, dtype='int64')
# Loop for each particle to compute timestamps
times_chunk_p = []
par_index_chunk_p = []
for ip, counts_chunk_ip in enumerate(counts_chunk):
# Compute timestamps for particle ip for all bins with counts
times_c_ip = []
for v in range(1, max_counts + 1):
times_c_ip.append(ts_range[counts_chunk_ip >= v])
# Stack the timestamps from different "counts"
t = np.hstack(times_c_ip)
# Append current particle
times_chunk_p.append(t)
par_index_chunk_p.append(np.full(t.size, ip + ip_start, dtype='u1'))
# Merge the arrays of different particles
times_chunk = np.hstack(times_chunk_p)
par_index_chunk = np.hstack(par_index_chunk_p)
if sort:
# Sort timestamps inside the merged chunk
index_sort = times_chunk.argsort(kind='mergesort')
times_chunk = times_chunk[index_sort]
par_index_chunk = par_index_chunk[index_sort]
return times_chunk, par_index_chunk | python | {
"resource": ""
} |
q258021 | ParticlesSimulation.simulate_timestamps_mix | validation | def simulate_timestamps_mix(self, max_rates, populations, bg_rate,
rs=None, seed=1, chunksize=2**16,
comp_filter=None, overwrite=False,
skip_existing=False, scale=10,
path=None, t_chunksize=None, timeslice=None):
"""Compute one timestamps array for a mixture of N populations.
Timestamp data are saved to disk and accessible as pytables arrays in
`._timestamps` and `._tparticles`.
The background generated timestamps are assigned a
conventional particle number (last particle index + 1).
Arguments:
max_rates (list): list of the peak max emission rate for each
population.
populations (list of slices): slices to `self.particles`
defining each population.
bg_rate (float, cps): rate for a Poisson background process
rs (RandomState object): random state object used as random number
generator. If None, use a random state initialized from seed.
seed (uint): when `rs` is None, `seed` is used to initialize the
random state, otherwise is ignored.
chunksize (int): chunk size used for the on-disk timestamp array
comp_filter (tables.Filter or None): compression filter to use
for the on-disk `timestamps` and `tparticles` arrays.
If None use default compression.
overwrite (bool): if True, overwrite any pre-existing timestamps
array. If False, never overwrite. The outcome of simulating an
existing array is controlled by `skip_existing` flag.
skip_existing (bool): if True, skip simulation if the same
timestamps array is already present.
scale (int): `self.t_step` is multiplied by `scale` to obtain the
timestamps units in seconds.
path (string): folder where to save the data.
timeslice (float or None): timestamps are simulated until
`timeslice` seconds. If None, simulate until `self.t_max`.
"""
self.open_store_timestamp(chunksize=chunksize, path=path)
rs = self._get_group_randomstate(rs, seed, self.ts_group)
if t_chunksize is None:
t_chunksize = self.emission.chunkshape[1]
timeslice_size = self.n_samples
if timeslice is not None:
timeslice_size = timeslice // self.t_step
name = self._get_ts_name_mix(max_rates, populations, bg_rate, rs=rs)
kw = dict(name=name, clk_p=self.t_step / scale,
max_rates=max_rates, bg_rate=bg_rate, populations=populations,
num_particles=self.num_particles,
bg_particle=self.num_particles,
overwrite=overwrite, chunksize=chunksize)
if comp_filter is not None:
kw.update(comp_filter=comp_filter)
try:
self._timestamps, self._tparticles = (self.ts_store
.add_timestamps(**kw))
except ExistingArrayError as e:
if skip_existing:
print(' - Skipping already present timestamps array.')
return
else:
raise e
self.ts_group._v_attrs['init_random_state'] = rs.get_state()
self._timestamps.attrs['init_random_state'] = rs.get_state()
self._timestamps.attrs['PyBroMo'] = __version__
ts_list, part_list = [], []
# Load emission in chunks, and save only the final timestamps
bg_rates = [None] * (len(max_rates) - 1) + [bg_rate]
prev_time = 0
for i_start, i_end in iter_chunk_index(timeslice_size, t_chunksize):
curr_time = np.around(i_start * self.t_step, decimals=0)
if curr_time > prev_time:
print(' %.1fs' % curr_time, end='', flush=True)
prev_time = curr_time
em_chunk = self.emission[:, i_start:i_end]
times_chunk_s, par_index_chunk_s = \
self._sim_timestamps_populations(
em_chunk, max_rates, populations, bg_rates, i_start,
rs, scale)
# Save sorted timestamps (suffix '_s') and corresponding particles
ts_list.append(times_chunk_s)
part_list.append(par_index_chunk_s)
for ts, part in zip(ts_list, part_list):
self._timestamps.append(ts)
self._tparticles.append(part)
# Save current random state so it can be resumed in the next session
self.ts_group._v_attrs['last_random_state'] = rs.get_state()
self._timestamps.attrs['last_random_state'] = rs.get_state()
self.ts_store.h5file.flush() | python | {
"resource": ""
} |
q258022 | merge_da | validation | def merge_da(ts_d, ts_par_d, ts_a, ts_par_a):
"""Merge donor and acceptor timestamps and particle arrays.
Parameters:
ts_d (array): donor timestamp array
ts_par_d (array): donor particles array
ts_a (array): acceptor timestamp array
ts_par_a (array): acceptor particles array
Returns:
Arrays: timestamps, acceptor bool mask, timestamp particle
"""
ts = np.hstack([ts_d, ts_a])
ts_par = np.hstack([ts_par_d, ts_par_a])
a_ch = np.hstack([np.zeros(ts_d.shape[0], dtype=bool),
np.ones(ts_a.shape[0], dtype=bool)])
index_sort = ts.argsort()
return ts[index_sort], a_ch[index_sort], ts_par[index_sort] | python | {
"resource": ""
} |
q258023 | em_rates_from_E_DA_mix | validation | def em_rates_from_E_DA_mix(em_rates_tot, E_values):
"""D and A emission rates for two populations.
"""
em_rates_d, em_rates_a = [], []
for em_rate_tot, E_value in zip(em_rates_tot, E_values):
em_rate_di, em_rate_ai = em_rates_from_E_DA(em_rate_tot, E_value)
em_rates_d.append(em_rate_di)
em_rates_a.append(em_rate_ai)
return em_rates_d, em_rates_a | python | {
"resource": ""
} |
q258024 | populations_diff_coeff | validation | def populations_diff_coeff(particles, populations):
"""Diffusion coefficients of the two specified populations.
"""
D_counts = particles.diffusion_coeff_counts
if len(D_counts) == 1:
pop_sizes = [pop.stop - pop.start for pop in populations]
assert D_counts[0][1] >= sum(pop_sizes)
D_counts = [(D_counts[0][0], ps) for ps in pop_sizes]
D_list = []
D_pop_start = 0 # start index of diffusion-based populations
for pop, (D, counts) in zip(populations, D_counts):
D_list.append(D)
assert pop.start >= D_pop_start
assert pop.stop <= D_pop_start + counts
D_pop_start += counts
return D_list | python | {
"resource": ""
} |
q258025 | populations_slices | validation | def populations_slices(particles, num_pop_list):
"""2-tuple of slices for selection of two populations.
"""
slices = []
i_prev = 0
for num_pop in num_pop_list:
slices.append(slice(i_prev, i_prev + num_pop))
i_prev += num_pop
return slices | python | {
"resource": ""
} |
q258026 | TimestapSimulation._calc_hash_da | validation | def _calc_hash_da(self, rs):
"""Compute hash of D and A timestamps for single-step D+A case.
"""
self.hash_d = hash_(rs.get_state())[:6]
self.hash_a = self.hash_d | python | {
"resource": ""
} |
q258027 | TimestapSimulation.merge_da | validation | def merge_da(self):
"""Merge donor and acceptor timestamps, computes `ts`, `a_ch`, `part`.
"""
print(' - Merging D and A timestamps', flush=True)
ts_d, ts_par_d = self.S.get_timestamps_part(self.name_timestamps_d)
ts_a, ts_par_a = self.S.get_timestamps_part(self.name_timestamps_a)
ts, a_ch, part = merge_da(ts_d, ts_par_d, ts_a, ts_par_a)
assert a_ch.sum() == ts_a.shape[0]
assert (~a_ch).sum() == ts_d.shape[0]
assert a_ch.size == ts_a.shape[0] + ts_d.shape[0]
self.ts, self.a_ch, self.part = ts, a_ch, part
self.clk_p = ts_d.attrs['clk_p'] | python | {
"resource": ""
} |
q258028 | TimestapSimulation.save_photon_hdf5 | validation | def save_photon_hdf5(self, identity=None, overwrite=True, path=None):
"""Create a smFRET Photon-HDF5 file with current timestamps."""
filepath = self.filepath
if path is not None:
filepath = Path(path, filepath.name)
self.merge_da()
data = self._make_photon_hdf5(identity=identity)
phc.hdf5.save_photon_hdf5(data, h5_fname=str(filepath),
overwrite=overwrite) | python | {
"resource": ""
} |
q258029 | print_attrs | validation | def print_attrs(data_file, node_name='/', which='user', compress=False):
"""Print the HDF5 attributes for `node_name`.
Parameters:
data_file (pytables HDF5 file object): the data file to print
node_name (string): name of the path inside the file to be printed.
Can be either a group or a leaf-node. Default: '/', the root node.
which (string): Valid values are 'user' for user-defined attributes,
'sys' for pytables-specific attributes and 'all' to print both
groups of attributes. Default 'user'.
compress (bool): if True displays at most a line for each attribute.
Default False.
"""
node = data_file.get_node(node_name)
print ('List of attributes for:\n %s\n' % node)
for attr in node._v_attrs._f_list():
print ('\t%s' % attr)
attr_content = repr(node._v_attrs[attr])
if compress:
attr_content = attr_content.split('\n')[0]
print ("\t %s" % attr_content) | python | {
"resource": ""
} |
q258030 | print_children | validation | def print_children(data_file, group='/'):
"""Print all the sub-groups in `group` and leaf-nodes children of `group`.
Parameters:
data_file (pytables HDF5 file object): the data file to print
group (string): path name of the group to be printed.
Default: '/', the root node.
"""
base = data_file.get_node(group)
print ('Groups in:\n %s\n' % base)
for node in base._f_walk_groups():
if node is not base:
print (' %s' % node)
print ('\nLeaf-nodes in %s:' % group)
for node in base._v_leaves.itervalues():
info = node.shape
if len(info) == 0:
info = node.read()
print ('\t%s, %s' % (node.name, info))
if len(node.title) > 0:
print ('\t %s' % node.title) | python | {
"resource": ""
} |
q258031 | RNN.fit | validation | def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(), snapshot_freq=1, path=None):
"""Train model on given training examples and return the list of costs after each minibatch is processed.
Args:
trX (list) -- Inputs
trY (list) -- Outputs
batch_size (int, optional) -- number of examples in a minibatch (default 64)
n_epochs (int, optional) -- number of epochs to train for (default 1)
len_filter (object, optional) -- object to filter training example by length (default LenFilter())
snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)
path (str, optional) -- prefix of path where model snapshots are saved.
If None, no snapshots are saved (default None)
Returns:
list -- costs of model after processing each minibatch
"""
if len_filter is not None:
trX, trY = len_filter.filter(trX, trY)
trY = standardize_targets(trY, cost=self.cost)
n = 0.
t = time()
costs = []
for e in range(n_epochs):
epoch_costs = []
for xmb, ymb in self.iterator.iterXY(trX, trY):
c = self._train(xmb, ymb)
epoch_costs.append(c)
n += len(ymb)
if self.verbose >= 2:
n_per_sec = n / (time() - t)
n_left = len(trY) - n % len(trY)
time_left = n_left/n_per_sec
sys.stdout.write("\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time_left))
sys.stdout.flush()
costs.extend(epoch_costs)
status = "Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time() - t)
if self.verbose >= 2:
sys.stdout.write("\r"+status)
sys.stdout.flush()
sys.stdout.write("\n")
elif self.verbose == 1:
print(status)
if path and e % snapshot_freq == 0:
save(self, "{0}.{1}".format(path, e))
return costs | python | {
"resource": ""
} |
q258032 | plane_xz | validation | def plane_xz(size=(10, 10), resolution=(10, 10)) -> VAO:
"""
Generates a plane on the xz axis of a specific size and resolution.
Normals and texture coordinates are also included.
Args:
size: (x, y) tuple
resolution: (x, y) tuple
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance
"""
sx, sz = size
rx, rz = resolution
dx, dz = sx / rx, sz / rz # step
ox, oz = -sx / 2, -sz / 2 # start offset
def gen_pos():
for z in range(rz):
for x in range(rx):
yield ox + x * dx
yield 0
yield oz + z * dz
def gen_uv():
for z in range(rz):
for x in range(rx):
yield x / (rx - 1)
yield 1 - z / (rz - 1)
def gen_normal():
for _ in range(rx * rz):
yield 0.0
yield 1.0
yield 0.0
def gen_index():
for z in range(rz - 1):
for x in range(rx - 1):
# quad poly left
yield z * rz + x + 1
yield z * rz + x
yield z * rz + x + rx
# quad poly right
yield z * rz + x + 1
yield z * rz + x + rx
yield z * rz + x + rx + 1
pos_data = numpy.fromiter(gen_pos(), dtype=numpy.float32)
uv_data = numpy.fromiter(gen_uv(), dtype=numpy.float32)
normal_data = numpy.fromiter(gen_normal(), dtype=numpy.float32)
index_data = numpy.fromiter(gen_index(), dtype=numpy.uint32)
vao = VAO("plane_xz", mode=moderngl.TRIANGLES)
vao.buffer(pos_data, '3f', ['in_position'])
vao.buffer(uv_data, '2f', ['in_uv'])
vao.buffer(normal_data, '3f', ['in_normal'])
vao.index_buffer(index_data, index_element_size=4)
return vao | python | {
"resource": ""
} |
q258033 | GLTF2.load | validation | def load(self):
"""
Deferred loading of the scene
:param scene: The scene object
:param file: Resolved path if changed by finder
"""
self.path = self.find_scene(self.meta.path)
if not self.path:
raise ValueError("Scene '{}' not found".format(self.meta.path))
self.scene = Scene(self.path)
# Load gltf json file
if self.path.suffix == '.gltf':
self.load_gltf()
# Load binary gltf file
if self.path.suffix == '.glb':
self.load_glb()
self.meta.check_version()
self.meta.check_extensions(self.supported_extensions)
self.load_images()
self.load_samplers()
self.load_textures()
self.load_materials()
self.load_meshes()
self.load_nodes()
self.scene.calc_scene_bbox()
self.scene.prepare()
return self.scene | python | {
"resource": ""
} |
q258034 | GLTF2.load_gltf | validation | def load_gltf(self):
"""Loads a gltf json file"""
with open(self.path) as fd:
self.meta = GLTFMeta(self.path, json.load(fd)) | python | {
"resource": ""
} |
q258035 | GLTF2.load_glb | validation | def load_glb(self):
"""Loads a binary gltf file"""
with open(self.path, 'rb') as fd:
# Check header
magic = fd.read(4)
if magic != GLTF_MAGIC_HEADER:
raise ValueError("{} has incorrect header {} != {}".format(self.path, magic, GLTF_MAGIC_HEADER))
version = struct.unpack('<I', fd.read(4))[0]
if version != 2:
raise ValueError("{} has unsupported version {}".format(self.path, version))
# Total file size including headers
_ = struct.unpack('<I', fd.read(4))[0] # noqa
# Chunk 0 - json
chunk_0_length = struct.unpack('<I', fd.read(4))[0]
chunk_0_type = fd.read(4)
if chunk_0_type != b'JSON':
raise ValueError("Expected JSON chunk, not {} in file {}".format(chunk_0_type, self.path))
json_meta = fd.read(chunk_0_length).decode()
# chunk 1 - binary buffer
chunk_1_length = struct.unpack('<I', fd.read(4))[0]
chunk_1_type = fd.read(4)
if chunk_1_type != b'BIN\x00':
raise ValueError("Expected BIN chunk, not {} in file {}".format(chunk_1_type, self.path))
self.meta = GLTFMeta(self.path, json.loads(json_meta), binary_buffer=fd.read(chunk_1_length)) | python | {
"resource": ""
} |
q258036 | GLTFMeta.buffers_exist | validation | def buffers_exist(self):
"""Checks if the bin files referenced exist"""
for buff in self.buffers:
if not buff.is_separate_file:
continue
path = self.path.parent / buff.uri
if not os.path.exists(path):
raise FileNotFoundError("Buffer {} referenced in {} not found".format(path, self.path)) | python | {
"resource": ""
} |
q258037 | GLTFMesh.prepare_attrib_mapping | validation | def prepare_attrib_mapping(self, primitive):
"""Pre-parse buffer mappings for each VBO to detect interleaved data for a primitive"""
buffer_info = []
for name, accessor in primitive.attributes.items():
info = VBOInfo(*accessor.info())
info.attributes.append((name, info.components))
if buffer_info and buffer_info[-1].buffer_view == info.buffer_view:
if buffer_info[-1].interleaves(info):
buffer_info[-1].merge(info)
continue
buffer_info.append(info)
return buffer_info | python | {
"resource": ""
} |
q258038 | GLTFMesh.get_bbox | validation | def get_bbox(self, primitive):
"""Get the bounding box for the mesh"""
accessor = primitive.attributes.get('POSITION')
return accessor.min, accessor.max | python | {
"resource": ""
} |
q258039 | VBOInfo.interleaves | validation | def interleaves(self, info):
"""Does the buffer interleave with this one?"""
return info.byte_offset == self.component_type.size * self.components | python | {
"resource": ""
} |
q258040 | VBOInfo.create | validation | def create(self):
"""Create the VBO"""
dtype = NP_COMPONENT_DTYPE[self.component_type.value]
data = numpy.frombuffer(
self.buffer.read(byte_length=self.byte_length, byte_offset=self.byte_offset),
count=self.count * self.components,
dtype=dtype,
)
return dtype, data | python | {
"resource": ""
} |
q258041 | Camera.set_position | validation | def set_position(self, x, y, z):
"""
Set the 3D position of the camera
:param x: float
:param y: float
:param z: float
"""
self.position = Vector3([x, y, z]) | python | {
"resource": ""
} |
q258042 | Camera._update_yaw_and_pitch | validation | def _update_yaw_and_pitch(self):
"""
Updates the camera vectors based on the current yaw and pitch
"""
front = Vector3([0.0, 0.0, 0.0])
front.x = cos(radians(self.yaw)) * cos(radians(self.pitch))
front.y = sin(radians(self.pitch))
front.z = sin(radians(self.yaw)) * cos(radians(self.pitch))
self.dir = vector.normalise(front)
self.right = vector.normalise(vector3.cross(self.dir, self._up))
self.up = vector.normalise(vector3.cross(self.right, self.dir)) | python | {
"resource": ""
} |
q258043 | Camera.look_at | validation | def look_at(self, vec=None, pos=None):
"""
Look at a specific point
:param vec: Vector3 position
:param pos: python list [x, y, x]
:return: Camera matrix
"""
if pos is None:
vec = Vector3(pos)
if vec is None:
raise ValueError("vector or pos must be set")
return self._gl_look_at(self.position, vec, self._up) | python | {
"resource": ""
} |
q258044 | Camera._gl_look_at | validation | def _gl_look_at(self, pos, target, up):
"""
The standard lookAt method
:param pos: current position
:param target: target position to look at
:param up: direction up
"""
z = vector.normalise(pos - target)
x = vector.normalise(vector3.cross(vector.normalise(up), z))
y = vector3.cross(z, x)
translate = matrix44.create_identity()
translate[3][0] = -pos.x
translate[3][1] = -pos.y
translate[3][2] = -pos.z
rotate = matrix44.create_identity()
rotate[0][0] = x[0] # -- X
rotate[1][0] = x[1]
rotate[2][0] = x[2]
rotate[0][1] = y[0] # -- Y
rotate[1][1] = y[1]
rotate[2][1] = y[2]
rotate[0][2] = z[0] # -- Z
rotate[1][2] = z[1]
rotate[2][2] = z[2]
return matrix44.multiply(translate, rotate) | python | {
"resource": ""
} |
q258045 | SystemCamera.move_state | validation | def move_state(self, direction, activate):
"""
Set the camera position move state
:param direction: What direction to update
:param activate: Start or stop moving in the direction
"""
if direction == RIGHT:
self._xdir = POSITIVE if activate else STILL
elif direction == LEFT:
self._xdir = NEGATIVE if activate else STILL
elif direction == FORWARD:
self._zdir = NEGATIVE if activate else STILL
elif direction == BACKWARD:
self._zdir = POSITIVE if activate else STILL
elif direction == UP:
self._ydir = POSITIVE if activate else STILL
elif direction == DOWN:
self._ydir = NEGATIVE if activate else STILL | python | {
"resource": ""
} |
q258046 | SystemCamera.rot_state | validation | def rot_state(self, x, y):
"""
Set the rotation state of the camera
:param x: viewport x pos
:param y: viewport y pos
"""
if self.last_x is None:
self.last_x = x
if self.last_y is None:
self.last_y = y
x_offset = self.last_x - x
y_offset = self.last_y - y
self.last_x = x
self.last_y = y
x_offset *= self.mouse_sensitivity
y_offset *= self.mouse_sensitivity
self.yaw -= x_offset
self.pitch += y_offset
if self.pitch > 85.0:
self.pitch = 85.0
if self.pitch < -85.0:
self.pitch = -85.0
self._update_yaw_and_pitch() | python | {
"resource": ""
} |
q258047 | BaseText._translate_string | validation | def _translate_string(self, data, length):
"""Translate string into character texture positions"""
for index, char in enumerate(data):
if index == length:
break
yield self._meta.characters - 1 - self._ct[char] | python | {
"resource": ""
} |
q258048 | init | validation | def init(window=None, project=None, timeline=None):
"""
Initialize, load and run
:param manager: The effect manager to use
"""
from demosys.effects.registry import Effect
from demosys.scene import camera
window.timeline = timeline
# Inject attributes into the base Effect class
setattr(Effect, '_window', window)
setattr(Effect, '_ctx', window.ctx)
setattr(Effect, '_project', project)
# Set up the default system camera
window.sys_camera = camera.SystemCamera(aspect=window.aspect_ratio, fov=60.0, near=1, far=1000)
setattr(Effect, '_sys_camera', window.sys_camera)
print("Loading started at", time.time())
project.load()
# Initialize timer
timer_cls = import_string(settings.TIMER)
window.timer = timer_cls()
window.timer.start() | python | {
"resource": ""
} |
q258049 | Scene.draw | validation | def draw(self, projection_matrix=None, camera_matrix=None, time=0):
"""
Draw all the nodes in the scene
:param projection_matrix: projection matrix (bytes)
:param camera_matrix: camera_matrix (bytes)
:param time: The current time
"""
projection_matrix = projection_matrix.astype('f4').tobytes()
camera_matrix = camera_matrix.astype('f4').tobytes()
for node in self.root_nodes:
node.draw(
projection_matrix=projection_matrix,
camera_matrix=camera_matrix,
time=time,
)
self.ctx.clear_samplers(0, 4) | python | {
"resource": ""
} |
q258050 | Scene.draw_bbox | validation | def draw_bbox(self, projection_matrix=None, camera_matrix=None, all=True):
"""Draw scene and mesh bounding boxes"""
projection_matrix = projection_matrix.astype('f4').tobytes()
camera_matrix = camera_matrix.astype('f4').tobytes()
# Scene bounding box
self.bbox_program["m_proj"].write(projection_matrix)
self.bbox_program["m_view"].write(self._view_matrix.astype('f4').tobytes())
self.bbox_program["m_cam"].write(camera_matrix)
self.bbox_program["bb_min"].write(self.bbox_min.astype('f4').tobytes())
self.bbox_program["bb_max"].write(self.bbox_max.astype('f4').tobytes())
self.bbox_program["color"].value = (1.0, 0.0, 0.0)
self.bbox_vao.render(self.bbox_program)
if not all:
return
# Draw bounding box for children
for node in self.root_nodes:
node.draw_bbox(projection_matrix, camera_matrix, self.bbox_program, self.bbox_vao) | python | {
"resource": ""
} |
q258051 | Scene.apply_mesh_programs | validation | def apply_mesh_programs(self, mesh_programs=None):
"""Applies mesh programs to meshes"""
if not mesh_programs:
mesh_programs = [ColorProgram(), TextureProgram(), FallbackProgram()]
for mesh in self.meshes:
for mp in mesh_programs:
instance = mp.apply(mesh)
if instance is not None:
if isinstance(instance, MeshProgram):
mesh.mesh_program = mp
break
else:
raise ValueError("apply() must return a MeshProgram instance, not {}".format(type(instance)))
if not mesh.mesh_program:
print("WARING: No mesh program applied to '{}'".format(mesh.name)) | python | {
"resource": ""
} |
q258052 | Scene.calc_scene_bbox | validation | def calc_scene_bbox(self):
"""Calculate scene bbox"""
bbox_min, bbox_max = None, None
for node in self.root_nodes:
bbox_min, bbox_max = node.calc_global_bbox(
matrix44.create_identity(),
bbox_min,
bbox_max
)
self.bbox_min = bbox_min
self.bbox_max = bbox_max
self.diagonal_size = vector3.length(self.bbox_max - self.bbox_min) | python | {
"resource": ""
} |
q258053 | points_random_3d | validation | def points_random_3d(count, range_x=(-10.0, 10.0), range_y=(-10.0, 10.0), range_z=(-10.0, 10.0), seed=None) -> VAO:
"""
Generates random positions inside a confied box.
Args:
count (int): Number of points to generate
Keyword Args:
range_x (tuple): min-max range for x axis: Example (-10.0. 10.0)
range_y (tuple): min-max range for y axis: Example (-10.0. 10.0)
range_z (tuple): min-max range for z axis: Example (-10.0. 10.0)
seed (int): The random seed
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance
"""
random.seed(seed)
def gen():
for _ in range(count):
yield random.uniform(*range_x)
yield random.uniform(*range_y)
yield random.uniform(*range_z)
data = numpy.fromiter(gen(), count=count * 3, dtype=numpy.float32)
vao = VAO("geometry:points_random_3d", mode=moderngl.POINTS)
vao.buffer(data, '3f', ['in_position'])
return vao | python | {
"resource": ""
} |
q258054 | Timer.start | validation | def start(self):
"""Play the music"""
if self.initialized:
mixer.music.unpause()
else:
mixer.music.play()
# FIXME: Calling play twice to ensure the music is actually playing
mixer.music.play()
self.initialized = True
self.paused = False | python | {
"resource": ""
} |
q258055 | Timer.get_time | validation | def get_time(self) -> float:
"""
Get the current position in the music in seconds
"""
if self.paused:
return self.pause_time
return mixer.music.get_pos() / 1000.0 | python | {
"resource": ""
} |
q258056 | Timer.set_time | validation | def set_time(self, value: float):
"""
Set the current time in the music in seconds causing the player
to seek to this location in the file.
"""
if value < 0:
value = 0
# mixer.music.play(start=value)
mixer.music.set_pos(value) | python | {
"resource": ""
} |
q258057 | DeferredRenderer.draw_buffers | validation | def draw_buffers(self, near, far):
"""
Draw framebuffers for debug purposes.
We need to supply near and far plane so the depth buffer can be linearized when visualizing.
:param near: Projection near value
:param far: Projection far value
"""
self.ctx.disable(moderngl.DEPTH_TEST)
helper.draw(self.gbuffer.color_attachments[0], pos=(0.0, 0.0), scale=(0.25, 0.25))
helper.draw(self.gbuffer.color_attachments[1], pos=(0.5, 0.0), scale=(0.25, 0.25))
helper.draw_depth(self.gbuffer.depth_attachment, near, far, pos=(1.0, 0.0), scale=(0.25, 0.25))
helper.draw(self.lightbuffer.color_attachments[0], pos=(1.5, 0.0), scale=(0.25, 0.25)) | python | {
"resource": ""
} |
q258058 | DeferredRenderer.add_point_light | validation | def add_point_light(self, position, radius):
"""Add point light"""
self.point_lights.append(PointLight(position, radius)) | python | {
"resource": ""
} |
q258059 | DeferredRenderer.render_lights | validation | def render_lights(self, camera_matrix, projection):
"""Render light volumes"""
# Draw light volumes from the inside
self.ctx.front_face = 'cw'
self.ctx.blend_func = moderngl.ONE, moderngl.ONE
helper._depth_sampler.use(location=1)
with self.lightbuffer_scope:
for light in self.point_lights:
# Calc light properties
light_size = light.radius
m_light = matrix44.multiply(light.matrix, camera_matrix)
# Draw the light volume
self.point_light_shader["m_proj"].write(projection.tobytes())
self.point_light_shader["m_light"].write(m_light.astype('f4').tobytes())
self.gbuffer.color_attachments[1].use(location=0)
self.point_light_shader["g_normal"].value = 0
self.gbuffer.depth_attachment.use(location=1)
self.point_light_shader["g_depth"].value = 1
self.point_light_shader["screensize"].value = (self.width, self.height)
self.point_light_shader["proj_const"].value = projection.projection_constants
self.point_light_shader["radius"].value = light_size
self.unit_cube.render(self.point_light_shader)
helper._depth_sampler.clear(location=1) | python | {
"resource": ""
} |
q258060 | DeferredRenderer.render_lights_debug | validation | def render_lights_debug(self, camera_matrix, projection):
"""Render outlines of light volumes"""
self.ctx.enable(moderngl.BLEND)
self.ctx.blend_func = moderngl.SRC_ALPHA, moderngl.ONE_MINUS_SRC_ALPHA
for light in self.point_lights:
m_mv = matrix44.multiply(light.matrix, camera_matrix)
light_size = light.radius
self.debug_shader["m_proj"].write(projection.tobytes())
self.debug_shader["m_mv"].write(m_mv.astype('f4').tobytes())
self.debug_shader["size"].value = light_size
self.unit_cube.render(self.debug_shader, mode=moderngl.LINE_STRIP)
self.ctx.disable(moderngl.BLEND) | python | {
"resource": ""
} |
q258061 | DeferredRenderer.combine | validation | def combine(self):
"""Combine diffuse and light buffer"""
self.gbuffer.color_attachments[0].use(location=0)
self.combine_shader["diffuse_buffer"].value = 0
self.lightbuffer.color_attachments[0].use(location=1)
self.combine_shader["light_buffer"].value = 1
self.quad.render(self.combine_shader) | python | {
"resource": ""
} |
q258062 | Loader.load_shader | validation | def load_shader(self, shader_type: str, path: str):
"""Load a single shader"""
if path:
resolved_path = self.find_program(path)
if not resolved_path:
raise ValueError("Cannot find {} shader '{}'".format(shader_type, path))
print("Loading:", path)
with open(resolved_path, 'r') as fd:
return fd.read() | python | {
"resource": ""
} |
q258063 | Loader.load | validation | def load(self):
"""Load a texture array"""
self._open_image()
width, height, depth = self.image.size[0], self.image.size[1] // self.layers, self.layers
components, data = image_data(self.image)
texture = self.ctx.texture_array(
(width, height, depth),
components,
data,
)
texture.extra = {'meta': self.meta}
if self.meta.mipmap:
texture.build_mipmaps()
self._close_image()
return texture | python | {
"resource": ""
} |
q258064 | Mesh.draw | validation | def draw(self, projection_matrix=None, view_matrix=None, camera_matrix=None, time=0):
"""
Draw the mesh using the assigned mesh program
:param projection_matrix: projection_matrix (bytes)
:param view_matrix: view_matrix (bytes)
:param camera_matrix: camera_matrix (bytes)
"""
if self.mesh_program:
self.mesh_program.draw(
self,
projection_matrix=projection_matrix,
view_matrix=view_matrix,
camera_matrix=camera_matrix,
time=time
) | python | {
"resource": ""
} |
q258065 | Timer.set_time | validation | def set_time(self, value: float):
"""
Set the current time jumping in the timeline.
Args:
value (float): The new time
"""
if value < 0:
value = 0
self.controller.row = self.rps * value | python | {
"resource": ""
} |
q258066 | Effect.draw | validation | def draw(self, time: float, frametime: float, target: moderngl.Framebuffer):
"""
Draw function called by the system every frame when the effect is active.
This method raises ``NotImplementedError`` unless implemented.
Args:
time (float): The current time in seconds.
frametime (float): The time the previous frame used to render in seconds.
target (``moderngl.Framebuffer``): The target FBO for the effect.
"""
raise NotImplementedError("draw() is not implemented") | python | {
"resource": ""
} |
q258067 | Effect.get_program | validation | def get_program(self, label: str) -> moderngl.Program:
"""
Get a program by its label
Args:
label (str): The label for the program
Returns: py:class:`moderngl.Program` instance
"""
return self._project.get_program(label) | python | {
"resource": ""
} |
q258068 | Effect.get_texture | validation | def get_texture(self, label: str) -> Union[moderngl.Texture, moderngl.TextureArray,
moderngl.Texture3D, moderngl.TextureCube]:
"""
Get a texture by its label
Args:
label (str): The Label for the texture
Returns:
The py:class:`moderngl.Texture` instance
"""
return self._project.get_texture(label) | python | {
"resource": ""
} |
q258069 | Effect.get_effect_class | validation | def get_effect_class(self, effect_name: str, package_name: str = None) -> Type['Effect']:
"""
Get an effect class by the class name
Args:
effect_name (str): Name of the effect class
Keyword Args:
package_name (str): The package the effect belongs to. This is optional and only
needed when effect class names are not unique.
Returns:
:py:class:`Effect` class
"""
return self._project.get_effect_class(effect_name, package_name=package_name) | python | {
"resource": ""
} |
q258070 | Effect.create_projection | validation | def create_projection(self, fov: float = 75.0, near: float = 1.0, far: float = 100.0, aspect_ratio: float = None):
"""
Create a projection matrix with the following parameters.
When ``aspect_ratio`` is not provided the configured aspect
ratio for the window will be used.
Args:
fov (float): Field of view (float)
near (float): Camera near value
far (float): Camrea far value
Keyword Args:
aspect_ratio (float): Aspect ratio of the viewport
Returns:
The projection matrix as a float32 :py:class:`numpy.array`
"""
return matrix44.create_perspective_projection_matrix(
fov,
aspect_ratio or self.window.aspect_ratio,
near,
far,
dtype='f4',
) | python | {
"resource": ""
} |
q258071 | Effect.create_transformation | validation | def create_transformation(self, rotation=None, translation=None):
"""
Creates a transformation matrix woth rotations and translation.
Args:
rotation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`
translation: 3 component vector as a list, tuple, or :py:class:`pyrr.Vector3`
Returns:
A 4x4 matrix as a :py:class:`numpy.array`
"""
mat = None
if rotation is not None:
mat = Matrix44.from_eulers(Vector3(rotation))
if translation is not None:
trans = matrix44.create_from_translation(Vector3(translation))
if mat is None:
mat = trans
else:
mat = matrix44.multiply(mat, trans)
return mat | python | {
"resource": ""
} |
q258072 | Effect.create_normal_matrix | validation | def create_normal_matrix(self, modelview):
"""
Creates a normal matrix from modelview matrix
Args:
modelview: The modelview matrix
Returns:
A 3x3 Normal matrix as a :py:class:`numpy.array`
"""
normal_m = Matrix33.from_matrix44(modelview)
normal_m = normal_m.inverse
normal_m = normal_m.transpose()
return normal_m | python | {
"resource": ""
} |
q258073 | available_templates | validation | def available_templates(value):
"""Scan for available templates in effect_templates"""
templates = list_templates()
if value not in templates:
raise ArgumentTypeError("Effect template '{}' does not exist.\n Available templates: {} ".format(
value, ", ".join(templates)))
return value | python | {
"resource": ""
} |
q258074 | root_path | validation | def root_path():
"""Get the absolute path to the root of the demosys package"""
module_dir = os.path.dirname(globals()['__file__'])
return os.path.dirname(os.path.dirname(module_dir)) | python | {
"resource": ""
} |
q258075 | Loader.load | validation | def load(self):
"""Load a file in text mode"""
self.meta.resolved_path = self.find_data(self.meta.path)
if not self.meta.resolved_path:
raise ImproperlyConfigured("Data file '{}' not found".format(self.meta.path))
print("Loading:", self.meta.path)
with open(self.meta.resolved_path, 'r') as fd:
return fd.read() | python | {
"resource": ""
} |
q258076 | get_finder | validation | def get_finder(import_path):
"""
Get a finder class from an import path.
Raises ``demosys.core.exceptions.ImproperlyConfigured`` if the finder is not found.
This function uses an lru cache.
:param import_path: string representing an import path
:return: An instance of the finder
"""
Finder = import_string(import_path)
if not issubclass(Finder, BaseFileSystemFinder):
raise ImproperlyConfigured('Finder {} is not a subclass of core.finders.FileSystemFinder'.format(import_path))
return Finder() | python | {
"resource": ""
} |
q258077 | BaseFileSystemFinder.find | validation | def find(self, path: Path):
"""
Find a file in the path. The file may exist in multiple
paths. The last found file will be returned.
:param path: The path to find
:return: The absolute path to the file or None if not found
"""
# Update paths from settings to make them editable runtime
# This is only possible for FileSystemFinders
if getattr(self, 'settings_attr', None):
self.paths = getattr(settings, self.settings_attr)
path_found = None
for entry in self.paths:
abspath = entry / path
if abspath.exists():
path_found = abspath
return path_found | python | {
"resource": ""
} |
q258078 | Projection.update | validation | def update(self, aspect_ratio=None, fov=None, near=None, far=None):
"""
Update the internal projection matrix based on current values
or values passed in if specified.
:param aspect_ratio: New aspect ratio
:param fov: New field of view
:param near: New near value
:param far: New far value
"""
self.aspect_ratio = aspect_ratio or self.aspect_ratio
self.fov = fov or self.fov
self.near = near or self.near
self.far = far or self.far
self.matrix = Matrix44.perspective_projection(self.fov, self.aspect_ratio, self.near, self.far) | python | {
"resource": ""
} |
q258079 | Node.draw | validation | def draw(self, projection_matrix=None, camera_matrix=None, time=0):
"""
Draw node and children
:param projection_matrix: projection matrix (bytes)
:param camera_matrix: camera_matrix (bytes)
:param time: The current time
"""
if self.mesh:
self.mesh.draw(
projection_matrix=projection_matrix,
view_matrix=self.matrix_global_bytes,
camera_matrix=camera_matrix,
time=time
)
for child in self.children:
child.draw(
projection_matrix=projection_matrix,
camera_matrix=camera_matrix,
time=time
) | python | {
"resource": ""
} |
q258080 | Node.calc_global_bbox | validation | def calc_global_bbox(self, view_matrix, bbox_min, bbox_max):
"""Recursive calculation of scene bbox"""
if self.matrix is not None:
view_matrix = matrix44.multiply(self.matrix, view_matrix)
if self.mesh:
bbox_min, bbox_max = self.mesh.calc_global_bbox(view_matrix, bbox_min, bbox_max)
for child in self.children:
bbox_min, bbox_max = child.calc_global_bbox(view_matrix, bbox_min, bbox_max)
return bbox_min, bbox_max | python | {
"resource": ""
} |
q258081 | Window.swap_buffers | validation | def swap_buffers(self):
"""
Swaps buffers, incement the framecounter and pull events.
"""
self.frames += 1
glfw.swap_buffers(self.window)
self.poll_events() | python | {
"resource": ""
} |
q258082 | Window.resize | validation | def resize(self, width, height):
"""
Sets the new size and buffer size internally
"""
self.width = width
self.height = height
self.buffer_width, self.buffer_height = glfw.get_framebuffer_size(self.window)
self.set_default_viewport() | python | {
"resource": ""
} |
q258083 | Window.check_glfw_version | validation | def check_glfw_version(self):
"""
Ensure glfw library version is compatible
"""
print("glfw version: {} (python wrapper version {})".format(glfw.get_version(), glfw.__version__))
if glfw.get_version() < self.min_glfw_version:
raise ValueError("Please update glfw binaries to version {} or later".format(self.min_glfw_version)) | python | {
"resource": ""
} |
q258084 | quad_2d | validation | def quad_2d(width, height, xpos=0.0, ypos=0.0) -> VAO:
"""
Creates a 2D quad VAO using 2 triangles with normals and texture coordinates.
Args:
width (float): Width of the quad
height (float): Height of the quad
Keyword Args:
xpos (float): Center position x
ypos (float): Center position y
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance.
"""
pos = numpy.array([
xpos - width / 2.0, ypos + height / 2.0, 0.0,
xpos - width / 2.0, ypos - height / 2.0, 0.0,
xpos + width / 2.0, ypos - height / 2.0, 0.0,
xpos - width / 2.0, ypos + height / 2.0, 0.0,
xpos + width / 2.0, ypos - height / 2.0, 0.0,
xpos + width / 2.0, ypos + height / 2.0, 0.0,
], dtype=numpy.float32)
normals = numpy.array([
0.0, 0.0, 1.0,
0.0, 0.0, 1.0,
0.0, 0.0, 1.0,
0.0, 0.0, 1.0,
0.0, 0.0, 1.0,
0.0, 0.0, 1.0,
], dtype=numpy.float32)
uvs = numpy.array([
0.0, 1.0,
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
1.0, 0.0,
1.0, 1.0,
], dtype=numpy.float32)
vao = VAO("geometry:quad", mode=moderngl.TRIANGLES)
vao.buffer(pos, '3f', ["in_position"])
vao.buffer(normals, '3f', ["in_normal"])
vao.buffer(uvs, '2f', ["in_uv"])
return vao | python | {
"resource": ""
} |
q258085 | translate_buffer_format | validation | def translate_buffer_format(vertex_format):
"""Translate the buffer format"""
buffer_format = []
attributes = []
mesh_attributes = []
if "T2F" in vertex_format:
buffer_format.append("2f")
attributes.append("in_uv")
mesh_attributes.append(("TEXCOORD_0", "in_uv", 2))
if "C3F" in vertex_format:
buffer_format.append("3f")
attributes.append("in_color")
mesh_attributes.append(("NORMAL", "in_color", 3))
if "N3F" in vertex_format:
buffer_format.append("3f")
attributes.append("in_normal")
mesh_attributes.append(("NORMAL", "in_normal", 3))
buffer_format.append("3f")
attributes.append("in_position")
mesh_attributes.append(("POSITION", "in_position", 3))
return " ".join(buffer_format), attributes, mesh_attributes | python | {
"resource": ""
} |
q258086 | Timer.stop | validation | def stop(self) -> float:
"""
Stop the timer
Returns:
The time the timer was stopped
"""
self.stop_time = time.time()
return self.stop_time - self.start_time - self.offset | python | {
"resource": ""
} |
q258087 | Timer.set_time | validation | def set_time(self, value: float):
"""
Set the current time. This can be used to jump in the timeline.
Args:
value (float): The new time
"""
if value < 0:
value = 0
self.offset += self.get_time() - value | python | {
"resource": ""
} |
q258088 | Scenes.resolve_loader | validation | def resolve_loader(self, meta: SceneDescription):
"""
Resolve scene loader based on file extension
"""
for loader_cls in self._loaders:
if loader_cls.supports_file(meta):
meta.loader_cls = loader_cls
break
else:
raise ImproperlyConfigured(
"Scene {} has no loader class registered. Check settings.SCENE_LOADERS".format(meta.path)) | python | {
"resource": ""
} |
q258089 | Window.on_resize | validation | def on_resize(self, width, height):
"""
Pyglet specific callback for window resize events.
"""
self.width, self.height = width, height
self.buffer_width, self.buffer_height = width, height
self.resize(width, height) | python | {
"resource": ""
} |
q258090 | Window.swap_buffers | validation | def swap_buffers(self):
"""
Swap buffers, increment frame counter and pull events
"""
if not self.window.context:
return
self.frames += 1
self.window.flip()
self.window.dispatch_events() | python | {
"resource": ""
} |
q258091 | sphere | validation | def sphere(radius=0.5, sectors=32, rings=16) -> VAO:
"""
Creates a sphere.
Keyword Args:
radius (float): Radius or the sphere
rings (int): number or horizontal rings
sectors (int): number of vertical segments
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance
"""
R = 1.0 / (rings - 1)
S = 1.0 / (sectors - 1)
vertices = [0] * (rings * sectors * 3)
normals = [0] * (rings * sectors * 3)
uvs = [0] * (rings * sectors * 2)
v, n, t = 0, 0, 0
for r in range(rings):
for s in range(sectors):
y = math.sin(-math.pi / 2 + math.pi * r * R)
x = math.cos(2 * math.pi * s * S) * math.sin(math.pi * r * R)
z = math.sin(2 * math.pi * s * S) * math.sin(math.pi * r * R)
uvs[t] = s * S
uvs[t + 1] = r * R
vertices[v] = x * radius
vertices[v + 1] = y * radius
vertices[v + 2] = z * radius
normals[n] = x
normals[n + 1] = y
normals[n + 2] = z
t += 2
v += 3
n += 3
indices = [0] * rings * sectors * 6
i = 0
for r in range(rings - 1):
for s in range(sectors - 1):
indices[i] = r * sectors + s
indices[i + 1] = (r + 1) * sectors + (s + 1)
indices[i + 2] = r * sectors + (s + 1)
indices[i + 3] = r * sectors + s
indices[i + 4] = (r + 1) * sectors + s
indices[i + 5] = (r + 1) * sectors + (s + 1)
i += 6
vbo_vertices = numpy.array(vertices, dtype=numpy.float32)
vbo_normals = numpy.array(normals, dtype=numpy.float32)
vbo_uvs = numpy.array(uvs, dtype=numpy.float32)
vbo_elements = numpy.array(indices, dtype=numpy.uint32)
vao = VAO("sphere", mode=mlg.TRIANGLES)
# VBOs
vao.buffer(vbo_vertices, '3f', ['in_position'])
vao.buffer(vbo_normals, '3f', ['in_normal'])
vao.buffer(vbo_uvs, '2f', ['in_uv'])
vao.index_buffer(vbo_elements, index_element_size=4)
return vao | python | {
"resource": ""
} |
q258092 | Window.swap_buffers | validation | def swap_buffers(self):
"""
Headless window currently don't support double buffering.
We only increment the frame counter here.
"""
self.frames += 1
if self.headless_frames and self.frames >= self.headless_frames:
self.close() | python | {
"resource": ""
} |
q258093 | BaseRegistry.load | validation | def load(self, meta: ResourceDescription) -> Any:
"""
Loads a resource or return existing one
:param meta: The resource description
"""
self._check_meta(meta)
self.resolve_loader(meta)
return meta.loader_cls(meta).load() | python | {
"resource": ""
} |
q258094 | BaseRegistry.load_pool | validation | def load_pool(self):
"""
Loads all the data files using the configured finders.
"""
for meta in self._resources:
resource = self.load(meta)
yield meta, resource
self._resources = [] | python | {
"resource": ""
} |
q258095 | BaseRegistry.resolve_loader | validation | def resolve_loader(self, meta: ResourceDescription):
"""
Attempts to assign a loader class to a resource description
:param meta: The resource description instance
"""
meta.loader_cls = self.get_loader(meta, raise_on_error=True) | python | {
"resource": ""
} |
q258096 | BaseRegistry.get_loader | validation | def get_loader(self, meta: ResourceDescription, raise_on_error=False) -> BaseLoader:
"""
Attempts to get a loader
:param meta: The resource description instance
:param raise_on_error: Raise ImproperlyConfigured if the loader cannot be resolved
:returns: The requested loader class
"""
for loader in self._loaders:
if loader.name == meta.loader:
return loader
if raise_on_error:
raise ImproperlyConfigured(
"Resource has invalid loader '{}': {}\nAvailiable loaders: {}".format(
meta.loader, meta, [loader.name for loader in self._loaders])) | python | {
"resource": ""
} |
q258097 | Window.resize | validation | def resize(self, width, height):
"""
Pyqt specific resize callback.
"""
if not self.fbo:
return
# pyqt reports sizes in actual buffer size
self.width = width // self.widget.devicePixelRatio()
self.height = height // self.widget.devicePixelRatio()
self.buffer_width = width
self.buffer_height = height
super().resize(width, height) | python | {
"resource": ""
} |
q258098 | BaseWindow.draw | validation | def draw(self, current_time, frame_time):
"""
Draws a frame. Internally it calls the
configured timeline's draw method.
Args:
current_time (float): The current time (preferrably always from the configured timer class)
frame_time (float): The duration of the previous frame in seconds
"""
self.set_default_viewport()
self.timeline.draw(current_time, frame_time, self.fbo) | python | {
"resource": ""
} |
q258099 | BaseWindow.clear | validation | def clear(self):
"""
Clear the window buffer
"""
self.ctx.fbo.clear(
red=self.clear_color[0],
green=self.clear_color[1],
blue=self.clear_color[2],
alpha=self.clear_color[3],
depth=self.clear_depth,
) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.