body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
cc53416e723b92003704627f14f697a447642cbbad6047b6f48d51615cf9e221
|
def get_taddol_ox_dists(universe, sel_dict=False):
'Get array of oxygen distances in TADDOL trajectory'
warn('get_taddol_ox_dists will soon be deprecated. Use Taddol.ox_dists', DeprecationWarning)
if (not sel_dict):
sel_dict = get_taddol_selections(universe)
output = []
for frame in universe.trajectory:
box = universe.dimensions
output.append((universe.trajectory.time, get_dist_dict(sel_dict, 'aoxl', 'aoxr', box=box), get_dist_dict(sel_dict, 'aoxl', 'cyclon', box=box), get_dist_dict(sel_dict, 'aoxr', 'cyclon', box=box)))
return np.array(output)
|
Get array of oxygen distances in TADDOL trajectory
|
paratemp/coordinate_analysis.py
|
get_taddol_ox_dists
|
theavey/ParaTemp
| 12
|
python
|
def get_taddol_ox_dists(universe, sel_dict=False):
warn('get_taddol_ox_dists will soon be deprecated. Use Taddol.ox_dists', DeprecationWarning)
if (not sel_dict):
sel_dict = get_taddol_selections(universe)
output = []
for frame in universe.trajectory:
box = universe.dimensions
output.append((universe.trajectory.time, get_dist_dict(sel_dict, 'aoxl', 'aoxr', box=box), get_dist_dict(sel_dict, 'aoxl', 'cyclon', box=box), get_dist_dict(sel_dict, 'aoxr', 'cyclon', box=box)))
return np.array(output)
|
def get_taddol_ox_dists(universe, sel_dict=False):
warn('get_taddol_ox_dists will soon be deprecated. Use Taddol.ox_dists', DeprecationWarning)
if (not sel_dict):
sel_dict = get_taddol_selections(universe)
output = []
for frame in universe.trajectory:
box = universe.dimensions
output.append((universe.trajectory.time, get_dist_dict(sel_dict, 'aoxl', 'aoxr', box=box), get_dist_dict(sel_dict, 'aoxl', 'cyclon', box=box), get_dist_dict(sel_dict, 'aoxr', 'cyclon', box=box)))
return np.array(output)<|docstring|>Get array of oxygen distances in TADDOL trajectory<|endoftext|>
|
3a5365c9aaac92422ab3acc3dfebf4c1f8921699389d3225e8d1cdfa32f7451e
|
def make_plot_taddol_ox_dists(data, save=False, save_format='pdf', save_base_name='ox_dists', display=True):
'Make plot of alcoholic O distances in TADDOL trajectory'
warn('make_plot_taddol_ox_dists will soon be deprecated. Use Taddol.plot_ox_dists', DeprecationWarning)
(fig, axes) = plt.subplots()
axes.plot(data[(:, 0)], data[(:, 1)], label='O-O')
axes.plot(data[(:, 0)], data[(:, 2)], label='O(l)-Cy')
axes.plot(data[(:, 0)], data[(:, 3)], label='O(r)-Cy')
axes.legend()
axes.set_xlabel('time / ps')
axes.set_ylabel('distance / $\\mathrm{\\AA}$')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
Make plot of alcoholic O distances in TADDOL trajectory
|
paratemp/coordinate_analysis.py
|
make_plot_taddol_ox_dists
|
theavey/ParaTemp
| 12
|
python
|
def make_plot_taddol_ox_dists(data, save=False, save_format='pdf', save_base_name='ox_dists', display=True):
warn('make_plot_taddol_ox_dists will soon be deprecated. Use Taddol.plot_ox_dists', DeprecationWarning)
(fig, axes) = plt.subplots()
axes.plot(data[(:, 0)], data[(:, 1)], label='O-O')
axes.plot(data[(:, 0)], data[(:, 2)], label='O(l)-Cy')
axes.plot(data[(:, 0)], data[(:, 3)], label='O(r)-Cy')
axes.legend()
axes.set_xlabel('time / ps')
axes.set_ylabel('distance / $\\mathrm{\\AA}$')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
def make_plot_taddol_ox_dists(data, save=False, save_format='pdf', save_base_name='ox_dists', display=True):
warn('make_plot_taddol_ox_dists will soon be deprecated. Use Taddol.plot_ox_dists', DeprecationWarning)
(fig, axes) = plt.subplots()
axes.plot(data[(:, 0)], data[(:, 1)], label='O-O')
axes.plot(data[(:, 0)], data[(:, 2)], label='O(l)-Cy')
axes.plot(data[(:, 0)], data[(:, 3)], label='O(r)-Cy')
axes.legend()
axes.set_xlabel('time / ps')
axes.set_ylabel('distance / $\\mathrm{\\AA}$')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None<|docstring|>Make plot of alcoholic O distances in TADDOL trajectory<|endoftext|>
|
e3bf3cc405cea5a887b2996e7549c3a2932430b94995ad91dd3532bbff869d45
|
def make_hist_taddol_ox_dists(data, n_bins=10, save=False, save_format='pdf', save_base_name='ox_dists_hist', display=True, separate=False):
'Make histogram of alcoholic O distances in TADDOL trajectory'
warn('make_hist_taddol_ox_dists will soon be deprecated. Use Taddol.hist_ox_dists', DeprecationWarning)
legend_entries = ['O-O', 'O(l)-Cy', 'O(r)-Cy']
if separate:
(fig, axes) = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
handles = []
colors = mpl.rcParams['axes.prop_cycle'].by_key().values()[0]
for i in range(3):
ax = axes.flat[i]
(n, bins, patches) = ax.hist(data[(:, (1 + i))], n_bins, label=legend_entries[i], facecolor=colors[i])
handles.append(patches[0])
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
ax.set_ylabel('frequency')
axes.flat[3].axis('off')
axes.flat[3].legend(handles, legend_entries, loc='center')
else:
(fig, ax) = plt.subplots()
ax.hist(data[(:, 1:)], n_bins, histtype='stepfilled')
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
ax.set_ylabel('frequency')
ax.legend(legend_entries)
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
Make histogram of alcoholic O distances in TADDOL trajectory
|
paratemp/coordinate_analysis.py
|
make_hist_taddol_ox_dists
|
theavey/ParaTemp
| 12
|
python
|
def make_hist_taddol_ox_dists(data, n_bins=10, save=False, save_format='pdf', save_base_name='ox_dists_hist', display=True, separate=False):
warn('make_hist_taddol_ox_dists will soon be deprecated. Use Taddol.hist_ox_dists', DeprecationWarning)
legend_entries = ['O-O', 'O(l)-Cy', 'O(r)-Cy']
if separate:
(fig, axes) = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
handles = []
colors = mpl.rcParams['axes.prop_cycle'].by_key().values()[0]
for i in range(3):
ax = axes.flat[i]
(n, bins, patches) = ax.hist(data[(:, (1 + i))], n_bins, label=legend_entries[i], facecolor=colors[i])
handles.append(patches[0])
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
ax.set_ylabel('frequency')
axes.flat[3].axis('off')
axes.flat[3].legend(handles, legend_entries, loc='center')
else:
(fig, ax) = plt.subplots()
ax.hist(data[(:, 1:)], n_bins, histtype='stepfilled')
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
ax.set_ylabel('frequency')
ax.legend(legend_entries)
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
def make_hist_taddol_ox_dists(data, n_bins=10, save=False, save_format='pdf', save_base_name='ox_dists_hist', display=True, separate=False):
warn('make_hist_taddol_ox_dists will soon be deprecated. Use Taddol.hist_ox_dists', DeprecationWarning)
legend_entries = ['O-O', 'O(l)-Cy', 'O(r)-Cy']
if separate:
(fig, axes) = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
handles = []
colors = mpl.rcParams['axes.prop_cycle'].by_key().values()[0]
for i in range(3):
ax = axes.flat[i]
(n, bins, patches) = ax.hist(data[(:, (1 + i))], n_bins, label=legend_entries[i], facecolor=colors[i])
handles.append(patches[0])
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
ax.set_ylabel('frequency')
axes.flat[3].axis('off')
axes.flat[3].legend(handles, legend_entries, loc='center')
else:
(fig, ax) = plt.subplots()
ax.hist(data[(:, 1:)], n_bins, histtype='stepfilled')
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
ax.set_ylabel('frequency')
ax.legend(legend_entries)
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None<|docstring|>Make histogram of alcoholic O distances in TADDOL trajectory<|endoftext|>
|
2cd26dc11d71d1c2de9a08b9a84f2936ec7ae8b47f86c28a5b035c539c083730
|
def get_taddol_pi_dists(universe, sel_dict=False):
'Get array of phenanthryl distances in TADDOL trajectory'
warn('get_taddol_pi_dists will soon be deprecated. Use Taddol.pi_dists', DeprecationWarning)
if (not sel_dict):
sel_dict = get_taddol_selections(universe)
output = []
gdd = get_dist_dict
sd = sel_dict
for frame in universe.trajectory:
output.append((universe.trajectory.time, gdd(sd, 'phenrtt', 'phenltt'), gdd(sd, 'phenrtt', 'phenltb'), gdd(sd, 'phenrtt', 'phenlbt'), gdd(sd, 'phenrtt', 'phenlbb'), gdd(sd, 'phenrtb', 'phenltt'), gdd(sd, 'phenrtb', 'phenltb'), gdd(sd, 'phenrtb', 'phenlbt'), gdd(sd, 'phenrtb', 'phenlbb'), gdd(sd, 'phenrbt', 'phenltt'), gdd(sd, 'phenrbt', 'phenltb'), gdd(sd, 'phenrbt', 'phenlbt'), gdd(sd, 'phenrbt', 'phenlbb'), gdd(sd, 'phenrbb', 'phenltt'), gdd(sd, 'phenrbb', 'phenltb'), gdd(sd, 'phenrbb', 'phenlbt'), gdd(sd, 'phenrbb', 'phenlbb')))
return np.array(output)
|
Get array of phenanthryl distances in TADDOL trajectory
|
paratemp/coordinate_analysis.py
|
get_taddol_pi_dists
|
theavey/ParaTemp
| 12
|
python
|
def get_taddol_pi_dists(universe, sel_dict=False):
warn('get_taddol_pi_dists will soon be deprecated. Use Taddol.pi_dists', DeprecationWarning)
if (not sel_dict):
sel_dict = get_taddol_selections(universe)
output = []
gdd = get_dist_dict
sd = sel_dict
for frame in universe.trajectory:
output.append((universe.trajectory.time, gdd(sd, 'phenrtt', 'phenltt'), gdd(sd, 'phenrtt', 'phenltb'), gdd(sd, 'phenrtt', 'phenlbt'), gdd(sd, 'phenrtt', 'phenlbb'), gdd(sd, 'phenrtb', 'phenltt'), gdd(sd, 'phenrtb', 'phenltb'), gdd(sd, 'phenrtb', 'phenlbt'), gdd(sd, 'phenrtb', 'phenlbb'), gdd(sd, 'phenrbt', 'phenltt'), gdd(sd, 'phenrbt', 'phenltb'), gdd(sd, 'phenrbt', 'phenlbt'), gdd(sd, 'phenrbt', 'phenlbb'), gdd(sd, 'phenrbb', 'phenltt'), gdd(sd, 'phenrbb', 'phenltb'), gdd(sd, 'phenrbb', 'phenlbt'), gdd(sd, 'phenrbb', 'phenlbb')))
return np.array(output)
|
def get_taddol_pi_dists(universe, sel_dict=False):
warn('get_taddol_pi_dists will soon be deprecated. Use Taddol.pi_dists', DeprecationWarning)
if (not sel_dict):
sel_dict = get_taddol_selections(universe)
output = []
gdd = get_dist_dict
sd = sel_dict
for frame in universe.trajectory:
output.append((universe.trajectory.time, gdd(sd, 'phenrtt', 'phenltt'), gdd(sd, 'phenrtt', 'phenltb'), gdd(sd, 'phenrtt', 'phenlbt'), gdd(sd, 'phenrtt', 'phenlbb'), gdd(sd, 'phenrtb', 'phenltt'), gdd(sd, 'phenrtb', 'phenltb'), gdd(sd, 'phenrtb', 'phenlbt'), gdd(sd, 'phenrtb', 'phenlbb'), gdd(sd, 'phenrbt', 'phenltt'), gdd(sd, 'phenrbt', 'phenltb'), gdd(sd, 'phenrbt', 'phenlbt'), gdd(sd, 'phenrbt', 'phenlbb'), gdd(sd, 'phenrbb', 'phenltt'), gdd(sd, 'phenrbb', 'phenltb'), gdd(sd, 'phenrbb', 'phenlbt'), gdd(sd, 'phenrbb', 'phenlbb')))
return np.array(output)<|docstring|>Get array of phenanthryl distances in TADDOL trajectory<|endoftext|>
|
5d3447cbc413ded504a41268bca7e1f478dbec9c91a6d7a357c5fe2a92fe74d0
|
def make_taddol_pi_dist_array(dists, save=False, save_format='pdf', save_base_name='pi_dists', display=True):
'Plot array of pi distances in TADDOL trajectory'
fig = plot_dist_array(dists)
[ax.get_xaxis().set_ticks([]) for ax in fig.axes]
fig.text(0.05, 0.585, 'distance / $\\mathrm{\\AA}$', ha='center', rotation='vertical')
fig.text(0.513, 0.08, 'time', ha='center')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
Plot array of pi distances in TADDOL trajectory
|
paratemp/coordinate_analysis.py
|
make_taddol_pi_dist_array
|
theavey/ParaTemp
| 12
|
python
|
def make_taddol_pi_dist_array(dists, save=False, save_format='pdf', save_base_name='pi_dists', display=True):
fig = plot_dist_array(dists)
[ax.get_xaxis().set_ticks([]) for ax in fig.axes]
fig.text(0.05, 0.585, 'distance / $\\mathrm{\\AA}$', ha='center', rotation='vertical')
fig.text(0.513, 0.08, 'time', ha='center')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
def make_taddol_pi_dist_array(dists, save=False, save_format='pdf', save_base_name='pi_dists', display=True):
fig = plot_dist_array(dists)
[ax.get_xaxis().set_ticks([]) for ax in fig.axes]
fig.text(0.05, 0.585, 'distance / $\\mathrm{\\AA}$', ha='center', rotation='vertical')
fig.text(0.513, 0.08, 'time', ha='center')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None<|docstring|>Plot array of pi distances in TADDOL trajectory<|endoftext|>
|
0ccb2c7685f29cc18f906d2714dd26c09150e949431ac1e3d6f21ea27bca0586
|
def make_fes_taddol_ox_dist(dists, temp=791.0, bins=None, save=False, save_format='pdf', save_base_name='ox_dists_fes', display=True, **kwargs):
'Plot the relative free energy surface of O distances in TADDOL'
warn('make_fes_taddol_ox_dist will soon be deprecated. Use Taddol.fes_ox_dists', DeprecationWarning)
delta_gs = []
(fig, axes) = plt.subplots(nrows=2, ncols=2, sharey=True, sharex=True)
handles = []
colors = mpl.rcParams['axes.prop_cycle'].by_key().values()[0]
for i in range(3):
(delta_g, bin_mids) = calc_fes_1d(dists[(:, (1 + i))], temp=temp, bins=bins)
delta_gs.append(delta_g)
ax = axes.flat[i]
(line,) = ax.plot(bin_mids, delta_g, colors[i], **kwargs)
handles.append(line)
ax.set_ylabel('$\\Delta G$ / (kcal / mol)')
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
axes.flat[3].axis('off')
axes.flat[3].legend(handles, ['O-O', 'O(l)-Cy', 'O(r)-Cy'], loc='center')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
Plot the relative free energy surface of O distances in TADDOL
|
paratemp/coordinate_analysis.py
|
make_fes_taddol_ox_dist
|
theavey/ParaTemp
| 12
|
python
|
def make_fes_taddol_ox_dist(dists, temp=791.0, bins=None, save=False, save_format='pdf', save_base_name='ox_dists_fes', display=True, **kwargs):
warn('make_fes_taddol_ox_dist will soon be deprecated. Use Taddol.fes_ox_dists', DeprecationWarning)
delta_gs = []
(fig, axes) = plt.subplots(nrows=2, ncols=2, sharey=True, sharex=True)
handles = []
colors = mpl.rcParams['axes.prop_cycle'].by_key().values()[0]
for i in range(3):
(delta_g, bin_mids) = calc_fes_1d(dists[(:, (1 + i))], temp=temp, bins=bins)
delta_gs.append(delta_g)
ax = axes.flat[i]
(line,) = ax.plot(bin_mids, delta_g, colors[i], **kwargs)
handles.append(line)
ax.set_ylabel('$\\Delta G$ / (kcal / mol)')
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
axes.flat[3].axis('off')
axes.flat[3].legend(handles, ['O-O', 'O(l)-Cy', 'O(r)-Cy'], loc='center')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
def make_fes_taddol_ox_dist(dists, temp=791.0, bins=None, save=False, save_format='pdf', save_base_name='ox_dists_fes', display=True, **kwargs):
warn('make_fes_taddol_ox_dist will soon be deprecated. Use Taddol.fes_ox_dists', DeprecationWarning)
delta_gs = []
(fig, axes) = plt.subplots(nrows=2, ncols=2, sharey=True, sharex=True)
handles = []
colors = mpl.rcParams['axes.prop_cycle'].by_key().values()[0]
for i in range(3):
(delta_g, bin_mids) = calc_fes_1d(dists[(:, (1 + i))], temp=temp, bins=bins)
delta_gs.append(delta_g)
ax = axes.flat[i]
(line,) = ax.plot(bin_mids, delta_g, colors[i], **kwargs)
handles.append(line)
ax.set_ylabel('$\\Delta G$ / (kcal / mol)')
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
axes.flat[3].axis('off')
axes.flat[3].legend(handles, ['O-O', 'O(l)-Cy', 'O(r)-Cy'], loc='center')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None<|docstring|>Plot the relative free energy surface of O distances in TADDOL<|endoftext|>
|
a8bc3ff27cf84e4c3c281ea8527151920b14d6f1c54b3260f76062a6ec445384
|
def select_open_closed_dists(dists, cutoffs=((1.0, 3.25), (3.75, 10.0))):
'\n Select the coordinates for open vs. closed TADDOL\n\n :param dists:\n :param cutoffs:\n :return:\n '
warn('select_open_closed_dists will soon be deprecated. Use Taddol.calc_open_closed', DeprecationWarning)
cut_closed = cutoffs[0]
cut_open = cutoffs[1]
set_open = []
set_closed = []
for ts in dists:
if (cut_open[0] <= ts[1] <= cut_open[1]):
set_open.append(ts)
if (cut_closed[0] <= ts[1] <= cut_closed[1]):
set_closed.append(ts)
columns = ['Time', 'O-O', 'Ol-Cy', 'Or-Cy']
return (pd.DataFrame(set_open, columns=columns), pd.DataFrame(set_closed, columns=columns))
|
Select the coordinates for open vs. closed TADDOL
:param dists:
:param cutoffs:
:return:
|
paratemp/coordinate_analysis.py
|
select_open_closed_dists
|
theavey/ParaTemp
| 12
|
python
|
def select_open_closed_dists(dists, cutoffs=((1.0, 3.25), (3.75, 10.0))):
'\n Select the coordinates for open vs. closed TADDOL\n\n :param dists:\n :param cutoffs:\n :return:\n '
warn('select_open_closed_dists will soon be deprecated. Use Taddol.calc_open_closed', DeprecationWarning)
cut_closed = cutoffs[0]
cut_open = cutoffs[1]
set_open = []
set_closed = []
for ts in dists:
if (cut_open[0] <= ts[1] <= cut_open[1]):
set_open.append(ts)
if (cut_closed[0] <= ts[1] <= cut_closed[1]):
set_closed.append(ts)
columns = ['Time', 'O-O', 'Ol-Cy', 'Or-Cy']
return (pd.DataFrame(set_open, columns=columns), pd.DataFrame(set_closed, columns=columns))
|
def select_open_closed_dists(dists, cutoffs=((1.0, 3.25), (3.75, 10.0))):
'\n Select the coordinates for open vs. closed TADDOL\n\n :param dists:\n :param cutoffs:\n :return:\n '
warn('select_open_closed_dists will soon be deprecated. Use Taddol.calc_open_closed', DeprecationWarning)
cut_closed = cutoffs[0]
cut_open = cutoffs[1]
set_open = []
set_closed = []
for ts in dists:
if (cut_open[0] <= ts[1] <= cut_open[1]):
set_open.append(ts)
if (cut_closed[0] <= ts[1] <= cut_closed[1]):
set_closed.append(ts)
columns = ['Time', 'O-O', 'Ol-Cy', 'Or-Cy']
return (pd.DataFrame(set_open, columns=columns), pd.DataFrame(set_closed, columns=columns))<|docstring|>Select the coordinates for open vs. closed TADDOL
:param dists:
:param cutoffs:
:return:<|endoftext|>
|
507dcb2e139945f07e2e75db104dc799061abafe633a7ea1799c685f5ecfeca5
|
def __init__(self, *args, **kwargs):
'\n\n :type verbosity: int or bool\n :param verbosity: Default: 1. Setting whether to print details. If in\n the future more levels of verbosity are desired, this may be\n changed to an int.\n\n :param float temp: Default: None. Temperature of this simulation to be\n used for calculating free energy surfaces with weighted histograms.\n :param args:\n :param kwargs:\n '
self._verbosity = kwargs.pop('verbosity', 1)
self.temperature = kwargs.pop('temp', None)
super(Universe, self).__init__(*args, **kwargs)
self._num_frames = self.trajectory.n_frames
self._last_time = self.trajectory.totaltime
self._data = self._init_dataframe()
self._dict_dist_defs = {}
self._dict_dihed_defs = {}
|
:type verbosity: int or bool
:param verbosity: Default: 1. Setting whether to print details. If in
the future more levels of verbosity are desired, this may be
changed to an int.
:param float temp: Default: None. Temperature of this simulation to be
used for calculating free energy surfaces with weighted histograms.
:param args:
:param kwargs:
|
paratemp/coordinate_analysis.py
|
__init__
|
theavey/ParaTemp
| 12
|
python
|
def __init__(self, *args, **kwargs):
'\n\n :type verbosity: int or bool\n :param verbosity: Default: 1. Setting whether to print details. If in\n the future more levels of verbosity are desired, this may be\n changed to an int.\n\n :param float temp: Default: None. Temperature of this simulation to be\n used for calculating free energy surfaces with weighted histograms.\n :param args:\n :param kwargs:\n '
self._verbosity = kwargs.pop('verbosity', 1)
self.temperature = kwargs.pop('temp', None)
super(Universe, self).__init__(*args, **kwargs)
self._num_frames = self.trajectory.n_frames
self._last_time = self.trajectory.totaltime
self._data = self._init_dataframe()
self._dict_dist_defs = {}
self._dict_dihed_defs = {}
|
def __init__(self, *args, **kwargs):
'\n\n :type verbosity: int or bool\n :param verbosity: Default: 1. Setting whether to print details. If in\n the future more levels of verbosity are desired, this may be\n changed to an int.\n\n :param float temp: Default: None. Temperature of this simulation to be\n used for calculating free energy surfaces with weighted histograms.\n :param args:\n :param kwargs:\n '
self._verbosity = kwargs.pop('verbosity', 1)
self.temperature = kwargs.pop('temp', None)
super(Universe, self).__init__(*args, **kwargs)
self._num_frames = self.trajectory.n_frames
self._last_time = self.trajectory.totaltime
self._data = self._init_dataframe()
self._dict_dist_defs = {}
self._dict_dihed_defs = {}<|docstring|>:type verbosity: int or bool
:param verbosity: Default: 1. Setting whether to print details. If in
the future more levels of verbosity are desired, this may be
changed to an int.
:param float temp: Default: None. Temperature of this simulation to be
used for calculating free energy surfaces with weighted histograms.
:param args:
:param kwargs:<|endoftext|>
|
9210cbec181f10e5457f893cede210099608f819849bae61fb7df600ac22fa91
|
def _init_dataframe(self):
'\n Initialize a pandas.DataFrame with Times column.\n\n This uses self._last_time as the final time and self._num_frames as\n the number of rows to put into the DataFrame to be returned.\n This uses np.linspace to make the evenly spaced times that should\n match with the times in the trajectory file.\n\n :return: a DataFrame with one column of Times\n :rtype: pd.DataFrame\n '
return pd.DataFrame(np.linspace(0, self._last_time, num=self._num_frames), columns=['Time'])
|
Initialize a pandas.DataFrame with Times column.
This uses self._last_time as the final time and self._num_frames as
the number of rows to put into the DataFrame to be returned.
This uses np.linspace to make the evenly spaced times that should
match with the times in the trajectory file.
:return: a DataFrame with one column of Times
:rtype: pd.DataFrame
|
paratemp/coordinate_analysis.py
|
_init_dataframe
|
theavey/ParaTemp
| 12
|
python
|
def _init_dataframe(self):
'\n Initialize a pandas.DataFrame with Times column.\n\n This uses self._last_time as the final time and self._num_frames as\n the number of rows to put into the DataFrame to be returned.\n This uses np.linspace to make the evenly spaced times that should\n match with the times in the trajectory file.\n\n :return: a DataFrame with one column of Times\n :rtype: pd.DataFrame\n '
return pd.DataFrame(np.linspace(0, self._last_time, num=self._num_frames), columns=['Time'])
|
def _init_dataframe(self):
'\n Initialize a pandas.DataFrame with Times column.\n\n This uses self._last_time as the final time and self._num_frames as\n the number of rows to put into the DataFrame to be returned.\n This uses np.linspace to make the evenly spaced times that should\n match with the times in the trajectory file.\n\n :return: a DataFrame with one column of Times\n :rtype: pd.DataFrame\n '
return pd.DataFrame(np.linspace(0, self._last_time, num=self._num_frames), columns=['Time'])<|docstring|>Initialize a pandas.DataFrame with Times column.
This uses self._last_time as the final time and self._num_frames as
the number of rows to put into the DataFrame to be returned.
This uses np.linspace to make the evenly spaced times that should
match with the times in the trajectory file.
:return: a DataFrame with one column of Times
:rtype: pd.DataFrame<|endoftext|>
|
c1442dde9bcd233cd782fda62da9553b05f281ead569bdc75d531b4bd9316a93
|
def save_data(self, filename=None, overwrite=False):
"\n Save calculated data to disk\n\n :param str filename: Filename to save the data as. Defaults to the\n name of the trajectory with a '.h5' extension.\n\n :param bool overwrite: Whether to overwrite existing data on disk.\n If it's True, it will completely overwrite the existing data store.\n If it's False, but a store for this time already exists, only new\n columns in self.data will be added to the store, and no data will be\n overwritten.\n\n :return: None\n "
if (filename is None):
filename = (os.path.splitext(self.trajectory.filename)[0] + '.h5')
with pd.HDFStore(filename) as store:
time = (('time_' + str(int((self._last_time / 1000)))) + 'ns')
if (overwrite or (('/' + time) not in store.keys())):
store[time] = self._data
else:
store_cols = store.get_node(time).axis0.read().astype(str)
set_diff_cols = set(self._data.columns).difference(store_cols)
if (not set_diff_cols):
if self._verbosity:
print('No data added to {}[{}]'.format(filename, time))
return
store_df = store[time]
for col in set_diff_cols:
store_df[col] = self._data[col]
store[time] = store_df
if self._verbosity:
print('Saved data to {}[{}]'.format(filename, time))
|
Save calculated data to disk
:param str filename: Filename to save the data as. Defaults to the
name of the trajectory with a '.h5' extension.
:param bool overwrite: Whether to overwrite existing data on disk.
If it's True, it will completely overwrite the existing data store.
If it's False, but a store for this time already exists, only new
columns in self.data will be added to the store, and no data will be
overwritten.
:return: None
|
paratemp/coordinate_analysis.py
|
save_data
|
theavey/ParaTemp
| 12
|
python
|
def save_data(self, filename=None, overwrite=False):
"\n Save calculated data to disk\n\n :param str filename: Filename to save the data as. Defaults to the\n name of the trajectory with a '.h5' extension.\n\n :param bool overwrite: Whether to overwrite existing data on disk.\n If it's True, it will completely overwrite the existing data store.\n If it's False, but a store for this time already exists, only new\n columns in self.data will be added to the store, and no data will be\n overwritten.\n\n :return: None\n "
if (filename is None):
filename = (os.path.splitext(self.trajectory.filename)[0] + '.h5')
with pd.HDFStore(filename) as store:
time = (('time_' + str(int((self._last_time / 1000)))) + 'ns')
if (overwrite or (('/' + time) not in store.keys())):
store[time] = self._data
else:
store_cols = store.get_node(time).axis0.read().astype(str)
set_diff_cols = set(self._data.columns).difference(store_cols)
if (not set_diff_cols):
if self._verbosity:
print('No data added to {}[{}]'.format(filename, time))
return
store_df = store[time]
for col in set_diff_cols:
store_df[col] = self._data[col]
store[time] = store_df
if self._verbosity:
print('Saved data to {}[{}]'.format(filename, time))
|
def save_data(self, filename=None, overwrite=False):
"\n Save calculated data to disk\n\n :param str filename: Filename to save the data as. Defaults to the\n name of the trajectory with a '.h5' extension.\n\n :param bool overwrite: Whether to overwrite existing data on disk.\n If it's True, it will completely overwrite the existing data store.\n If it's False, but a store for this time already exists, only new\n columns in self.data will be added to the store, and no data will be\n overwritten.\n\n :return: None\n "
if (filename is None):
filename = (os.path.splitext(self.trajectory.filename)[0] + '.h5')
with pd.HDFStore(filename) as store:
time = (('time_' + str(int((self._last_time / 1000)))) + 'ns')
if (overwrite or (('/' + time) not in store.keys())):
store[time] = self._data
else:
store_cols = store.get_node(time).axis0.read().astype(str)
set_diff_cols = set(self._data.columns).difference(store_cols)
if (not set_diff_cols):
if self._verbosity:
print('No data added to {}[{}]'.format(filename, time))
return
store_df = store[time]
for col in set_diff_cols:
store_df[col] = self._data[col]
store[time] = store_df
if self._verbosity:
print('Saved data to {}[{}]'.format(filename, time))<|docstring|>Save calculated data to disk
:param str filename: Filename to save the data as. Defaults to the
name of the trajectory with a '.h5' extension.
:param bool overwrite: Whether to overwrite existing data on disk.
If it's True, it will completely overwrite the existing data store.
If it's False, but a store for this time already exists, only new
columns in self.data will be added to the store, and no data will be
overwritten.
:return: None<|endoftext|>
|
4be19cdea5b941f497ee41c118d5d3184dbe9662250f0050c95b2a5917d214f3
|
def read_data(self, filename=None, ignore_no_data=False):
"\n Read calculated data from disk\n\n This will read the data from disk and add it to self.data. Any\n existing data will not be overwritten.\n\n :param str filename: Filename from which to read the data.\n Defaults to the name of the trajectory with a '.h5' extension.\n :param bool ignore_no_data: Default: False. If True, not having data\n in the file will not raise an error.\n :return: None\n :raises: IOError\n "
if (filename is None):
filename = (os.path.splitext(self.trajectory.filename)[0] + '.h5')
with pd.HDFStore(filename) as store:
time = (('time_' + str(int((self._last_time / 1000)))) + 'ns')
try:
read_df = store[time]
keys_to_read = set(read_df.columns).difference(self._data.columns)
except KeyError:
keys_to_read = []
read_df = pd.DataFrame()
if (not ignore_no_data):
raise IOError('This data does not exist!\n{}[{}]'.format(filename, time))
else:
if self._verbosity:
print('No data to read in {}[{}]'.format(filename, time))
return
for key in keys_to_read:
self._data[key] = read_df[key]
|
Read calculated data from disk
This will read the data from disk and add it to self.data. Any
existing data will not be overwritten.
:param str filename: Filename from which to read the data.
Defaults to the name of the trajectory with a '.h5' extension.
:param bool ignore_no_data: Default: False. If True, not having data
in the file will not raise an error.
:return: None
:raises: IOError
|
paratemp/coordinate_analysis.py
|
read_data
|
theavey/ParaTemp
| 12
|
python
|
def read_data(self, filename=None, ignore_no_data=False):
"\n Read calculated data from disk\n\n This will read the data from disk and add it to self.data. Any\n existing data will not be overwritten.\n\n :param str filename: Filename from which to read the data.\n Defaults to the name of the trajectory with a '.h5' extension.\n :param bool ignore_no_data: Default: False. If True, not having data\n in the file will not raise an error.\n :return: None\n :raises: IOError\n "
if (filename is None):
filename = (os.path.splitext(self.trajectory.filename)[0] + '.h5')
with pd.HDFStore(filename) as store:
time = (('time_' + str(int((self._last_time / 1000)))) + 'ns')
try:
read_df = store[time]
keys_to_read = set(read_df.columns).difference(self._data.columns)
except KeyError:
keys_to_read = []
read_df = pd.DataFrame()
if (not ignore_no_data):
raise IOError('This data does not exist!\n{}[{}]'.format(filename, time))
else:
if self._verbosity:
print('No data to read in {}[{}]'.format(filename, time))
return
for key in keys_to_read:
self._data[key] = read_df[key]
|
def read_data(self, filename=None, ignore_no_data=False):
"\n Read calculated data from disk\n\n This will read the data from disk and add it to self.data. Any\n existing data will not be overwritten.\n\n :param str filename: Filename from which to read the data.\n Defaults to the name of the trajectory with a '.h5' extension.\n :param bool ignore_no_data: Default: False. If True, not having data\n in the file will not raise an error.\n :return: None\n :raises: IOError\n "
if (filename is None):
filename = (os.path.splitext(self.trajectory.filename)[0] + '.h5')
with pd.HDFStore(filename) as store:
time = (('time_' + str(int((self._last_time / 1000)))) + 'ns')
try:
read_df = store[time]
keys_to_read = set(read_df.columns).difference(self._data.columns)
except KeyError:
keys_to_read = []
read_df = pd.DataFrame()
if (not ignore_no_data):
raise IOError('This data does not exist!\n{}[{}]'.format(filename, time))
else:
if self._verbosity:
print('No data to read in {}[{}]'.format(filename, time))
return
for key in keys_to_read:
self._data[key] = read_df[key]<|docstring|>Read calculated data from disk
This will read the data from disk and add it to self.data. Any
existing data will not be overwritten.
:param str filename: Filename from which to read the data.
Defaults to the name of the trajectory with a '.h5' extension.
:param bool ignore_no_data: Default: False. If True, not having data
in the file will not raise an error.
:return: None
:raises: IOError<|endoftext|>
|
ccb678bb264f726d04b2af222fb886b84512a394cf847e19ebbb2c4405c39f4c
|
def calculate_distances(self, *args, recalculate=False, ignore_file_change=False, read_data=True, save_data=True, **kwargs):
'\n Calculate distances by iterating through the trajectory\n\n :param recalculate: Default: False. If True, all requested\n distances will be calculated.\n If False, the intersection of the set of requested distance names\n and the existing column titles in self.data will be removed from the\n information to be calculated.\n :param ignore_file_change: Default: False. If True, if the file has\n changed since object instantiation, no error will be raised and\n only information through the last frame when the object was\n instantiated will be calculated. If self._verbosity, the fact that\n the file has changed will be printed.\n If False, if the length of the trajectory has changed,\n FileChangedError will be raised.\n :param bool read_data: Default: True.\n If True, :func:`read_data` will be used to read any data in the\n default file with `ignore_no_data=True`.\n :param bool save_data: Default: True.\n If True, :func:`save_data` will be used to save the calculated\n distances to the default file.\n Nothing will be saved if there is nothing new to calculate.\n :param args:\n :param kwargs:\n :return: None\n :raises: FileChangedError\n :raises: SyntaxError\n :raises: NotImplementedError\n '
if read_data:
v = self._verbosity
self._verbosity = False
self.read_data(ignore_no_data=True)
self._verbosity = v
first_group = self.select_atoms('protein and not protein')
second_group = self.select_atoms('protein and not protein')
column_names = []
groups_CoM = []
column_names_CoM = []
if ((len(args) == 0) and (len(kwargs) == 0)):
args = ['all']
if (len(args) != 0):
kwargs = self._parse_calc_dist_pos_args(args, kwargs)
if (not recalculate):
set_existing_data = set(self.data.columns)
set_new_data = set(kwargs.keys())
set_overlap = set_existing_data.intersection(set_new_data)
for col in set_overlap:
del kwargs[col]
if (len(kwargs) != 0):
for key in kwargs:
try:
atoms = kwargs[key].split()
except AttributeError:
atoms = kwargs[key]
if (len(atoms) != 2):
raise SyntaxError('This input should split to two atom indices: {}'.format(kwargs[key]))
try:
[int(atom) for atom in atoms]
except ValueError:
raise NotImplementedError('Only selection by atom index is currently supported.\nAt your own risk you can try assigning to self._data[{}].'.format(key))
except TypeError:
selections = []
for g in atoms:
sel_string = ('bynum ' + ' '.join([str(i) for i in g]))
selections.append(self.select_atoms(sel_string))
groups_CoM.append(selections)
column_names_CoM.append(key)
continue
first_group += self.select_atoms(('bynum ' + str(atoms[0])))
second_group += self.select_atoms(('bynum ' + str(atoms[1])))
column_names += [key]
else:
if self._verbosity:
print('Nothing (new) to calculate here.')
return
n1 = first_group.n_atoms
n2 = second_group.n_atoms
nc = len(column_names)
if (not (nc == n1 == n2)):
raise SyntaxError(('Different numbers of atom selections or number of column labels ({}, {}, and {}, respectively).'.format(n1, n2, nc) + '\nThis should not happen.\nPossibly invalid atom selection.'))
n_groups = len(groups_CoM)
n_group_names = len(column_names_CoM)
if (not (n_groups == n_group_names)):
raise SyntaxError('Different numbers of atom groups or number of column labels for CoM calculations({} and {}, respectively).\nThis should not happen.'.format(n_groups, n_group_names))
if (self._num_frames != self.trajectory.n_frames):
if self._verbosity:
print(('Current trajectory has {} frames, '.format(self.trajectory.n_frames) + 'but this object was instantiated with {} frames.'.format(self._num_frames)))
if (not ignore_file_change):
raise FileChangedError()
dists = np.zeros((self._num_frames, (n1 + n_groups)))
positions_1 = np.zeros(((n1 + n_groups), 3), dtype=np.float32)
positions_2 = np.zeros(((n1 + n_groups), 3), dtype=np.float32)
for i in range(self._num_frames):
self.trajectory[i]
positions_1[:n1] = first_group.positions
positions_2[:n1] = second_group.positions
for (j, group) in enumerate(groups_CoM):
positions_1[(n1 + j)] = group[0].center_of_mass()
positions_2[(n1 + j)] = group[1].center_of_mass()
MDa.lib.distances.calc_bonds(positions_1, positions_2, box=self.dimensions, result=dists[i])
for (i, column) in enumerate((column_names + column_names_CoM)):
self._data[column] = dists[(:, i)]
if save_data:
self.save_data()
|
Calculate distances by iterating through the trajectory
:param recalculate: Default: False. If True, all requested
distances will be calculated.
If False, the intersection of the set of requested distance names
and the existing column titles in self.data will be removed from the
information to be calculated.
:param ignore_file_change: Default: False. If True, if the file has
changed since object instantiation, no error will be raised and
only information through the last frame when the object was
instantiated will be calculated. If self._verbosity, the fact that
the file has changed will be printed.
If False, if the length of the trajectory has changed,
FileChangedError will be raised.
:param bool read_data: Default: True.
If True, :func:`read_data` will be used to read any data in the
default file with `ignore_no_data=True`.
:param bool save_data: Default: True.
If True, :func:`save_data` will be used to save the calculated
distances to the default file.
Nothing will be saved if there is nothing new to calculate.
:param args:
:param kwargs:
:return: None
:raises: FileChangedError
:raises: SyntaxError
:raises: NotImplementedError
|
paratemp/coordinate_analysis.py
|
calculate_distances
|
theavey/ParaTemp
| 12
|
python
|
def calculate_distances(self, *args, recalculate=False, ignore_file_change=False, read_data=True, save_data=True, **kwargs):
'\n Calculate distances by iterating through the trajectory\n\n :param recalculate: Default: False. If True, all requested\n distances will be calculated.\n If False, the intersection of the set of requested distance names\n and the existing column titles in self.data will be removed from the\n information to be calculated.\n :param ignore_file_change: Default: False. If True, if the file has\n changed since object instantiation, no error will be raised and\n only information through the last frame when the object was\n instantiated will be calculated. If self._verbosity, the fact that\n the file has changed will be printed.\n If False, if the length of the trajectory has changed,\n FileChangedError will be raised.\n :param bool read_data: Default: True.\n If True, :func:`read_data` will be used to read any data in the\n default file with `ignore_no_data=True`.\n :param bool save_data: Default: True.\n If True, :func:`save_data` will be used to save the calculated\n distances to the default file.\n Nothing will be saved if there is nothing new to calculate.\n :param args:\n :param kwargs:\n :return: None\n :raises: FileChangedError\n :raises: SyntaxError\n :raises: NotImplementedError\n '
if read_data:
v = self._verbosity
self._verbosity = False
self.read_data(ignore_no_data=True)
self._verbosity = v
first_group = self.select_atoms('protein and not protein')
second_group = self.select_atoms('protein and not protein')
column_names = []
groups_CoM = []
column_names_CoM = []
if ((len(args) == 0) and (len(kwargs) == 0)):
args = ['all']
if (len(args) != 0):
kwargs = self._parse_calc_dist_pos_args(args, kwargs)
if (not recalculate):
set_existing_data = set(self.data.columns)
set_new_data = set(kwargs.keys())
set_overlap = set_existing_data.intersection(set_new_data)
for col in set_overlap:
del kwargs[col]
if (len(kwargs) != 0):
for key in kwargs:
try:
atoms = kwargs[key].split()
except AttributeError:
atoms = kwargs[key]
if (len(atoms) != 2):
raise SyntaxError('This input should split to two atom indices: {}'.format(kwargs[key]))
try:
[int(atom) for atom in atoms]
except ValueError:
raise NotImplementedError('Only selection by atom index is currently supported.\nAt your own risk you can try assigning to self._data[{}].'.format(key))
except TypeError:
selections = []
for g in atoms:
sel_string = ('bynum ' + ' '.join([str(i) for i in g]))
selections.append(self.select_atoms(sel_string))
groups_CoM.append(selections)
column_names_CoM.append(key)
continue
first_group += self.select_atoms(('bynum ' + str(atoms[0])))
second_group += self.select_atoms(('bynum ' + str(atoms[1])))
column_names += [key]
else:
if self._verbosity:
print('Nothing (new) to calculate here.')
return
n1 = first_group.n_atoms
n2 = second_group.n_atoms
nc = len(column_names)
if (not (nc == n1 == n2)):
raise SyntaxError(('Different numbers of atom selections or number of column labels ({}, {}, and {}, respectively).'.format(n1, n2, nc) + '\nThis should not happen.\nPossibly invalid atom selection.'))
n_groups = len(groups_CoM)
n_group_names = len(column_names_CoM)
if (not (n_groups == n_group_names)):
raise SyntaxError('Different numbers of atom groups or number of column labels for CoM calculations({} and {}, respectively).\nThis should not happen.'.format(n_groups, n_group_names))
if (self._num_frames != self.trajectory.n_frames):
if self._verbosity:
print(('Current trajectory has {} frames, '.format(self.trajectory.n_frames) + 'but this object was instantiated with {} frames.'.format(self._num_frames)))
if (not ignore_file_change):
raise FileChangedError()
dists = np.zeros((self._num_frames, (n1 + n_groups)))
positions_1 = np.zeros(((n1 + n_groups), 3), dtype=np.float32)
positions_2 = np.zeros(((n1 + n_groups), 3), dtype=np.float32)
for i in range(self._num_frames):
self.trajectory[i]
positions_1[:n1] = first_group.positions
positions_2[:n1] = second_group.positions
for (j, group) in enumerate(groups_CoM):
positions_1[(n1 + j)] = group[0].center_of_mass()
positions_2[(n1 + j)] = group[1].center_of_mass()
MDa.lib.distances.calc_bonds(positions_1, positions_2, box=self.dimensions, result=dists[i])
for (i, column) in enumerate((column_names + column_names_CoM)):
self._data[column] = dists[(:, i)]
if save_data:
self.save_data()
|
def calculate_distances(self, *args, recalculate=False, ignore_file_change=False, read_data=True, save_data=True, **kwargs):
'\n Calculate distances by iterating through the trajectory\n\n :param recalculate: Default: False. If True, all requested\n distances will be calculated.\n If False, the intersection of the set of requested distance names\n and the existing column titles in self.data will be removed from the\n information to be calculated.\n :param ignore_file_change: Default: False. If True, if the file has\n changed since object instantiation, no error will be raised and\n only information through the last frame when the object was\n instantiated will be calculated. If self._verbosity, the fact that\n the file has changed will be printed.\n If False, if the length of the trajectory has changed,\n FileChangedError will be raised.\n :param bool read_data: Default: True.\n If True, :func:`read_data` will be used to read any data in the\n default file with `ignore_no_data=True`.\n :param bool save_data: Default: True.\n If True, :func:`save_data` will be used to save the calculated\n distances to the default file.\n Nothing will be saved if there is nothing new to calculate.\n :param args:\n :param kwargs:\n :return: None\n :raises: FileChangedError\n :raises: SyntaxError\n :raises: NotImplementedError\n '
if read_data:
v = self._verbosity
self._verbosity = False
self.read_data(ignore_no_data=True)
self._verbosity = v
first_group = self.select_atoms('protein and not protein')
second_group = self.select_atoms('protein and not protein')
column_names = []
groups_CoM = []
column_names_CoM = []
if ((len(args) == 0) and (len(kwargs) == 0)):
args = ['all']
if (len(args) != 0):
kwargs = self._parse_calc_dist_pos_args(args, kwargs)
if (not recalculate):
set_existing_data = set(self.data.columns)
set_new_data = set(kwargs.keys())
set_overlap = set_existing_data.intersection(set_new_data)
for col in set_overlap:
del kwargs[col]
if (len(kwargs) != 0):
for key in kwargs:
try:
atoms = kwargs[key].split()
except AttributeError:
atoms = kwargs[key]
if (len(atoms) != 2):
raise SyntaxError('This input should split to two atom indices: {}'.format(kwargs[key]))
try:
[int(atom) for atom in atoms]
except ValueError:
raise NotImplementedError('Only selection by atom index is currently supported.\nAt your own risk you can try assigning to self._data[{}].'.format(key))
except TypeError:
selections = []
for g in atoms:
sel_string = ('bynum ' + ' '.join([str(i) for i in g]))
selections.append(self.select_atoms(sel_string))
groups_CoM.append(selections)
column_names_CoM.append(key)
continue
first_group += self.select_atoms(('bynum ' + str(atoms[0])))
second_group += self.select_atoms(('bynum ' + str(atoms[1])))
column_names += [key]
else:
if self._verbosity:
print('Nothing (new) to calculate here.')
return
n1 = first_group.n_atoms
n2 = second_group.n_atoms
nc = len(column_names)
if (not (nc == n1 == n2)):
raise SyntaxError(('Different numbers of atom selections or number of column labels ({}, {}, and {}, respectively).'.format(n1, n2, nc) + '\nThis should not happen.\nPossibly invalid atom selection.'))
n_groups = len(groups_CoM)
n_group_names = len(column_names_CoM)
if (not (n_groups == n_group_names)):
raise SyntaxError('Different numbers of atom groups or number of column labels for CoM calculations({} and {}, respectively).\nThis should not happen.'.format(n_groups, n_group_names))
if (self._num_frames != self.trajectory.n_frames):
if self._verbosity:
print(('Current trajectory has {} frames, '.format(self.trajectory.n_frames) + 'but this object was instantiated with {} frames.'.format(self._num_frames)))
if (not ignore_file_change):
raise FileChangedError()
dists = np.zeros((self._num_frames, (n1 + n_groups)))
positions_1 = np.zeros(((n1 + n_groups), 3), dtype=np.float32)
positions_2 = np.zeros(((n1 + n_groups), 3), dtype=np.float32)
for i in range(self._num_frames):
self.trajectory[i]
positions_1[:n1] = first_group.positions
positions_2[:n1] = second_group.positions
for (j, group) in enumerate(groups_CoM):
positions_1[(n1 + j)] = group[0].center_of_mass()
positions_2[(n1 + j)] = group[1].center_of_mass()
MDa.lib.distances.calc_bonds(positions_1, positions_2, box=self.dimensions, result=dists[i])
for (i, column) in enumerate((column_names + column_names_CoM)):
self._data[column] = dists[(:, i)]
if save_data:
self.save_data()<|docstring|>Calculate distances by iterating through the trajectory
:param recalculate: Default: False. If True, all requested
distances will be calculated.
If False, the intersection of the set of requested distance names
and the existing column titles in self.data will be removed from the
information to be calculated.
:param ignore_file_change: Default: False. If True, if the file has
changed since object instantiation, no error will be raised and
only information through the last frame when the object was
instantiated will be calculated. If self._verbosity, the fact that
the file has changed will be printed.
If False, if the length of the trajectory has changed,
FileChangedError will be raised.
:param bool read_data: Default: True.
If True, :func:`read_data` will be used to read any data in the
default file with `ignore_no_data=True`.
:param bool save_data: Default: True.
If True, :func:`save_data` will be used to save the calculated
distances to the default file.
Nothing will be saved if there is nothing new to calculate.
:param args:
:param kwargs:
:return: None
:raises: FileChangedError
:raises: SyntaxError
:raises: NotImplementedError<|endoftext|>
|
ecf0c8464684735aec9cced9aca75b22c0b3038a10902485c7fdb0565cc3f5d2
|
def select_frames(self, criteria, name):
"\n Select some of the trajectory frames based on some min/max criteria\n\n This function can select a subset of the frames where the given\n criteria are satisfied by certain values in `self.data` being between\n the given min and max criteria.\n This then returns the frame index numbers where the criteria are\n satisfied.\n The True/False values are saved to a column in `self.data` with the\n given `name` parameter as the column name.\n\n :param dict criteria: The criteria for selecting frames from the\n trajectory.\n This is a dict with distance names (or other columns that will\n be in `Universe.data`) as the keys and the values being a\n List-like of min and max values.\n For example, `{'c1_c2': (1.5, 4.0), 'c1_c3': (2.2, 5.1)}` will\n select frames where 'c1_c2' is between 1.5 and 4.0 and 'c1_c3'\n is between 2.2 and 5.1.\n :param str name: Name for the bool column in `self.data`\n :rtype: numpy.ndarray\n :return: A numpy array of the frame numbers where the criteria are\n satisfied\n "
d = dict()
for key in criteria:
d[(key + '_min')] = (self.data[key] > criteria[key][0])
d[(key + '_max')] = (self.data[key] < criteria[key][1])
self._data[name] = pd.DataFrame(d).all(axis=1)
if self._verbosity:
num = len(self.data[self.data[name]])
plural = ('s' if (num != 1) else '')
print('These criteria include {} frame{}'.format(num, plural))
return np.array(self.data.index[self.data[name]])
|
Select some of the trajectory frames based on some min/max criteria
This function can select a subset of the frames where the given
criteria are satisfied by certain values in `self.data` being between
the given min and max criteria.
This then returns the frame index numbers where the criteria are
satisfied.
The True/False values are saved to a column in `self.data` with the
given `name` parameter as the column name.
:param dict criteria: The criteria for selecting frames from the
trajectory.
This is a dict with distance names (or other columns that will
be in `Universe.data`) as the keys and the values being a
List-like of min and max values.
For example, `{'c1_c2': (1.5, 4.0), 'c1_c3': (2.2, 5.1)}` will
select frames where 'c1_c2' is between 1.5 and 4.0 and 'c1_c3'
is between 2.2 and 5.1.
:param str name: Name for the bool column in `self.data`
:rtype: numpy.ndarray
:return: A numpy array of the frame numbers where the criteria are
satisfied
|
paratemp/coordinate_analysis.py
|
select_frames
|
theavey/ParaTemp
| 12
|
python
|
def select_frames(self, criteria, name):
"\n Select some of the trajectory frames based on some min/max criteria\n\n This function can select a subset of the frames where the given\n criteria are satisfied by certain values in `self.data` being between\n the given min and max criteria.\n This then returns the frame index numbers where the criteria are\n satisfied.\n The True/False values are saved to a column in `self.data` with the\n given `name` parameter as the column name.\n\n :param dict criteria: The criteria for selecting frames from the\n trajectory.\n This is a dict with distance names (or other columns that will\n be in `Universe.data`) as the keys and the values being a\n List-like of min and max values.\n For example, `{'c1_c2': (1.5, 4.0), 'c1_c3': (2.2, 5.1)}` will\n select frames where 'c1_c2' is between 1.5 and 4.0 and 'c1_c3'\n is between 2.2 and 5.1.\n :param str name: Name for the bool column in `self.data`\n :rtype: numpy.ndarray\n :return: A numpy array of the frame numbers where the criteria are\n satisfied\n "
d = dict()
for key in criteria:
d[(key + '_min')] = (self.data[key] > criteria[key][0])
d[(key + '_max')] = (self.data[key] < criteria[key][1])
self._data[name] = pd.DataFrame(d).all(axis=1)
if self._verbosity:
num = len(self.data[self.data[name]])
plural = ('s' if (num != 1) else )
print('These criteria include {} frame{}'.format(num, plural))
return np.array(self.data.index[self.data[name]])
|
def select_frames(self, criteria, name):
"\n Select some of the trajectory frames based on some min/max criteria\n\n This function can select a subset of the frames where the given\n criteria are satisfied by certain values in `self.data` being between\n the given min and max criteria.\n This then returns the frame index numbers where the criteria are\n satisfied.\n The True/False values are saved to a column in `self.data` with the\n given `name` parameter as the column name.\n\n :param dict criteria: The criteria for selecting frames from the\n trajectory.\n This is a dict with distance names (or other columns that will\n be in `Universe.data`) as the keys and the values being a\n List-like of min and max values.\n For example, `{'c1_c2': (1.5, 4.0), 'c1_c3': (2.2, 5.1)}` will\n select frames where 'c1_c2' is between 1.5 and 4.0 and 'c1_c3'\n is between 2.2 and 5.1.\n :param str name: Name for the bool column in `self.data`\n :rtype: numpy.ndarray\n :return: A numpy array of the frame numbers where the criteria are\n satisfied\n "
d = dict()
for key in criteria:
d[(key + '_min')] = (self.data[key] > criteria[key][0])
d[(key + '_max')] = (self.data[key] < criteria[key][1])
self._data[name] = pd.DataFrame(d).all(axis=1)
if self._verbosity:
num = len(self.data[self.data[name]])
plural = ('s' if (num != 1) else )
print('These criteria include {} frame{}'.format(num, plural))
return np.array(self.data.index[self.data[name]])<|docstring|>Select some of the trajectory frames based on some min/max criteria
This function can select a subset of the frames where the given
criteria are satisfied by certain values in `self.data` being between
the given min and max criteria.
This then returns the frame index numbers where the criteria are
satisfied.
The True/False values are saved to a column in `self.data` with the
given `name` parameter as the column name.
:param dict criteria: The criteria for selecting frames from the
trajectory.
This is a dict with distance names (or other columns that will
be in `Universe.data`) as the keys and the values being a
List-like of min and max values.
For example, `{'c1_c2': (1.5, 4.0), 'c1_c3': (2.2, 5.1)}` will
select frames where 'c1_c2' is between 1.5 and 4.0 and 'c1_c3'
is between 2.2 and 5.1.
:param str name: Name for the bool column in `self.data`
:rtype: numpy.ndarray
:return: A numpy array of the frame numbers where the criteria are
satisfied<|endoftext|>
|
c1999fb30bfd7430f1886d7ced1b9a5b3aad7e896d57d0d39d69ba13bed453f2
|
def fes_1d(self, data, bins=None, temp=None, xlabel='distance / $\\mathrm{\\AA}$', ax=None, **kwargs):
"\n Make FES of some time series data\n\n :type data: Iterable or str\n :param data: Data to form the FES from. If a string is given, the data\n will be taken from self.data[data].\n\n :param float temp: Default: None. Temperature for Boltzmann weighting\n calculation.\n If None is provided, the temperature will be taken from\n self.temperature\n\n :type bins: int or Sequence[int or float] or str\n :param bins: Default: None. The bins argument to be passed to\n np.histogram\n\n :param str xlabel: Default: 'distance / $\\mathrm{\\AA}$'. The label for\n the x axis.\n\n :type ax: matplotlib.axes.Axes\n :param ax: Default: None. The axes objects on which to make the plots.\n If None is supplied, new axes objects will be created.\n\n :param kwargs: keyword arguments to pass to the plotter\n\n :rtype: Tuple(np.ndarray, np.ndarray, matplotlib.lines.Line2D,\n matplotlib.figure.Figure, matplotlib.axes.Axes)\n\n :return: The delta G values, the bin centers, the lines object, the\n figure and the axes\n "
_temp = self._parse_temp_input(temp)
_data = self._parse_data_input(data)
return fes_1d(x=_data, temp=_temp, ax=ax, bins=bins, xlabel=xlabel, **kwargs)
|
Make FES of some time series data
:type data: Iterable or str
:param data: Data to form the FES from. If a string is given, the data
will be taken from self.data[data].
:param float temp: Default: None. Temperature for Boltzmann weighting
calculation.
If None is provided, the temperature will be taken from
self.temperature
:type bins: int or Sequence[int or float] or str
:param bins: Default: None. The bins argument to be passed to
np.histogram
:param str xlabel: Default: 'distance / $\mathrm{\AA}$'. The label for
the x axis.
:type ax: matplotlib.axes.Axes
:param ax: Default: None. The axes objects on which to make the plots.
If None is supplied, new axes objects will be created.
:param kwargs: keyword arguments to pass to the plotter
:rtype: Tuple(np.ndarray, np.ndarray, matplotlib.lines.Line2D,
matplotlib.figure.Figure, matplotlib.axes.Axes)
:return: The delta G values, the bin centers, the lines object, the
figure and the axes
|
paratemp/coordinate_analysis.py
|
fes_1d
|
theavey/ParaTemp
| 12
|
python
|
def fes_1d(self, data, bins=None, temp=None, xlabel='distance / $\\mathrm{\\AA}$', ax=None, **kwargs):
"\n Make FES of some time series data\n\n :type data: Iterable or str\n :param data: Data to form the FES from. If a string is given, the data\n will be taken from self.data[data].\n\n :param float temp: Default: None. Temperature for Boltzmann weighting\n calculation.\n If None is provided, the temperature will be taken from\n self.temperature\n\n :type bins: int or Sequence[int or float] or str\n :param bins: Default: None. The bins argument to be passed to\n np.histogram\n\n :param str xlabel: Default: 'distance / $\\mathrm{\\AA}$'. The label for\n the x axis.\n\n :type ax: matplotlib.axes.Axes\n :param ax: Default: None. The axes objects on which to make the plots.\n If None is supplied, new axes objects will be created.\n\n :param kwargs: keyword arguments to pass to the plotter\n\n :rtype: Tuple(np.ndarray, np.ndarray, matplotlib.lines.Line2D,\n matplotlib.figure.Figure, matplotlib.axes.Axes)\n\n :return: The delta G values, the bin centers, the lines object, the\n figure and the axes\n "
_temp = self._parse_temp_input(temp)
_data = self._parse_data_input(data)
return fes_1d(x=_data, temp=_temp, ax=ax, bins=bins, xlabel=xlabel, **kwargs)
|
def fes_1d(self, data, bins=None, temp=None, xlabel='distance / $\\mathrm{\\AA}$', ax=None, **kwargs):
"\n Make FES of some time series data\n\n :type data: Iterable or str\n :param data: Data to form the FES from. If a string is given, the data\n will be taken from self.data[data].\n\n :param float temp: Default: None. Temperature for Boltzmann weighting\n calculation.\n If None is provided, the temperature will be taken from\n self.temperature\n\n :type bins: int or Sequence[int or float] or str\n :param bins: Default: None. The bins argument to be passed to\n np.histogram\n\n :param str xlabel: Default: 'distance / $\\mathrm{\\AA}$'. The label for\n the x axis.\n\n :type ax: matplotlib.axes.Axes\n :param ax: Default: None. The axes objects on which to make the plots.\n If None is supplied, new axes objects will be created.\n\n :param kwargs: keyword arguments to pass to the plotter\n\n :rtype: Tuple(np.ndarray, np.ndarray, matplotlib.lines.Line2D,\n matplotlib.figure.Figure, matplotlib.axes.Axes)\n\n :return: The delta G values, the bin centers, the lines object, the\n figure and the axes\n "
_temp = self._parse_temp_input(temp)
_data = self._parse_data_input(data)
return fes_1d(x=_data, temp=_temp, ax=ax, bins=bins, xlabel=xlabel, **kwargs)<|docstring|>Make FES of some time series data
:type data: Iterable or str
:param data: Data to form the FES from. If a string is given, the data
will be taken from self.data[data].
:param float temp: Default: None. Temperature for Boltzmann weighting
calculation.
If None is provided, the temperature will be taken from
self.temperature
:type bins: int or Sequence[int or float] or str
:param bins: Default: None. The bins argument to be passed to
np.histogram
:param str xlabel: Default: 'distance / $\mathrm{\AA}$'. The label for
the x axis.
:type ax: matplotlib.axes.Axes
:param ax: Default: None. The axes objects on which to make the plots.
If None is supplied, new axes objects will be created.
:param kwargs: keyword arguments to pass to the plotter
:rtype: Tuple(np.ndarray, np.ndarray, matplotlib.lines.Line2D,
matplotlib.figure.Figure, matplotlib.axes.Axes)
:return: The delta G values, the bin centers, the lines object, the
figure and the axes<|endoftext|>
|
1072d7871e9c44ef0710c61d465e85902a2f4656b839e6e95bd9f7265def9ea1
|
def fes_2d(self, x, y, temp=None, ax=None, bins=None, zrange=(0, 20, 11), zfinal=40, n_bins=32, transpose=False, xlabel='x', ylabel='y', scale=True, square=True, **kwargs):
"\n plot FES in 2D along defined values\n\n :type x: Iterable or str\n :param x: Value along x axis to plot. If a string is given, the data\n will be taken from self.data[x].\n :type y: Iterable or str\n :param y: Value along y axis to plot. If a string is given, the data\n will be taken from self.data[y].\n :param float temp: Default: None. Temperature for Boltzmann weighting\n calculation.\n If None is provided, the temperature will be taken from\n self.temperature\n :param matplotlib.axes.Axes ax: Default: None. Axes on which to make\n the FES. If None, a new axes and figure will be created.\n :param Iterable bins: Default: None. The bins to be used for the z\n ranges. If this is not None, zrange and zfinal are ignored.\n :type zrange: Iterable or float\n :param zrange: Default: (0, 20, 11). Input to np.linspace for\n determining contour levels. If a float-like is given, it will be set\n as the max with 11+1 bins. If a len=2 list-like is given, it will be\n used as the min and max with 11+1 bins. Otherwise, the input will\n be used as-is for input to np.linspace.\n :param zfinal: Default: 40. Energy at which to stop coloring the FES.\n Anything above this energy will appear as white.\n :type n_bins: int or (int, int) or (int, np.ndarray) or (np.ndarray,\n int) or (np.ndarray, np.ndarray)\n :param n_bins: Default: 32. Number of bins in x and y for\n histogramming. This uses np.histogram2d which is fairly flexible\n in how the bins can be specified. See `their documentation\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy\n .histogram2d.html>`.\n :param bool transpose: Default: False. Whether to transpose the data\n and axes such that the input x will be along the y axis and the\n inverse. Note, this also makes the xlabel on the y-axis and the\n inverse.\n :param str xlabel: Default: 'x'. Label for x-axis (or y-axis if\n transpose=True).\n :param str ylabel: Default: 'y'. 
Label for y-axis (or x-axis if\n transpose=True).\n :param bool scale: Default: True. Include a colorbar scale in the\n figure of the axes.\n :param bool square: Default: True.\n If True, the plot will be made square with `ax.set_aspect(\n 'equal', 'box')`.\n If False, the aspect ratio will be the default value.\n :param kwargs: Keyword arguments to pass to the plotting function.\n :return: The delta G values, the bin centers, the contours, the figure,\n and the axes\n :rtype: tuple[np.ndarray, tuple[np.ndarray, np.ndarray],\n matplotlib.contour.QuadContourSet, matplotlib.figure.Figure,\n matplotlib.axes.Axes]\n "
_temp = self._parse_temp_input(temp)
_x = self._parse_data_input(x)
_y = self._parse_data_input(y)
(_bins, vmax) = _parse_z_bin_input(bins, zfinal, zrange)
(delta_g, xmids, ymids) = calc_fes_2d(_x, _y, temp=_temp, bins=n_bins)
(fig, ax) = _parse_ax_input(ax)
if (not transpose):
delta_g = delta_g.transpose()
(_xlabel, _ylabel) = (xlabel, ylabel)
else:
(xmids, ymids) = (ymids, xmids)
(_xlabel, _ylabel) = (ylabel, xlabel)
contours = ax.contourf(xmids, ymids, delta_g, _bins, vmax=vmax, **kwargs)
ax.set_xlabel(_xlabel)
ax.set_ylabel(_ylabel)
if square:
ax.set_aspect('equal', 'box')
if scale:
fig.colorbar(contours, label='kcal / mol')
fig.tight_layout()
return (delta_g, (xmids, ymids), contours, fig, ax)
|
plot FES in 2D along defined values
:type x: Iterable or str
:param x: Value along x axis to plot. If a string is given, the data
will be taken from self.data[x].
:type y: Iterable or str
:param y: Value along y axis to plot. If a string is given, the data
will be taken from self.data[y].
:param float temp: Default: None. Temperature for Boltzmann weighting
calculation.
If None is provided, the temperature will be taken from
self.temperature
:param matplotlib.axes.Axes ax: Default: None. Axes on which to make
the FES. If None, a new axes and figure will be created.
:param Iterable bins: Default: None. The bins to be used for the z
ranges. If this is not None, zrange and zfinal are ignored.
:type zrange: Iterable or float
:param zrange: Default: (0, 20, 11). Input to np.linspace for
determining contour levels. If a float-like is given, it will be set
as the max with 11+1 bins. If a len=2 list-like is given, it will be
used as the min and max with 11+1 bins. Otherwise, the input will
be used as-is for input to np.linspace.
:param zfinal: Default: 40. Energy at which to stop coloring the FES.
Anything above this energy will appear as white.
:type n_bins: int or (int, int) or (int, np.ndarray) or (np.ndarray,
int) or (np.ndarray, np.ndarray)
:param n_bins: Default: 32. Number of bins in x and y for
histogramming. This uses np.histogram2d which is fairly flexible
in how the bins can be specified. See `their documentation
<https://docs.scipy.org/doc/numpy/reference/generated/numpy
.histogram2d.html>`.
:param bool transpose: Default: False. Whether to transpose the data
and axes such that the input x will be along the y axis and the
inverse. Note, this also makes the xlabel on the y-axis and the
inverse.
:param str xlabel: Default: 'x'. Label for x-axis (or y-axis if
transpose=True).
:param str ylabel: Default: 'y'. Label for y-axis (or x-axis if
transpose=True).
:param bool scale: Default: True. Include a colorbar scale in the
figure of the axes.
:param bool square: Default: True.
If True, the plot will be made square with `ax.set_aspect(
'equal', 'box')`.
If False, the aspect ratio will be the default value.
:param kwargs: Keyword arguments to pass to the plotting function.
:return: The delta G values, the bin centers, the contours, the figure,
and the axes
:rtype: tuple[np.ndarray, tuple[np.ndarray, np.ndarray],
matplotlib.contour.QuadContourSet, matplotlib.figure.Figure,
matplotlib.axes.Axes]
|
paratemp/coordinate_analysis.py
|
fes_2d
|
theavey/ParaTemp
| 12
|
python
|
def fes_2d(self, x, y, temp=None, ax=None, bins=None, zrange=(0, 20, 11), zfinal=40, n_bins=32, transpose=False, xlabel='x', ylabel='y', scale=True, square=True, **kwargs):
"\n plot FES in 2D along defined values\n\n :type x: Iterable or str\n :param x: Value along x axis to plot. If a string is given, the data\n will be taken from self.data[x].\n :type y: Iterable or str\n :param y: Value along y axis to plot. If a string is given, the data\n will be taken from self.data[y].\n :param float temp: Default: None. Temperature for Boltzmann weighting\n calculation.\n If None is provided, the temperature will be taken from\n self.temperature\n :param matplotlib.axes.Axes ax: Default: None. Axes on which to make\n the FES. If None, a new axes and figure will be created.\n :param Iterable bins: Default: None. The bins to be used for the z\n ranges. If this is not None, zrange and zfinal are ignored.\n :type zrange: Iterable or float\n :param zrange: Default: (0, 20, 11). Input to np.linspace for\n determining contour levels. If a float-like is given, it will be set\n as the max with 11+1 bins. If a len=2 list-like is given, it will be\n used as the min and max with 11+1 bins. Otherwise, the input will\n be used as-is for input to np.linspace.\n :param zfinal: Default: 40. Energy at which to stop coloring the FES.\n Anything above this energy will appear as white.\n :type n_bins: int or (int, int) or (int, np.ndarray) or (np.ndarray,\n int) or (np.ndarray, np.ndarray)\n :param n_bins: Default: 32. Number of bins in x and y for\n histogramming. This uses np.histogram2d which is fairly flexible\n in how the bins can be specified. See `their documentation\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy\n .histogram2d.html>`.\n :param bool transpose: Default: False. Whether to transpose the data\n and axes such that the input x will be along the y axis and the\n inverse. Note, this also makes the xlabel on the y-axis and the\n inverse.\n :param str xlabel: Default: 'x'. Label for x-axis (or y-axis if\n transpose=True).\n :param str ylabel: Default: 'y'. 
Label for y-axis (or x-axis if\n transpose=True).\n :param bool scale: Default: True. Include a colorbar scale in the\n figure of the axes.\n :param bool square: Default: True.\n If True, the plot will be made square with `ax.set_aspect(\n 'equal', 'box')`.\n If False, the aspect ratio will be the default value.\n :param kwargs: Keyword arguments to pass to the plotting function.\n :return: The delta G values, the bin centers, the contours, the figure,\n and the axes\n :rtype: tuple[np.ndarray, tuple[np.ndarray, np.ndarray],\n matplotlib.contour.QuadContourSet, matplotlib.figure.Figure,\n matplotlib.axes.Axes]\n "
_temp = self._parse_temp_input(temp)
_x = self._parse_data_input(x)
_y = self._parse_data_input(y)
(_bins, vmax) = _parse_z_bin_input(bins, zfinal, zrange)
(delta_g, xmids, ymids) = calc_fes_2d(_x, _y, temp=_temp, bins=n_bins)
(fig, ax) = _parse_ax_input(ax)
if (not transpose):
delta_g = delta_g.transpose()
(_xlabel, _ylabel) = (xlabel, ylabel)
else:
(xmids, ymids) = (ymids, xmids)
(_xlabel, _ylabel) = (ylabel, xlabel)
contours = ax.contourf(xmids, ymids, delta_g, _bins, vmax=vmax, **kwargs)
ax.set_xlabel(_xlabel)
ax.set_ylabel(_ylabel)
if square:
ax.set_aspect('equal', 'box')
if scale:
fig.colorbar(contours, label='kcal / mol')
fig.tight_layout()
return (delta_g, (xmids, ymids), contours, fig, ax)
|
def fes_2d(self, x, y, temp=None, ax=None, bins=None, zrange=(0, 20, 11), zfinal=40, n_bins=32, transpose=False, xlabel='x', ylabel='y', scale=True, square=True, **kwargs):
"\n plot FES in 2D along defined values\n\n :type x: Iterable or str\n :param x: Value along x axis to plot. If a string is given, the data\n will be taken from self.data[x].\n :type y: Iterable or str\n :param y: Value along y axis to plot. If a string is given, the data\n will be taken from self.data[y].\n :param float temp: Default: None. Temperature for Boltzmann weighting\n calculation.\n If None is provided, the temperature will be taken from\n self.temperature\n :param matplotlib.axes.Axes ax: Default: None. Axes on which to make\n the FES. If None, a new axes and figure will be created.\n :param Iterable bins: Default: None. The bins to be used for the z\n ranges. If this is not None, zrange and zfinal are ignored.\n :type zrange: Iterable or float\n :param zrange: Default: (0, 20, 11). Input to np.linspace for\n determining contour levels. If a float-like is given, it will be set\n as the max with 11+1 bins. If a len=2 list-like is given, it will be\n used as the min and max with 11+1 bins. Otherwise, the input will\n be used as-is for input to np.linspace.\n :param zfinal: Default: 40. Energy at which to stop coloring the FES.\n Anything above this energy will appear as white.\n :type n_bins: int or (int, int) or (int, np.ndarray) or (np.ndarray,\n int) or (np.ndarray, np.ndarray)\n :param n_bins: Default: 32. Number of bins in x and y for\n histogramming. This uses np.histogram2d which is fairly flexible\n in how the bins can be specified. See `their documentation\n <https://docs.scipy.org/doc/numpy/reference/generated/numpy\n .histogram2d.html>`.\n :param bool transpose: Default: False. Whether to transpose the data\n and axes such that the input x will be along the y axis and the\n inverse. Note, this also makes the xlabel on the y-axis and the\n inverse.\n :param str xlabel: Default: 'x'. Label for x-axis (or y-axis if\n transpose=True).\n :param str ylabel: Default: 'y'. 
Label for y-axis (or x-axis if\n transpose=True).\n :param bool scale: Default: True. Include a colorbar scale in the\n figure of the axes.\n :param bool square: Default: True.\n If True, the plot will be made square with `ax.set_aspect(\n 'equal', 'box')`.\n If False, the aspect ratio will be the default value.\n :param kwargs: Keyword arguments to pass to the plotting function.\n :return: The delta G values, the bin centers, the contours, the figure,\n and the axes\n :rtype: tuple[np.ndarray, tuple[np.ndarray, np.ndarray],\n matplotlib.contour.QuadContourSet, matplotlib.figure.Figure,\n matplotlib.axes.Axes]\n "
_temp = self._parse_temp_input(temp)
_x = self._parse_data_input(x)
_y = self._parse_data_input(y)
(_bins, vmax) = _parse_z_bin_input(bins, zfinal, zrange)
(delta_g, xmids, ymids) = calc_fes_2d(_x, _y, temp=_temp, bins=n_bins)
(fig, ax) = _parse_ax_input(ax)
if (not transpose):
delta_g = delta_g.transpose()
(_xlabel, _ylabel) = (xlabel, ylabel)
else:
(xmids, ymids) = (ymids, xmids)
(_xlabel, _ylabel) = (ylabel, xlabel)
contours = ax.contourf(xmids, ymids, delta_g, _bins, vmax=vmax, **kwargs)
ax.set_xlabel(_xlabel)
ax.set_ylabel(_ylabel)
if square:
ax.set_aspect('equal', 'box')
if scale:
fig.colorbar(contours, label='kcal / mol')
fig.tight_layout()
return (delta_g, (xmids, ymids), contours, fig, ax)<|docstring|>plot FES in 2D along defined values
:type x: Iterable or str
:param x: Value along x axis to plot. If a string is given, the data
will be taken from self.data[x].
:type y: Iterable or str
:param y: Value along y axis to plot. If a string is given, the data
will be taken from self.data[y].
:param float temp: Default: None. Temperature for Boltzmann weighting
calculation.
If None is provided, the temperature will be taken from
self.temperature
:param matplotlib.axes.Axes ax: Default: None. Axes on which to make
the FES. If None, a new axes and figure will be created.
:param Iterable bins: Default: None. The bins to be used for the z
ranges. If this is not None, zrange and zfinal are ignored.
:type zrange: Iterable or float
:param zrange: Default: (0, 20, 11). Input to np.linspace for
determining contour levels. If a float-like is given, it will be set
as the max with 11+1 bins. If a len=2 list-like is given, it will be
used as the min and max with 11+1 bins. Otherwise, the input will
be used as-is for input to np.linspace.
:param zfinal: Default: 40. Energy at which to stop coloring the FES.
Anything above this energy will appear as white.
:type n_bins: int or (int, int) or (int, np.ndarray) or (np.ndarray,
int) or (np.ndarray, np.ndarray)
:param n_bins: Default: 32. Number of bins in x and y for
histogramming. This uses np.histogram2d which is fairly flexible
in how the bins can be specified. See `their documentation
<https://docs.scipy.org/doc/numpy/reference/generated/numpy
.histogram2d.html>`.
:param bool transpose: Default: False. Whether to transpose the data
and axes such that the input x will be along the y axis and the
inverse. Note, this also makes the xlabel on the y-axis and the
inverse.
:param str xlabel: Default: 'x'. Label for x-axis (or y-axis if
transpose=True).
:param str ylabel: Default: 'y'. Label for y-axis (or x-axis if
transpose=True).
:param bool scale: Default: True. Include a colorbar scale in the
figure of the axes.
:param bool square: Default: True.
If True, the plot will be made square with `ax.set_aspect(
'equal', 'box')`.
If False, the aspect ratio will be the default value.
:param kwargs: Keyword arguments to pass to the plotting function.
:return: The delta G values, the bin centers, the contours, the figure,
and the axes
:rtype: tuple[np.ndarray, tuple[np.ndarray, np.ndarray],
matplotlib.contour.QuadContourSet, matplotlib.figure.Figure,
matplotlib.axes.Axes]<|endoftext|>
|
2c0b56e52ec8c9e7086b9217cf9e326717dea83ed678fe71c030fe9efc6ada31
|
def update_num_frames(self, silent=False):
'\n Update number of frames and last time from trajectory file\n\n :param bool silent: Default: False. If True, nothing will be printed.\n :return: None\n '
num_frames = self.trajectory.n_frames
if (num_frames != self._num_frames):
if (self._verbosity and (not silent)):
print(('Updating num of frames from {} to {}'.format(self._num_frames, num_frames) + '\nand the final time.'))
self._num_frames = num_frames
self._last_time = self.trajectory.totaltime
|
Update number of frames and last time from trajectory file
:param bool silent: Default: False. If True, nothing will be printed.
:return: None
|
paratemp/coordinate_analysis.py
|
update_num_frames
|
theavey/ParaTemp
| 12
|
python
|
def update_num_frames(self, silent=False):
'\n Update number of frames and last time from trajectory file\n\n :param bool silent: Default: False. If True, nothing will be printed.\n :return: None\n '
num_frames = self.trajectory.n_frames
if (num_frames != self._num_frames):
if (self._verbosity and (not silent)):
print(('Updating num of frames from {} to {}'.format(self._num_frames, num_frames) + '\nand the final time.'))
self._num_frames = num_frames
self._last_time = self.trajectory.totaltime
|
def update_num_frames(self, silent=False):
'\n Update number of frames and last time from trajectory file\n\n :param bool silent: Default: False. If True, nothing will be printed.\n :return: None\n '
num_frames = self.trajectory.n_frames
if (num_frames != self._num_frames):
if (self._verbosity and (not silent)):
print(('Updating num of frames from {} to {}'.format(self._num_frames, num_frames) + '\nand the final time.'))
self._num_frames = num_frames
self._last_time = self.trajectory.totaltime<|docstring|>Update number of frames and last time from trajectory file
:param bool silent: Default: False. If True, nothing will be printed.
:return: None<|endoftext|>
|
2c0fe33a4cd0d9ddd344bf4c5efbddcf61d996dd6a30192003f0b5cac9f66edc
|
def update_data_len(self, update_time=True, silent=False):
'\n Update the times and length of self.data based on trajectory file\n\n :param update_time: Default: True. If True, self.update_num_frames\n will be used to find the new length and final time of the\n trajectory file. If False, these will just be read from the instance\n variables and not updated based on the trajectory file.\n :param silent: Default: False. If True, nothing will be printed.\n :return: None\n '
if update_time:
self.update_num_frames(silent=True)
if (self._data['Time'].iat[(- 1)] != self._last_time):
old_len = len(self.data)
new_times_df = self._init_dataframe()
self._data = self._data.join(new_times_df.set_index('Time'), on='Time')
if (self._verbosity and (not silent)):
print('Updating data from {} frames to {} frames'.format(old_len, len(self._data)))
elif (self._verbosity and (not silent)):
print('No need to update self.data')
|
Update the times and length of self.data based on trajectory file
:param update_time: Default: True. If True, self.update_num_frames
will be used to find the new length and final time of the
trajectory file. If False, these will just be read from the instance
variables and not updated based on the trajectory file.
:param silent: Default: False. If True, nothing will be printed.
:return: None
|
paratemp/coordinate_analysis.py
|
update_data_len
|
theavey/ParaTemp
| 12
|
python
|
def update_data_len(self, update_time=True, silent=False):
'\n Update the times and length of self.data based on trajectory file\n\n :param update_time: Default: True. If True, self.update_num_frames\n will be used to find the new length and final time of the\n trajectory file. If False, these will just be read from the instance\n variables and not updated based on the trajectory file.\n :param silent: Default: False. If True, nothing will be printed.\n :return: None\n '
if update_time:
self.update_num_frames(silent=True)
if (self._data['Time'].iat[(- 1)] != self._last_time):
old_len = len(self.data)
new_times_df = self._init_dataframe()
self._data = self._data.join(new_times_df.set_index('Time'), on='Time')
if (self._verbosity and (not silent)):
print('Updating data from {} frames to {} frames'.format(old_len, len(self._data)))
elif (self._verbosity and (not silent)):
print('No need to update self.data')
|
def update_data_len(self, update_time=True, silent=False):
'\n Update the times and length of self.data based on trajectory file\n\n :param update_time: Default: True. If True, self.update_num_frames\n will be used to find the new length and final time of the\n trajectory file. If False, these will just be read from the instance\n variables and not updated based on the trajectory file.\n :param silent: Default: False. If True, nothing will be printed.\n :return: None\n '
if update_time:
self.update_num_frames(silent=True)
if (self._data['Time'].iat[(- 1)] != self._last_time):
old_len = len(self.data)
new_times_df = self._init_dataframe()
self._data = self._data.join(new_times_df.set_index('Time'), on='Time')
if (self._verbosity and (not silent)):
print('Updating data from {} frames to {} frames'.format(old_len, len(self._data)))
elif (self._verbosity and (not silent)):
print('No need to update self.data')<|docstring|>Update the times and length of self.data based on trajectory file
:param update_time: Default: True. If True, self.update_num_frames
will be used to find the new length and final time of the
trajectory file. If False, these will just be read from the instance
variables and not updated based on the trajectory file.
:param silent: Default: False. If True, nothing will be printed.
:return: None<|endoftext|>
|
5d3c78868f3a039d49b0366d07f3439de5463e8b9a4aaf64eaec0e8791d8a66d
|
@property
def data(self):
'\n The pd.DataFrame that is the backend to much of the added functions\n\n :return: the distances and properties for this trajectory\n :rtype: pd.DataFrame\n '
return self._data
|
The pd.DataFrame that is the backend to much of the added functions
:return: the distances and properties for this trajectory
:rtype: pd.DataFrame
|
paratemp/coordinate_analysis.py
|
data
|
theavey/ParaTemp
| 12
|
python
|
@property
def data(self):
'\n The pd.DataFrame that is the backend to much of the added functions\n\n :return: the distances and properties for this trajectory\n :rtype: pd.DataFrame\n '
return self._data
|
@property
def data(self):
'\n The pd.DataFrame that is the backend to much of the added functions\n\n :return: the distances and properties for this trajectory\n :rtype: pd.DataFrame\n '
return self._data<|docstring|>The pd.DataFrame that is the backend to much of the added functions
:return: the distances and properties for this trajectory
:rtype: pd.DataFrame<|endoftext|>
|
ed1e40d74a5f9dde31fcefdd3282c941606e2ba95dd9335d1ca0ec5f12b38e6f
|
def __init__(self, *args, **kwargs):
'\n\n :param verbosity: Setting whether to print details. If in the\n future more levels of verbosity are desired, this may be\n changed to an int.\n Default: 1\n :type verbosity: int or bool\n :param oc_cutoffs: Cutoffs of O-O distance for determining\n open/closed TADDOL configurations. Default: ((1.0, 3.25),\n (3.75, 10.0))\n :type oc_cutoffs: Iterable(Iterable(float, float),\n Iterable(float, float))\n :param args:\n :param kwargs:\n '
self._oc_cutoffs = kwargs.pop('oc_cutoffs', ((1.0, 3.25), (3.75, 10.0)))
super(Taddol, self).__init__(*args, **kwargs)
self._dict_dist_defs = {'ox': {'O-O': (7, 9), 'O(l)-Cy': (9, 13), 'O(r)-Cy': (7, 13)}, 'cv': {'CV1': (160, 9), 'CV2': (133, 8)}}
self._dict_dihed_defs = {}
|
:param verbosity: Setting whether to print details. If in the
future more levels of verbosity are desired, this may be
changed to an int.
Default: 1
:type verbosity: int or bool
:param oc_cutoffs: Cutoffs of O-O distance for determining
open/closed TADDOL configurations. Default: ((1.0, 3.25),
(3.75, 10.0))
:type oc_cutoffs: Iterable(Iterable(float, float),
Iterable(float, float))
:param args:
:param kwargs:
|
paratemp/coordinate_analysis.py
|
__init__
|
theavey/ParaTemp
| 12
|
python
|
def __init__(self, *args, **kwargs):
'\n\n :param verbosity: Setting whether to print details. If in the\n future more levels of verbosity are desired, this may be\n changed to an int.\n Default: 1\n :type verbosity: int or bool\n :param oc_cutoffs: Cutoffs of O-O distance for determining\n open/closed TADDOL configurations. Default: ((1.0, 3.25),\n (3.75, 10.0))\n :type oc_cutoffs: Iterable(Iterable(float, float),\n Iterable(float, float))\n :param args:\n :param kwargs:\n '
self._oc_cutoffs = kwargs.pop('oc_cutoffs', ((1.0, 3.25), (3.75, 10.0)))
super(Taddol, self).__init__(*args, **kwargs)
self._dict_dist_defs = {'ox': {'O-O': (7, 9), 'O(l)-Cy': (9, 13), 'O(r)-Cy': (7, 13)}, 'cv': {'CV1': (160, 9), 'CV2': (133, 8)}}
self._dict_dihed_defs = {}
|
def __init__(self, *args, **kwargs):
'\n\n :param verbosity: Setting whether to print details. If in the\n future more levels of verbosity are desired, this may be\n changed to an int.\n Default: 1\n :type verbosity: int or bool\n :param oc_cutoffs: Cutoffs of O-O distance for determining\n open/closed TADDOL configurations. Default: ((1.0, 3.25),\n (3.75, 10.0))\n :type oc_cutoffs: Iterable(Iterable(float, float),\n Iterable(float, float))\n :param args:\n :param kwargs:\n '
self._oc_cutoffs = kwargs.pop('oc_cutoffs', ((1.0, 3.25), (3.75, 10.0)))
super(Taddol, self).__init__(*args, **kwargs)
self._dict_dist_defs = {'ox': {'O-O': (7, 9), 'O(l)-Cy': (9, 13), 'O(r)-Cy': (7, 13)}, 'cv': {'CV1': (160, 9), 'CV2': (133, 8)}}
self._dict_dihed_defs = {}<|docstring|>:param verbosity: Setting whether to print details. If in the
future more levels of verbosity are desired, this may be
changed to an int.
Default: 1
:type verbosity: int or bool
:param oc_cutoffs: Cutoffs of O-O distance for determining
open/closed TADDOL configurations. Default: ((1.0, 3.25),
(3.75, 10.0))
:type oc_cutoffs: Iterable(Iterable(float, float),
Iterable(float, float))
:param args:
:param kwargs:<|endoftext|>
|
dc3c6c7c576ce75cc27502c093a3dcba872985cb0b8cb9e5586748000aff609c
|
@property
def ox_dists(self):
'\n oxygen distances property\n\n :return:\n '
try:
self._data['O-O']
except KeyError:
if self._verbosity:
print('Calculating oxygen distances...\nThis may take a few minutes.')
self.calculate_distances('ox')
return self._data.filter(('O-O', 'O(l)-Cy', 'O(r)-Cy'))
|
oxygen distances property
:return:
|
paratemp/coordinate_analysis.py
|
ox_dists
|
theavey/ParaTemp
| 12
|
python
|
@property
def ox_dists(self):
'\n oxygen distances property\n\n :return:\n '
try:
self._data['O-O']
except KeyError:
if self._verbosity:
print('Calculating oxygen distances...\nThis may take a few minutes.')
self.calculate_distances('ox')
return self._data.filter(('O-O', 'O(l)-Cy', 'O(r)-Cy'))
|
@property
def ox_dists(self):
'\n oxygen distances property\n\n :return:\n '
try:
self._data['O-O']
except KeyError:
if self._verbosity:
print('Calculating oxygen distances...\nThis may take a few minutes.')
self.calculate_distances('ox')
return self._data.filter(('O-O', 'O(l)-Cy', 'O(r)-Cy'))<|docstring|>oxygen distances property
:return:<|endoftext|>
|
81344d80e2a77579ba1273cb0499def9edb8fda048ea70c73b5933a325cdd672
|
@property
def pi_dists(self):
'\n pi distances property\n\n :return:\n '
try:
self._data['pi-0']
except KeyError:
if self._verbosity:
print('Calculating pi distances...\nThis may take a few minutes.')
self.calculate_distances('pi')
return self._data.filter([('pi-' + str(i)) for i in range(16)])
|
pi distances property
:return:
|
paratemp/coordinate_analysis.py
|
pi_dists
|
theavey/ParaTemp
| 12
|
python
|
@property
def pi_dists(self):
'\n pi distances property\n\n :return:\n '
try:
self._data['pi-0']
except KeyError:
if self._verbosity:
print('Calculating pi distances...\nThis may take a few minutes.')
self.calculate_distances('pi')
return self._data.filter([('pi-' + str(i)) for i in range(16)])
|
@property
def pi_dists(self):
'\n pi distances property\n\n :return:\n '
try:
self._data['pi-0']
except KeyError:
if self._verbosity:
print('Calculating pi distances...\nThis may take a few minutes.')
self.calculate_distances('pi')
return self._data.filter([('pi-' + str(i)) for i in range(16)])<|docstring|>pi distances property
:return:<|endoftext|>
|
c0de8fb366e5030ef93e3f6375a1d4f0eb13d39135d90efbf6ea0db79485c6e0
|
@property
def open_ox_dists(self):
'\n oxygen distances in a open TADDOL configuration\n\n :return:\n '
try:
self._data['open_TAD']
except KeyError:
if self._verbosity:
print('Finding open/closed configurations...')
self.calc_open_closed()
return self._data[self._data['open_TAD']].filter(('O-O', 'O(l)-Cy', 'O(r)-Cy'))
|
oxygen distances in a open TADDOL configuration
:return:
|
paratemp/coordinate_analysis.py
|
open_ox_dists
|
theavey/ParaTemp
| 12
|
python
|
@property
def open_ox_dists(self):
'\n oxygen distances in a open TADDOL configuration\n\n :return:\n '
try:
self._data['open_TAD']
except KeyError:
if self._verbosity:
print('Finding open/closed configurations...')
self.calc_open_closed()
return self._data[self._data['open_TAD']].filter(('O-O', 'O(l)-Cy', 'O(r)-Cy'))
|
@property
def open_ox_dists(self):
'\n oxygen distances in a open TADDOL configuration\n\n :return:\n '
try:
self._data['open_TAD']
except KeyError:
if self._verbosity:
print('Finding open/closed configurations...')
self.calc_open_closed()
return self._data[self._data['open_TAD']].filter(('O-O', 'O(l)-Cy', 'O(r)-Cy'))<|docstring|>oxygen distances in a open TADDOL configuration
:return:<|endoftext|>
|
6ae9344b0fd8197b462c1975910c4d08df4ce527bc6be506e83cae53f58baabe
|
@property
def closed_ox_dists(self):
'\n oxygen distances in a closed TADDOL configuration\n\n :return:\n '
try:
self._data['closed_TAD']
except KeyError:
if self._verbosity:
print('Finding open/closed configurations...')
self.calc_open_closed()
return self._data[self._data['closed_TAD']].filter(('O-O', 'O(l)-Cy', 'O(r)-Cy'))
|
oxygen distances in a closed TADDOL configuration
:return:
|
paratemp/coordinate_analysis.py
|
closed_ox_dists
|
theavey/ParaTemp
| 12
|
python
|
@property
def closed_ox_dists(self):
'\n oxygen distances in a closed TADDOL configuration\n\n :return:\n '
try:
self._data['closed_TAD']
except KeyError:
if self._verbosity:
print('Finding open/closed configurations...')
self.calc_open_closed()
return self._data[self._data['closed_TAD']].filter(('O-O', 'O(l)-Cy', 'O(r)-Cy'))
|
@property
def closed_ox_dists(self):
'\n oxygen distances in a closed TADDOL configuration\n\n :return:\n '
try:
self._data['closed_TAD']
except KeyError:
if self._verbosity:
print('Finding open/closed configurations...')
self.calc_open_closed()
return self._data[self._data['closed_TAD']].filter(('O-O', 'O(l)-Cy', 'O(r)-Cy'))<|docstring|>oxygen distances in a closed TADDOL configuration
:return:<|endoftext|>
|
8dfa31597628afacbe802f2dc088f8f13d980cbe2f4467f161d13e4dc05d97c4
|
@property
def oc_cutoffs(self):
'\n Cutoffs for O-O distance for determining open/closed TADDOL configs\n\n :return:\n '
return self._oc_cutoffs
|
Cutoffs for O-O distance for determining open/closed TADDOL configs
:return:
|
paratemp/coordinate_analysis.py
|
oc_cutoffs
|
theavey/ParaTemp
| 12
|
python
|
@property
def oc_cutoffs(self):
'\n Cutoffs for O-O distance for determining open/closed TADDOL configs\n\n :return:\n '
return self._oc_cutoffs
|
@property
def oc_cutoffs(self):
'\n Cutoffs for O-O distance for determining open/closed TADDOL configs\n\n :return:\n '
return self._oc_cutoffs<|docstring|>Cutoffs for O-O distance for determining open/closed TADDOL configs
:return:<|endoftext|>
|
b6754034b229f56024b08992b2b467fe709bbce1c20dd6fb027d39231d21c132
|
def calc_open_closed(self):
'\n Select the coordinates for open vs. closed TADDOL\n\n :return:\n '
cutoffs = self.oc_cutoffs
cut_closed = cutoffs[0]
cut_open = cutoffs[1]
self._data['closed_TAD'] = self.ox_dists['O-O'].apply((lambda x: (cut_closed[0] <= x <= cut_closed[1])))
self._data['open_TAD'] = self.ox_dists['O-O'].apply((lambda x: (cut_open[0] <= x <= cut_open[1])))
|
Select the coordinates for open vs. closed TADDOL
:return:
|
paratemp/coordinate_analysis.py
|
calc_open_closed
|
theavey/ParaTemp
| 12
|
python
|
def calc_open_closed(self):
'\n Select the coordinates for open vs. closed TADDOL\n\n :return:\n '
cutoffs = self.oc_cutoffs
cut_closed = cutoffs[0]
cut_open = cutoffs[1]
self._data['closed_TAD'] = self.ox_dists['O-O'].apply((lambda x: (cut_closed[0] <= x <= cut_closed[1])))
self._data['open_TAD'] = self.ox_dists['O-O'].apply((lambda x: (cut_open[0] <= x <= cut_open[1])))
|
def calc_open_closed(self):
'\n Select the coordinates for open vs. closed TADDOL\n\n :return:\n '
cutoffs = self.oc_cutoffs
cut_closed = cutoffs[0]
cut_open = cutoffs[1]
self._data['closed_TAD'] = self.ox_dists['O-O'].apply((lambda x: (cut_closed[0] <= x <= cut_closed[1])))
self._data['open_TAD'] = self.ox_dists['O-O'].apply((lambda x: (cut_open[0] <= x <= cut_open[1])))<|docstring|>Select the coordinates for open vs. closed TADDOL
:return:<|endoftext|>
|
1ade13f12d772d68d5b382367e03e539c188be246b33fd40b25cfd447a5059f9
|
@property
def cv1_dists(self):
'\n Distances for CV1 during the trajectory\n\n :return: CV1 distances\n :rtype: pd.Series\n '
try:
self._data['CV1']
except KeyError:
print('Calculating CV values...\nThis may take a few minutes.')
self.calculate_distances('cv')
return self._data['CV1']
|
Distances for CV1 during the trajectory
:return: CV1 distances
:rtype: pd.Series
|
paratemp/coordinate_analysis.py
|
cv1_dists
|
theavey/ParaTemp
| 12
|
python
|
@property
def cv1_dists(self):
'\n Distances for CV1 during the trajectory\n\n :return: CV1 distances\n :rtype: pd.Series\n '
try:
self._data['CV1']
except KeyError:
print('Calculating CV values...\nThis may take a few minutes.')
self.calculate_distances('cv')
return self._data['CV1']
|
@property
def cv1_dists(self):
'\n Distances for CV1 during the trajectory\n\n :return: CV1 distances\n :rtype: pd.Series\n '
try:
self._data['CV1']
except KeyError:
print('Calculating CV values...\nThis may take a few minutes.')
self.calculate_distances('cv')
return self._data['CV1']<|docstring|>Distances for CV1 during the trajectory
:return: CV1 distances
:rtype: pd.Series<|endoftext|>
|
37014a98df034848d4a88ebaedc2f220e529fb09189e450766aaeeb92162f842
|
@property
def cv2_dists(self):
'\n Distances for CV2 during the trajectory\n\n :return: CV2 distances\n :rtype: pd.Series\n '
try:
self._data['CV2']
except KeyError:
print('Calculating CV values...\nThis may take a few minutes.')
self.calculate_distances('cv')
return self._data['CV2']
|
Distances for CV2 during the trajectory
:return: CV2 distances
:rtype: pd.Series
|
paratemp/coordinate_analysis.py
|
cv2_dists
|
theavey/ParaTemp
| 12
|
python
|
@property
def cv2_dists(self):
'\n Distances for CV2 during the trajectory\n\n :return: CV2 distances\n :rtype: pd.Series\n '
try:
self._data['CV2']
except KeyError:
print('Calculating CV values...\nThis may take a few minutes.')
self.calculate_distances('cv')
return self._data['CV2']
|
@property
def cv2_dists(self):
'\n Distances for CV2 during the trajectory\n\n :return: CV2 distances\n :rtype: pd.Series\n '
try:
self._data['CV2']
except KeyError:
print('Calculating CV values...\nThis may take a few minutes.')
self.calculate_distances('cv')
return self._data['CV2']<|docstring|>Distances for CV2 during the trajectory
:return: CV2 distances
:rtype: pd.Series<|endoftext|>
|
66af8fdd3b1da1f13131f09933d2def672ddb69a6e5a3373515dce2401cca12a
|
def fes_2d_cvs(self, x=None, y=None, temp=205.0, xlabel='CV 1', ylabel='CV 2', **kwargs):
"\n plot FES in 2D along defined CVs\n\n See also documentation for :func:`Universe.fes_2d`.\n\n :param Iterable x: Default: self.cv1_dists. Length component to plot\n along x axis.\n :param Iterable y: Default: self.cv2_dists. Length component to plot\n along y axis.\n :param float temp: Default: 205. Temperature for Boltzmann weighting\n calculation.\n :param str xlabel: Default: 'CV 1'. Label for x-axis (or y-axis if\n transpose=True).\n :param str ylabel: Default: 'CV 2'. Label for y-axis (or x-axis if\n transpose=True).\n :param kwargs: Keyword arguments to pass to the plotting function.\n :return: The figure of the FES.\n :rtype: matplotlib.figure.Figure\n "
if (x is None):
x = self.cv1_dists
if (y is None):
y = self.cv2_dists
fig = self.fes_2d(x=x, y=y, temp=temp, xlabel=xlabel, ylabel=ylabel, **kwargs)[(- 2)]
return fig
|
plot FES in 2D along defined CVs
See also documentation for :func:`Universe.fes_2d`.
:param Iterable x: Default: self.cv1_dists. Length component to plot
along x axis.
:param Iterable y: Default: self.cv2_dists. Length component to plot
along y axis.
:param float temp: Default: 205. Temperature for Boltzmann weighting
calculation.
:param str xlabel: Default: 'CV 1'. Label for x-axis (or y-axis if
transpose=True).
:param str ylabel: Default: 'CV 2'. Label for y-axis (or x-axis if
transpose=True).
:param kwargs: Keyword arguments to pass to the plotting function.
:return: The figure of the FES.
:rtype: matplotlib.figure.Figure
|
paratemp/coordinate_analysis.py
|
fes_2d_cvs
|
theavey/ParaTemp
| 12
|
python
|
def fes_2d_cvs(self, x=None, y=None, temp=205.0, xlabel='CV 1', ylabel='CV 2', **kwargs):
"\n plot FES in 2D along defined CVs\n\n See also documentation for :func:`Universe.fes_2d`.\n\n :param Iterable x: Default: self.cv1_dists. Length component to plot\n along x axis.\n :param Iterable y: Default: self.cv2_dists. Length component to plot\n along y axis.\n :param float temp: Default: 205. Temperature for Boltzmann weighting\n calculation.\n :param str xlabel: Default: 'CV 1'. Label for x-axis (or y-axis if\n transpose=True).\n :param str ylabel: Default: 'CV 2'. Label for y-axis (or x-axis if\n transpose=True).\n :param kwargs: Keyword arguments to pass to the plotting function.\n :return: The figure of the FES.\n :rtype: matplotlib.figure.Figure\n "
if (x is None):
x = self.cv1_dists
if (y is None):
y = self.cv2_dists
fig = self.fes_2d(x=x, y=y, temp=temp, xlabel=xlabel, ylabel=ylabel, **kwargs)[(- 2)]
return fig
|
def fes_2d_cvs(self, x=None, y=None, temp=205.0, xlabel='CV 1', ylabel='CV 2', **kwargs):
"\n plot FES in 2D along defined CVs\n\n See also documentation for :func:`Universe.fes_2d`.\n\n :param Iterable x: Default: self.cv1_dists. Length component to plot\n along x axis.\n :param Iterable y: Default: self.cv2_dists. Length component to plot\n along y axis.\n :param float temp: Default: 205. Temperature for Boltzmann weighting\n calculation.\n :param str xlabel: Default: 'CV 1'. Label for x-axis (or y-axis if\n transpose=True).\n :param str ylabel: Default: 'CV 2'. Label for y-axis (or x-axis if\n transpose=True).\n :param kwargs: Keyword arguments to pass to the plotting function.\n :return: The figure of the FES.\n :rtype: matplotlib.figure.Figure\n "
if (x is None):
x = self.cv1_dists
if (y is None):
y = self.cv2_dists
fig = self.fes_2d(x=x, y=y, temp=temp, xlabel=xlabel, ylabel=ylabel, **kwargs)[(- 2)]
return fig<|docstring|>plot FES in 2D along defined CVs
See also documentation for :func:`Universe.fes_2d`.
:param Iterable x: Default: self.cv1_dists. Length component to plot
along x axis.
:param Iterable y: Default: self.cv2_dists. Length component to plot
along y axis.
:param float temp: Default: 205. Temperature for Boltzmann weighting
calculation.
:param str xlabel: Default: 'CV 1'. Label for x-axis (or y-axis if
transpose=True).
:param str ylabel: Default: 'CV 2'. Label for y-axis (or x-axis if
transpose=True).
:param kwargs: Keyword arguments to pass to the plotting function.
:return: The figure of the FES.
:rtype: matplotlib.figure.Figure<|endoftext|>
|
ef369fe187bc136026a2bf9c1c850c1f59b44a6c5999d74e0654a975adb475ea
|
def plot_ox_dists(self, save=False, save_format='png', save_base_name='ox-dists', display=True, ax=None, **kwargs):
"\n Plot the three oxygen-related distances.\n\n :param bool save: Default: False. Save the figure to disk.\n :param str save_format: Default: 'png'. Format in which to save the\n figure.\n :param str save_base_name: Default: 'ox-dists'. Name for the saved\n figure file.\n :param bool display: Default: True. Return the figure, otherwise\n return None.\n :param matplotlib.axes.Axes ax: Default: None. The axes object on\n which to make the plots. If None is supplied, a new axes object will\n be created.\n :param dict kwargs: Keywords to pass to the plotting function.\n :return: The figure of oxygen distances or None.\n "
ox_dists = self.ox_dists
if (ax is None):
(fig, ax) = plt.subplots()
else:
fig = ax.figure
ax.plot(self._data['Time'], ox_dists['O-O'], label='O-O', **kwargs)
ax.plot(self._data['Time'], ox_dists['O(l)-Cy'], label='O(l)-Cy', **kwargs)
ax.plot(self._data['Time'], ox_dists['O(r)-Cy'], label='O(r)-Cy', **kwargs)
ax.legend()
ax.set_xlabel('time / ps')
ax.set_ylabel('distance / $\\mathrm{\\AA}$')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
Plot the three oxygen-related distances.
:param bool save: Default: False. Save the figure to disk.
:param str save_format: Default: 'png'. Format in which to save the
figure.
:param str save_base_name: Default: 'ox-dists'. Name for the saved
figure file.
:param bool display: Default: True. Return the figure, otherwise
return None.
:param matplotlib.axes.Axes ax: Default: None. The axes object on
which to make the plots. If None is supplied, a new axes object will
be created.
:param dict kwargs: Keywords to pass to the plotting function.
:return: The figure of oxygen distances or None.
|
paratemp/coordinate_analysis.py
|
plot_ox_dists
|
theavey/ParaTemp
| 12
|
python
|
def plot_ox_dists(self, save=False, save_format='png', save_base_name='ox-dists', display=True, ax=None, **kwargs):
"\n Plot the three oxygen-related distances.\n\n :param bool save: Default: False. Save the figure to disk.\n :param str save_format: Default: 'png'. Format in which to save the\n figure.\n :param str save_base_name: Default: 'ox-dists'. Name for the saved\n figure file.\n :param bool display: Default: True. Return the figure, otherwise\n return None.\n :param matplotlib.axes.Axes ax: Default: None. The axes object on\n which to make the plots. If None is supplied, a new axes object will\n be created.\n :param dict kwargs: Keywords to pass to the plotting function.\n :return: The figure of oxygen distances or None.\n "
ox_dists = self.ox_dists
if (ax is None):
(fig, ax) = plt.subplots()
else:
fig = ax.figure
ax.plot(self._data['Time'], ox_dists['O-O'], label='O-O', **kwargs)
ax.plot(self._data['Time'], ox_dists['O(l)-Cy'], label='O(l)-Cy', **kwargs)
ax.plot(self._data['Time'], ox_dists['O(r)-Cy'], label='O(r)-Cy', **kwargs)
ax.legend()
ax.set_xlabel('time / ps')
ax.set_ylabel('distance / $\\mathrm{\\AA}$')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
def plot_ox_dists(self, save=False, save_format='png', save_base_name='ox-dists', display=True, ax=None, **kwargs):
"\n Plot the three oxygen-related distances.\n\n :param bool save: Default: False. Save the figure to disk.\n :param str save_format: Default: 'png'. Format in which to save the\n figure.\n :param str save_base_name: Default: 'ox-dists'. Name for the saved\n figure file.\n :param bool display: Default: True. Return the figure, otherwise\n return None.\n :param matplotlib.axes.Axes ax: Default: None. The axes object on\n which to make the plots. If None is supplied, a new axes object will\n be created.\n :param dict kwargs: Keywords to pass to the plotting function.\n :return: The figure of oxygen distances or None.\n "
ox_dists = self.ox_dists
if (ax is None):
(fig, ax) = plt.subplots()
else:
fig = ax.figure
ax.plot(self._data['Time'], ox_dists['O-O'], label='O-O', **kwargs)
ax.plot(self._data['Time'], ox_dists['O(l)-Cy'], label='O(l)-Cy', **kwargs)
ax.plot(self._data['Time'], ox_dists['O(r)-Cy'], label='O(r)-Cy', **kwargs)
ax.legend()
ax.set_xlabel('time / ps')
ax.set_ylabel('distance / $\\mathrm{\\AA}$')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None<|docstring|>Plot the three oxygen-related distances.
:param bool save: Default: False. Save the figure to disk.
:param str save_format: Default: 'png'. Format in which to save the
figure.
:param str save_base_name: Default: 'ox-dists'. Name for the saved
figure file.
:param bool display: Default: True. Return the figure, otherwise
return None.
:param matplotlib.axes.Axes ax: Default: None. The axes object on
which to make the plots. If None is supplied, a new axes object will
be created.
:param dict kwargs: Keywords to pass to the plotting function.
:return: The figure of oxygen distances or None.<|endoftext|>
|
59f7e23db9ad275c34c8d6175c7c2d611bcdfe49128cb7328b3e0e72c060b6dd
|
def hist_ox_dists(self, data=None, n_bins=10, save=False, save_format='pdf', save_base_name='ox-dists-hist', display=True, axes=None, **kwargs):
"\n Make histogram of alcoholic O distances in TADDOL trajectory\n\n :param data: Default: self.ox_dists. Data to form the histogram from.\n :type data: pd.DataFrame\n :param int n_bins: Default: 10. Number of bins for histograms.\n :param bool save: Default: False. Save the figure to disk.\n :param str save_format: Default: 'pdf'. Format in which to save the\n figure.\n :param str save_base_name: Default: 'ox-dists-hist'. Name for the saved\n figure.\n :param bool display: Default: True. Return the figure from the function\n otherwise return None.\n :param axes: Default: None. The axes objects on\n which to make the plots. If None is supplied, new axes objects will\n be created.\n :param dict kwargs: Keyword arguments to pass to the plotting function.\n :return: The figure of histograms of oxygen distances.\n "
try:
data['O-O']
except KeyError:
raise InputError(data, 'data must be a pd.DataFrame like object with item O-O, O(l)-Cy, and O(r)-Cy.')
except TypeError:
if self._verbosity:
print('Using default data: self.ox_dists.')
data = self.ox_dists
if (axes is None):
(fig, axes) = plt.subplots(nrows=2, ncols=2, sharey=True, sharex=True)
else:
try:
fig = axes.flat[3].figure
except (IndexError, TypeError):
raise InputError('axes={}'.format(axes), 'Input axes must be able to plot at least four things')
except AttributeError:
try:
fig = axes[3].figure
except IndexError:
raise InputError('axes={}'.format(axes), 'Input axes must be able to plot at least four things')
handles = []
colors = mpl.rcParams['axes.prop_cycle'].by_key().values()[0]
for (i, key) in enumerate(('O-O', 'O(l)-Cy', 'O(r)-Cy')):
(n, bins) = np.histogram(data[key], n_bins)
ax = axes.flat[i]
(line,) = ax.plot(bins[:(- 1)], n, colors[i], **kwargs)
handles.append(line)
ax.set_ylabel('count')
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
axes.flat[3].axis('off')
axes.flat[3].legend(handles, ['O-O', 'O(l)-Cy', 'O(r)-Cy'], loc='center')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
Make histogram of alcoholic O distances in TADDOL trajectory
:param data: Default: self.ox_dists. Data to form the histogram from.
:type data: pd.DataFrame
:param int n_bins: Default: 10. Number of bins for histograms.
:param bool save: Default: False. Save the figure to disk.
:param str save_format: Default: 'pdf'. Format in which to save the
figure.
:param str save_base_name: Default: 'ox-dists-hist'. Name for the saved
figure.
:param bool display: Default: True. Return the figure from the function
otherwise return None.
:param axes: Default: None. The axes objects on
which to make the plots. If None is supplied, new axes objects will
be created.
:param dict kwargs: Keyword arguments to pass to the plotting function.
:return: The figure of histograms of oxygen distances.
|
paratemp/coordinate_analysis.py
|
hist_ox_dists
|
theavey/ParaTemp
| 12
|
python
|
def hist_ox_dists(self, data=None, n_bins=10, save=False, save_format='pdf', save_base_name='ox-dists-hist', display=True, axes=None, **kwargs):
"\n Make histogram of alcoholic O distances in TADDOL trajectory\n\n :param data: Default: self.ox_dists. Data to form the histogram from.\n :type data: pd.DataFrame\n :param int n_bins: Default: 10. Number of bins for histograms.\n :param bool save: Default: False. Save the figure to disk.\n :param str save_format: Default: 'pdf'. Format in which to save the\n figure.\n :param str save_base_name: Default: 'ox-dists-hist'. Name for the saved\n figure.\n :param bool display: Default: True. Return the figure from the function\n otherwise return None.\n :param axes: Default: None. The axes objects on\n which to make the plots. If None is supplied, new axes objects will\n be created.\n :param dict kwargs: Keyword arguments to pass to the plotting function.\n :return: The figure of histograms of oxygen distances.\n "
try:
data['O-O']
except KeyError:
raise InputError(data, 'data must be a pd.DataFrame like object with item O-O, O(l)-Cy, and O(r)-Cy.')
except TypeError:
if self._verbosity:
print('Using default data: self.ox_dists.')
data = self.ox_dists
if (axes is None):
(fig, axes) = plt.subplots(nrows=2, ncols=2, sharey=True, sharex=True)
else:
try:
fig = axes.flat[3].figure
except (IndexError, TypeError):
raise InputError('axes={}'.format(axes), 'Input axes must be able to plot at least four things')
except AttributeError:
try:
fig = axes[3].figure
except IndexError:
raise InputError('axes={}'.format(axes), 'Input axes must be able to plot at least four things')
handles = []
colors = mpl.rcParams['axes.prop_cycle'].by_key().values()[0]
for (i, key) in enumerate(('O-O', 'O(l)-Cy', 'O(r)-Cy')):
(n, bins) = np.histogram(data[key], n_bins)
ax = axes.flat[i]
(line,) = ax.plot(bins[:(- 1)], n, colors[i], **kwargs)
handles.append(line)
ax.set_ylabel('count')
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
axes.flat[3].axis('off')
axes.flat[3].legend(handles, ['O-O', 'O(l)-Cy', 'O(r)-Cy'], loc='center')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
def hist_ox_dists(self, data=None, n_bins=10, save=False, save_format='pdf', save_base_name='ox-dists-hist', display=True, axes=None, **kwargs):
"\n Make histogram of alcoholic O distances in TADDOL trajectory\n\n :param data: Default: self.ox_dists. Data to form the histogram from.\n :type data: pd.DataFrame\n :param int n_bins: Default: 10. Number of bins for histograms.\n :param bool save: Default: False. Save the figure to disk.\n :param str save_format: Default: 'pdf'. Format in which to save the\n figure.\n :param str save_base_name: Default: 'ox-dists-hist'. Name for the saved\n figure.\n :param bool display: Default: True. Return the figure from the function\n otherwise return None.\n :param axes: Default: None. The axes objects on\n which to make the plots. If None is supplied, new axes objects will\n be created.\n :param dict kwargs: Keyword arguments to pass to the plotting function.\n :return: The figure of histograms of oxygen distances.\n "
try:
data['O-O']
except KeyError:
raise InputError(data, 'data must be a pd.DataFrame like object with item O-O, O(l)-Cy, and O(r)-Cy.')
except TypeError:
if self._verbosity:
print('Using default data: self.ox_dists.')
data = self.ox_dists
if (axes is None):
(fig, axes) = plt.subplots(nrows=2, ncols=2, sharey=True, sharex=True)
else:
try:
fig = axes.flat[3].figure
except (IndexError, TypeError):
raise InputError('axes={}'.format(axes), 'Input axes must be able to plot at least four things')
except AttributeError:
try:
fig = axes[3].figure
except IndexError:
raise InputError('axes={}'.format(axes), 'Input axes must be able to plot at least four things')
handles = []
colors = mpl.rcParams['axes.prop_cycle'].by_key().values()[0]
for (i, key) in enumerate(('O-O', 'O(l)-Cy', 'O(r)-Cy')):
(n, bins) = np.histogram(data[key], n_bins)
ax = axes.flat[i]
(line,) = ax.plot(bins[:(- 1)], n, colors[i], **kwargs)
handles.append(line)
ax.set_ylabel('count')
ax.set_xlabel('distance / $\\mathrm{\\AA}$')
axes.flat[3].axis('off')
axes.flat[3].legend(handles, ['O-O', 'O(l)-Cy', 'O(r)-Cy'], loc='center')
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None<|docstring|>Make histogram of alcoholic O distances in TADDOL trajectory
:param data: Default: self.ox_dists. Data to form the histogram from.
:type data: pd.DataFrame
:param int n_bins: Default: 10. Number of bins for histograms.
:param bool save: Default: False. Save the figure to disk.
:param str save_format: Default: 'pdf'. Format in which to save the
figure.
:param str save_base_name: Default: 'ox-dists-hist'. Name for the saved
figure.
:param bool display: Default: True. Return the figure from the function
otherwise return None.
:param axes: Default: None. The axes objects on
which to make the plots. If None is supplied, new axes objects will
be created.
:param dict kwargs: Keyword arguments to pass to the plotting function.
:return: The figure of histograms of oxygen distances.<|endoftext|>
|
508407a9b0ad7ccbf53c324ce67d70453a652675b6e6634dffe0ae0f0520441e
|
def fes_ox_dists(self, data=None, temp=791.0, bins=None, save=False, save_format='pdf', save_base_name='ox-dists-fes', display=True, axes=None, **kwargs):
"\n Make FESs of the oxygen distances of a TADDOL from histogram data\n\n :param data: Default: self.ox_dists. Data to form the FES from.\n :type data: pd.DataFrame\n :param float temp: Default: 791 K. Temperature of the trajectory used\n to calculate the free energy.\n :type bins: int or Sequence[int or float] or str\n :param bins: Default: None. The bins argument to be passed to\n np.histogram\n :param bool save: Default: False. Whether to save the FESs to disk.\n :param str save_format: Default: 'pdf'. Format in which to save the\n figure.\n :param str save_base_name: Default: 'ox-dists-fes'. Name of the saved\n figure.\n :param bool display: Default: True. Whether to return the figure after\n producing it.\n :param axes: Default: None. The axes objects on\n which to make the plots. If None is supplied, new axes objects will\n be created.\n :param kwargs: keyword arguments to pass to the plotter\n :return:\n "
try:
data['O-O']
except KeyError:
raise InputError(data, 'data must be a pd.DataFrame like object with items O-O, O(l)-Cy, and O(r)-Cy.')
except TypeError:
if self._verbosity:
print('Using default data: self.ox_dists.')
data = self.ox_dists
fig = fes_array_3_legend(data, temp=temp, labels=('O-O', 'O(l)-Cy', 'O(r)-Cy'), axes=axes, bins=bins, **kwargs)[3]
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
Make FESs of the oxygen distances of a TADDOL from histogram data
:param data: Default: self.ox_dists. Data to form the FES from.
:type data: pd.DataFrame
:param float temp: Default: 791 K. Temperature of the trajectory used
to calculate the free energy.
:type bins: int or Sequence[int or float] or str
:param bins: Default: None. The bins argument to be passed to
np.histogram
:param bool save: Default: False. Whether to save the FESs to disk.
:param str save_format: Default: 'pdf'. Format in which to save the
figure.
:param str save_base_name: Default: 'ox-dists-fes'. Name of the saved
figure.
:param bool display: Default: True. Whether to return the figure after
producing it.
:param axes: Default: None. The axes objects on
which to make the plots. If None is supplied, new axes objects will
be created.
:param kwargs: keyword arguments to pass to the plotter
:return:
|
paratemp/coordinate_analysis.py
|
fes_ox_dists
|
theavey/ParaTemp
| 12
|
python
|
def fes_ox_dists(self, data=None, temp=791.0, bins=None, save=False, save_format='pdf', save_base_name='ox-dists-fes', display=True, axes=None, **kwargs):
"\n Make FESs of the oxygen distances of a TADDOL from histogram data\n\n :param data: Default: self.ox_dists. Data to form the FES from.\n :type data: pd.DataFrame\n :param float temp: Default: 791 K. Temperature of the trajectory used\n to calculate the free energy.\n :type bins: int or Sequence[int or float] or str\n :param bins: Default: None. The bins argument to be passed to\n np.histogram\n :param bool save: Default: False. Whether to save the FESs to disk.\n :param str save_format: Default: 'pdf'. Format in which to save the\n figure.\n :param str save_base_name: Default: 'ox-dists-fes'. Name of the saved\n figure.\n :param bool display: Default: True. Whether to return the figure after\n producing it.\n :param axes: Default: None. The axes objects on\n which to make the plots. If None is supplied, new axes objects will\n be created.\n :param kwargs: keyword arguments to pass to the plotter\n :return:\n "
try:
data['O-O']
except KeyError:
raise InputError(data, 'data must be a pd.DataFrame like object with items O-O, O(l)-Cy, and O(r)-Cy.')
except TypeError:
if self._verbosity:
print('Using default data: self.ox_dists.')
data = self.ox_dists
fig = fes_array_3_legend(data, temp=temp, labels=('O-O', 'O(l)-Cy', 'O(r)-Cy'), axes=axes, bins=bins, **kwargs)[3]
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None
|
def fes_ox_dists(self, data=None, temp=791.0, bins=None, save=False, save_format='pdf', save_base_name='ox-dists-fes', display=True, axes=None, **kwargs):
"\n Make FESs of the oxygen distances of a TADDOL from histogram data\n\n :param data: Default: self.ox_dists. Data to form the FES from.\n :type data: pd.DataFrame\n :param float temp: Default: 791 K. Temperature of the trajectory used\n to calculate the free energy.\n :type bins: int or Sequence[int or float] or str\n :param bins: Default: None. The bins argument to be passed to\n np.histogram\n :param bool save: Default: False. Whether to save the FESs to disk.\n :param str save_format: Default: 'pdf'. Format in which to save the\n figure.\n :param str save_base_name: Default: 'ox-dists-fes'. Name of the saved\n figure.\n :param bool display: Default: True. Whether to return the figure after\n producing it.\n :param axes: Default: None. The axes objects on\n which to make the plots. If None is supplied, new axes objects will\n be created.\n :param kwargs: keyword arguments to pass to the plotter\n :return:\n "
try:
data['O-O']
except KeyError:
raise InputError(data, 'data must be a pd.DataFrame like object with items O-O, O(l)-Cy, and O(r)-Cy.')
except TypeError:
if self._verbosity:
print('Using default data: self.ox_dists.')
data = self.ox_dists
fig = fes_array_3_legend(data, temp=temp, labels=('O-O', 'O(l)-Cy', 'O(r)-Cy'), axes=axes, bins=bins, **kwargs)[3]
if save:
fig.savefig((save_base_name + save_format))
if display:
return fig
else:
return None<|docstring|>Make FESs of the oxygen distances of a TADDOL from histogram data
:param data: Default: self.ox_dists. Data to form the FES from.
:type data: pd.DataFrame
:param float temp: Default: 791 K. Temperature of the trajectory used
to calculate the free energy.
:type bins: int or Sequence[int or float] or str
:param bins: Default: None. The bins argument to be passed to
np.histogram
:param bool save: Default: False. Whether to save the FESs to disk.
:param str save_format: Default: 'pdf'. Format in which to save the
figure.
:param str save_base_name: Default: 'ox-dists-fes'. Name of the saved
figure.
:param bool display: Default: True. Whether to return the figure after
producing it.
:param axes: Default: None. The axes objects on
which to make the plots. If None is supplied, new axes objects will
be created.
:param kwargs: keyword arguments to pass to the plotter
:return:<|endoftext|>
|
03095efd1fb161f279cd2a93e20d28e21fd71a4319b5a965d7fe341082ead1f1
|
def test_emptyResolversList(self):
'\n L{ResolverChain._lookup} returns a L{DomainError} failure if\n its C{resolvers} list is empty.\n '
r = ResolverChain([])
d = r.lookupAddress('www.example.com')
f = self.failureResultOf(d)
self.assertIs(f.trap(DomainError), DomainError)
|
L{ResolverChain._lookup} returns a L{DomainError} failure if
its C{resolvers} list is empty.
|
ThirdParty/Twisted/twisted/names/test/test_resolve.py
|
test_emptyResolversList
|
michaelchanwahyan/vtk-7.0.0
| 9,953
|
python
|
def test_emptyResolversList(self):
'\n L{ResolverChain._lookup} returns a L{DomainError} failure if\n its C{resolvers} list is empty.\n '
r = ResolverChain([])
d = r.lookupAddress('www.example.com')
f = self.failureResultOf(d)
self.assertIs(f.trap(DomainError), DomainError)
|
def test_emptyResolversList(self):
'\n L{ResolverChain._lookup} returns a L{DomainError} failure if\n its C{resolvers} list is empty.\n '
r = ResolverChain([])
d = r.lookupAddress('www.example.com')
f = self.failureResultOf(d)
self.assertIs(f.trap(DomainError), DomainError)<|docstring|>L{ResolverChain._lookup} returns a L{DomainError} failure if
its C{resolvers} list is empty.<|endoftext|>
|
8eb58c5cd8bb065c5f499bce303384ea9abad5895246f602b9c2cc5a65492e67
|
def test_emptyResolversListLookupAllRecords(self):
'\n L{ResolverChain.lookupAllRecords} returns a L{DomainError}\n failure if its C{resolvers} list is empty.\n '
r = ResolverChain([])
d = r.lookupAllRecords('www.example.com')
f = self.failureResultOf(d)
self.assertIs(f.trap(DomainError), DomainError)
|
L{ResolverChain.lookupAllRecords} returns a L{DomainError}
failure if its C{resolvers} list is empty.
|
ThirdParty/Twisted/twisted/names/test/test_resolve.py
|
test_emptyResolversListLookupAllRecords
|
michaelchanwahyan/vtk-7.0.0
| 9,953
|
python
|
def test_emptyResolversListLookupAllRecords(self):
'\n L{ResolverChain.lookupAllRecords} returns a L{DomainError}\n failure if its C{resolvers} list is empty.\n '
r = ResolverChain([])
d = r.lookupAllRecords('www.example.com')
f = self.failureResultOf(d)
self.assertIs(f.trap(DomainError), DomainError)
|
def test_emptyResolversListLookupAllRecords(self):
'\n L{ResolverChain.lookupAllRecords} returns a L{DomainError}\n failure if its C{resolvers} list is empty.\n '
r = ResolverChain([])
d = r.lookupAllRecords('www.example.com')
f = self.failureResultOf(d)
self.assertIs(f.trap(DomainError), DomainError)<|docstring|>L{ResolverChain.lookupAllRecords} returns a L{DomainError}
failure if its C{resolvers} list is empty.<|endoftext|>
|
ad72b70b47017a2d2b7326658d28bd0ea93530014049b013c13ba1ba21567518
|
def copy_all_python_files(source, snapshot_main_dir, code_snapshot_hash, recurse_dirs='metaseq,metaseq_cli,scripts'):
'\n Copies following files from source to destination:\n a) all *.py files at direct source location.\n b) all metaseq/*.py recursively (default); recurse through comma-separated recurse_dirs\n '
def include_patterns(*patterns):
'Factory function that can be used with copytree() ignore parameter.\n\n Arguments define a sequence of glob-style patterns\n that are used to specify what files to NOT ignore.\n Creates and returns a function that determines this for each directory\n in the file hierarchy rooted at the source directory when used with\n shutil.copytree().\n from: https://stackoverflow.com/questions/52071642/python-copying-the-files-with-include-pattern\n '
def _ignore_patterns(path, names):
keep = set((name for pattern in patterns for name in fnmatch.filter(names, pattern)))
ignore = set((name for name in names if ((name not in keep) and (not os.path.isdir(os.path.join(path, name))))))
return ignore
return _ignore_patterns
def pys_but_no_dirs(path, names):
pys = set(fnmatch.filter(names, '*.py'))
return [name for name in names if (name not in pys)]
destination = os.path.join(snapshot_main_dir, code_snapshot_hash)
shutil.copytree(source, destination, ignore=pys_but_no_dirs)
for d in recurse_dirs.split(','):
shutil.copytree(os.path.join(source, d), os.path.join(destination, d), ignore=include_patterns('*.py', '*.so', '*.yaml'))
return destination
|
Copies following files from source to destination:
a) all *.py files at direct source location.
b) all metaseq/*.py recursively (default); recurse through comma-separated recurse_dirs
|
metaseq/launcher/slurm.py
|
copy_all_python_files
|
dedsecurity/gpt-ded
| 3
|
python
|
def copy_all_python_files(source, snapshot_main_dir, code_snapshot_hash, recurse_dirs='metaseq,metaseq_cli,scripts'):
'\n Copies following files from source to destination:\n a) all *.py files at direct source location.\n b) all metaseq/*.py recursively (default); recurse through comma-separated recurse_dirs\n '
def include_patterns(*patterns):
'Factory function that can be used with copytree() ignore parameter.\n\n Arguments define a sequence of glob-style patterns\n that are used to specify what files to NOT ignore.\n Creates and returns a function that determines this for each directory\n in the file hierarchy rooted at the source directory when used with\n shutil.copytree().\n from: https://stackoverflow.com/questions/52071642/python-copying-the-files-with-include-pattern\n '
def _ignore_patterns(path, names):
keep = set((name for pattern in patterns for name in fnmatch.filter(names, pattern)))
ignore = set((name for name in names if ((name not in keep) and (not os.path.isdir(os.path.join(path, name))))))
return ignore
return _ignore_patterns
def pys_but_no_dirs(path, names):
pys = set(fnmatch.filter(names, '*.py'))
return [name for name in names if (name not in pys)]
destination = os.path.join(snapshot_main_dir, code_snapshot_hash)
shutil.copytree(source, destination, ignore=pys_but_no_dirs)
for d in recurse_dirs.split(','):
shutil.copytree(os.path.join(source, d), os.path.join(destination, d), ignore=include_patterns('*.py', '*.so', '*.yaml'))
return destination
|
def copy_all_python_files(source, snapshot_main_dir, code_snapshot_hash, recurse_dirs='metaseq,metaseq_cli,scripts'):
'\n Copies following files from source to destination:\n a) all *.py files at direct source location.\n b) all metaseq/*.py recursively (default); recurse through comma-separated recurse_dirs\n '
def include_patterns(*patterns):
'Factory function that can be used with copytree() ignore parameter.\n\n Arguments define a sequence of glob-style patterns\n that are used to specify what files to NOT ignore.\n Creates and returns a function that determines this for each directory\n in the file hierarchy rooted at the source directory when used with\n shutil.copytree().\n from: https://stackoverflow.com/questions/52071642/python-copying-the-files-with-include-pattern\n '
def _ignore_patterns(path, names):
keep = set((name for pattern in patterns for name in fnmatch.filter(names, pattern)))
ignore = set((name for name in names if ((name not in keep) and (not os.path.isdir(os.path.join(path, name))))))
return ignore
return _ignore_patterns
def pys_but_no_dirs(path, names):
pys = set(fnmatch.filter(names, '*.py'))
return [name for name in names if (name not in pys)]
destination = os.path.join(snapshot_main_dir, code_snapshot_hash)
shutil.copytree(source, destination, ignore=pys_but_no_dirs)
for d in recurse_dirs.split(','):
shutil.copytree(os.path.join(source, d), os.path.join(destination, d), ignore=include_patterns('*.py', '*.so', '*.yaml'))
return destination<|docstring|>Copies following files from source to destination:
a) all *.py files at direct source location.
b) all metaseq/*.py recursively (default); recurse through comma-separated recurse_dirs<|endoftext|>
|
f78f965ec50c89325b18074d3e05ecb4a4cef7417f38cf48c2782df1ac7fe7e4
|
def include_patterns(*patterns):
'Factory function that can be used with copytree() ignore parameter.\n\n Arguments define a sequence of glob-style patterns\n that are used to specify what files to NOT ignore.\n Creates and returns a function that determines this for each directory\n in the file hierarchy rooted at the source directory when used with\n shutil.copytree().\n from: https://stackoverflow.com/questions/52071642/python-copying-the-files-with-include-pattern\n '
def _ignore_patterns(path, names):
keep = set((name for pattern in patterns for name in fnmatch.filter(names, pattern)))
ignore = set((name for name in names if ((name not in keep) and (not os.path.isdir(os.path.join(path, name))))))
return ignore
return _ignore_patterns
|
Factory function that can be used with copytree() ignore parameter.
Arguments define a sequence of glob-style patterns
that are used to specify what files to NOT ignore.
Creates and returns a function that determines this for each directory
in the file hierarchy rooted at the source directory when used with
shutil.copytree().
from: https://stackoverflow.com/questions/52071642/python-copying-the-files-with-include-pattern
|
metaseq/launcher/slurm.py
|
include_patterns
|
dedsecurity/gpt-ded
| 3
|
python
|
def include_patterns(*patterns):
'Factory function that can be used with copytree() ignore parameter.\n\n Arguments define a sequence of glob-style patterns\n that are used to specify what files to NOT ignore.\n Creates and returns a function that determines this for each directory\n in the file hierarchy rooted at the source directory when used with\n shutil.copytree().\n from: https://stackoverflow.com/questions/52071642/python-copying-the-files-with-include-pattern\n '
def _ignore_patterns(path, names):
keep = set((name for pattern in patterns for name in fnmatch.filter(names, pattern)))
ignore = set((name for name in names if ((name not in keep) and (not os.path.isdir(os.path.join(path, name))))))
return ignore
return _ignore_patterns
|
def include_patterns(*patterns):
'Factory function that can be used with copytree() ignore parameter.\n\n Arguments define a sequence of glob-style patterns\n that are used to specify what files to NOT ignore.\n Creates and returns a function that determines this for each directory\n in the file hierarchy rooted at the source directory when used with\n shutil.copytree().\n from: https://stackoverflow.com/questions/52071642/python-copying-the-files-with-include-pattern\n '
def _ignore_patterns(path, names):
keep = set((name for pattern in patterns for name in fnmatch.filter(names, pattern)))
ignore = set((name for name in names if ((name not in keep) and (not os.path.isdir(os.path.join(path, name))))))
return ignore
return _ignore_patterns<|docstring|>Factory function that can be used with copytree() ignore parameter.
Arguments define a sequence of glob-style patterns
that are used to specify what files to NOT ignore.
Creates and returns a function that determines this for each directory
in the file hierarchy rooted at the source directory when used with
shutil.copytree().
from: https://stackoverflow.com/questions/52071642/python-copying-the-files-with-include-pattern<|endoftext|>
|
c5800ec75ec6eb9b6bcda1eb49d9be7eef705d332ac97a81aeb44647272e4dd4
|
def gen_default_lines():
'gen_default_lines.'
template = {'useCountingLine': True, 'countingLines': [{'id': '$UUID_PLACE_HOLDER', 'type': 'Line', 'label': [{'x': 229, 'y': 215}, {'x': 916, 'y': 255}]}]}
template['countingLines'][0]['id'] = str(uuid.uuid4())
return json.dumps(template)
|
gen_default_lines.
|
factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/cameras/migrations/0005_auto_20200922_0641.py
|
gen_default_lines
|
jeanrtak/azure-intelligent-edge-patterns
| 176
|
python
|
def gen_default_lines():
template = {'useCountingLine': True, 'countingLines': [{'id': '$UUID_PLACE_HOLDER', 'type': 'Line', 'label': [{'x': 229, 'y': 215}, {'x': 916, 'y': 255}]}]}
template['countingLines'][0]['id'] = str(uuid.uuid4())
return json.dumps(template)
|
def gen_default_lines():
template = {'useCountingLine': True, 'countingLines': [{'id': '$UUID_PLACE_HOLDER', 'type': 'Line', 'label': [{'x': 229, 'y': 215}, {'x': 916, 'y': 255}]}]}
template['countingLines'][0]['id'] = str(uuid.uuid4())
return json.dumps(template)<|docstring|>gen_default_lines.<|endoftext|>
|
2a058bad22ca3cbede53e6a66a9f662262007feb606bec1fccf65f6e5d4786e6
|
def gen_default_zones():
'gen_default_zones.'
template = {'useDangerZone': True, 'dangerZones': [{'id': '$UUID_PLACE_HOLDER', 'type': 'BBox', 'label': {'x1': 23, 'y1': 58, 'x2': 452, 'y2': 502}}]}
template['dangerZones'][0]['id'] = str(uuid.uuid4())
return json.dumps(template)
|
gen_default_zones.
|
factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/cameras/migrations/0005_auto_20200922_0641.py
|
gen_default_zones
|
jeanrtak/azure-intelligent-edge-patterns
| 176
|
python
|
def gen_default_zones():
template = {'useDangerZone': True, 'dangerZones': [{'id': '$UUID_PLACE_HOLDER', 'type': 'BBox', 'label': {'x1': 23, 'y1': 58, 'x2': 452, 'y2': 502}}]}
template['dangerZones'][0]['id'] = str(uuid.uuid4())
return json.dumps(template)
|
def gen_default_zones():
template = {'useDangerZone': True, 'dangerZones': [{'id': '$UUID_PLACE_HOLDER', 'type': 'BBox', 'label': {'x1': 23, 'y1': 58, 'x2': 452, 'y2': 502}}]}
template['dangerZones'][0]['id'] = str(uuid.uuid4())
return json.dumps(template)<|docstring|>gen_default_zones.<|endoftext|>
|
071ff8b54073ac960d5e1661d249c0f3ddff3447799caab0a129018786124e59
|
def main():
'\n main function to be called when the script is directly executed from the\n command line\n '
parser = optparse.OptionParser()
parser.add_option('--identifier', dest='identifier', default=None, help='Name of the data set that corresponds to the telemetry pull to be executed')
parser.add_option('--inputFile', dest='inputFile', default=None, help='Path of the file containing the config that describes the DefragHistory time series')
parser.add_option('--iterations', dest='iterations', default=None, help='Number of data points to be considered in the time series')
parser.add_option('--debug', dest='debug', default=False, help='Verbose printing for debug use')
(options, args) = parser.parse_args()
input_t = utilityTSV.utilityTSV().checkInputFile(options.inputFile)
debug = utilityTSV.utilityTSV().checkDebugOption(options.debug)
identifier = utilityTSV.utilityTSV().checkIdentifier(options.identifier)
iterations = utilityTSV.utilityTSV().checkIterations(options.iterations)
viz = configToText(debug=debug)
viz.generateTextFromConfigFile(inputFile=input_t, identifier=identifier, iterations=iterations)
return 0
|
main function to be called when the script is directly executed from the
command line
|
src/software/TSV/configToText.py
|
main
|
intel/RAAD
| 0
|
python
|
def main():
'\n main function to be called when the script is directly executed from the\n command line\n '
parser = optparse.OptionParser()
parser.add_option('--identifier', dest='identifier', default=None, help='Name of the data set that corresponds to the telemetry pull to be executed')
parser.add_option('--inputFile', dest='inputFile', default=None, help='Path of the file containing the config that describes the DefragHistory time series')
parser.add_option('--iterations', dest='iterations', default=None, help='Number of data points to be considered in the time series')
parser.add_option('--debug', dest='debug', default=False, help='Verbose printing for debug use')
(options, args) = parser.parse_args()
input_t = utilityTSV.utilityTSV().checkInputFile(options.inputFile)
debug = utilityTSV.utilityTSV().checkDebugOption(options.debug)
identifier = utilityTSV.utilityTSV().checkIdentifier(options.identifier)
iterations = utilityTSV.utilityTSV().checkIterations(options.iterations)
viz = configToText(debug=debug)
viz.generateTextFromConfigFile(inputFile=input_t, identifier=identifier, iterations=iterations)
return 0
|
def main():
'\n main function to be called when the script is directly executed from the\n command line\n '
parser = optparse.OptionParser()
parser.add_option('--identifier', dest='identifier', default=None, help='Name of the data set that corresponds to the telemetry pull to be executed')
parser.add_option('--inputFile', dest='inputFile', default=None, help='Path of the file containing the config that describes the DefragHistory time series')
parser.add_option('--iterations', dest='iterations', default=None, help='Number of data points to be considered in the time series')
parser.add_option('--debug', dest='debug', default=False, help='Verbose printing for debug use')
(options, args) = parser.parse_args()
input_t = utilityTSV.utilityTSV().checkInputFile(options.inputFile)
debug = utilityTSV.utilityTSV().checkDebugOption(options.debug)
identifier = utilityTSV.utilityTSV().checkIdentifier(options.identifier)
iterations = utilityTSV.utilityTSV().checkIterations(options.iterations)
viz = configToText(debug=debug)
viz.generateTextFromConfigFile(inputFile=input_t, identifier=identifier, iterations=iterations)
return 0<|docstring|>main function to be called when the script is directly executed from the
command line<|endoftext|>
|
4fe403356abfc8e86db15e3fe9b4197ab393eb3f805e6830b7d3cacef083673d
|
def __init__(self, debug=False):
'\n function for initializing a configToText structure\n\n Args:\n debug: Boolean flag to activate debug statements\n\n Attributes:\n debug: Boolean flag to activate debug statements\n\n '
self.debug = debug
|
function for initializing a configToText structure
Args:
debug: Boolean flag to activate debug statements
Attributes:
debug: Boolean flag to activate debug statements
|
src/software/TSV/configToText.py
|
__init__
|
intel/RAAD
| 0
|
python
|
def __init__(self, debug=False):
'\n function for initializing a configToText structure\n\n Args:\n debug: Boolean flag to activate debug statements\n\n Attributes:\n debug: Boolean flag to activate debug statements\n\n '
self.debug = debug
|
def __init__(self, debug=False):
'\n function for initializing a configToText structure\n\n Args:\n debug: Boolean flag to activate debug statements\n\n Attributes:\n debug: Boolean flag to activate debug statements\n\n '
self.debug = debug<|docstring|>function for initializing a configToText structure
Args:
debug: Boolean flag to activate debug statements
Attributes:
debug: Boolean flag to activate debug statements<|endoftext|>
|
e8839f7b61a0b4c0e47b4317138991913446b589013110ef6d7d8857fefdce91
|
def getDebug(self):
'\n function for reading the debug flag stored in the visualizeTS attributes\n\n Returns:\n Boolean flag to activate debug statements\n\n '
return self.debug
|
function for reading the debug flag stored in the visualizeTS attributes
Returns:
Boolean flag to activate debug statements
|
src/software/TSV/configToText.py
|
getDebug
|
intel/RAAD
| 0
|
python
|
def getDebug(self):
'\n function for reading the debug flag stored in the visualizeTS attributes\n\n Returns:\n Boolean flag to activate debug statements\n\n '
return self.debug
|
def getDebug(self):
'\n function for reading the debug flag stored in the visualizeTS attributes\n\n Returns:\n Boolean flag to activate debug statements\n\n '
return self.debug<|docstring|>function for reading the debug flag stored in the visualizeTS attributes
Returns:
Boolean flag to activate debug statements<|endoftext|>
|
a9077208c473a09c96440ccd23df306be9e7840215fec7947989843c09624b52
|
def setDebug(self, debug):
'\n function for setting the debug flag stored in the visualizeTS attributes\n\n Args:\n debug: Boolean flag to activate debug statements\n\n Returns:\n\n '
self.debug = debug
|
function for setting the debug flag stored in the visualizeTS attributes
Args:
debug: Boolean flag to activate debug statements
Returns:
|
src/software/TSV/configToText.py
|
setDebug
|
intel/RAAD
| 0
|
python
|
def setDebug(self, debug):
'\n function for setting the debug flag stored in the visualizeTS attributes\n\n Args:\n debug: Boolean flag to activate debug statements\n\n Returns:\n\n '
self.debug = debug
|
def setDebug(self, debug):
'\n function for setting the debug flag stored in the visualizeTS attributes\n\n Args:\n debug: Boolean flag to activate debug statements\n\n Returns:\n\n '
self.debug = debug<|docstring|>function for setting the debug flag stored in the visualizeTS attributes
Args:
debug: Boolean flag to activate debug statements
Returns:<|endoftext|>
|
8bdcf53050de1fc648b57dd1928923f23de78aee30cb3b6a388251c5a8e5555f
|
def generateTextFromDict(self, tempDict, accumStr, indentLevel, index):
'\n Recursive function to process a single entry of the expanded dictionary and turn it into a plain text line\n\n Args:\n tempDict: Expanded dictionary containg the fields for an object\n accumStr: String storing the lines processed so far\n indentLevel: Integer indicating the nesting level of the current line\n index: Integer for the index of the current stamp in the time-series\n\n Returns:\n String of the lines processed so far\n\n '
for key in sorted(tempDict.keys()):
for i in range(indentLevel):
accumStr += ' '
if isinstance(tempDict[key], dict):
accumStr += (key + ' : \n')
accumStr = self.generateTextFromDict(tempDict[key], accumStr, (indentLevel + 1), index)
elif isinstance(tempDict[key], list):
accumStr += (((key + ' : ') + str(tempDict[key][index])) + '\n')
else:
accumStr += (((key + ' : ') + str(tempDict[key])) + '\n')
return accumStr
|
Recursive function to process a single entry of the expanded dictionary and turn it into a plain text line
Args:
tempDict: Expanded dictionary containg the fields for an object
accumStr: String storing the lines processed so far
indentLevel: Integer indicating the nesting level of the current line
index: Integer for the index of the current stamp in the time-series
Returns:
String of the lines processed so far
|
src/software/TSV/configToText.py
|
generateTextFromDict
|
intel/RAAD
| 0
|
python
|
def generateTextFromDict(self, tempDict, accumStr, indentLevel, index):
'\n Recursive function to process a single entry of the expanded dictionary and turn it into a plain text line\n\n Args:\n tempDict: Expanded dictionary containg the fields for an object\n accumStr: String storing the lines processed so far\n indentLevel: Integer indicating the nesting level of the current line\n index: Integer for the index of the current stamp in the time-series\n\n Returns:\n String of the lines processed so far\n\n '
for key in sorted(tempDict.keys()):
for i in range(indentLevel):
accumStr += ' '
if isinstance(tempDict[key], dict):
accumStr += (key + ' : \n')
accumStr = self.generateTextFromDict(tempDict[key], accumStr, (indentLevel + 1), index)
elif isinstance(tempDict[key], list):
accumStr += (((key + ' : ') + str(tempDict[key][index])) + '\n')
else:
accumStr += (((key + ' : ') + str(tempDict[key])) + '\n')
return accumStr
|
def generateTextFromDict(self, tempDict, accumStr, indentLevel, index):
'\n Recursive function to process a single entry of the expanded dictionary and turn it into a plain text line\n\n Args:\n tempDict: Expanded dictionary containg the fields for an object\n accumStr: String storing the lines processed so far\n indentLevel: Integer indicating the nesting level of the current line\n index: Integer for the index of the current stamp in the time-series\n\n Returns:\n String of the lines processed so far\n\n '
for key in sorted(tempDict.keys()):
for i in range(indentLevel):
accumStr += ' '
if isinstance(tempDict[key], dict):
accumStr += (key + ' : \n')
accumStr = self.generateTextFromDict(tempDict[key], accumStr, (indentLevel + 1), index)
elif isinstance(tempDict[key], list):
accumStr += (((key + ' : ') + str(tempDict[key][index])) + '\n')
else:
accumStr += (((key + ' : ') + str(tempDict[key])) + '\n')
return accumStr<|docstring|>Recursive function to process a single entry of the expanded dictionary and turn it into a plain text line
Args:
tempDict: Expanded dictionary containg the fields for an object
accumStr: String storing the lines processed so far
indentLevel: Integer indicating the nesting level of the current line
index: Integer for the index of the current stamp in the time-series
Returns:
String of the lines processed so far<|endoftext|>
|
fe1e6f563ae9a8f6e04a0f8b53232d094b129a1918ec8b082925aca269fcc779
|
def generateTextFromConfigFile(self, inputFile='time-series.ini', identifier='Tv2HiTAC', iterations=100):
'\n function for turning a configuration file into multiple plain text files, each containing all the objects for\n a single time stamp in the time-series of the configuration file\n\n Args:\n inputFile: String of the path to the configuration file to be used for processing\n identifier: String of the identifier for the name of the plain text files\n\n Returns:\n\n '
endString = '\n################################################################################################################\n'
config = cF.ConfigParser()
config.read(inputFile)
intermediateDict = DP.preprocessingAPI.loadConfigIntoDict(config, True)
resultDict = DP.preprocessingAPI.transformDict(intermediateDict, True)
for i in range(iterations):
currentTimeUTCString = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S-%f')
outFile = (((((identifier + '-') + str(i)) + '_') + currentTimeUTCString) + '.txt')
openFile = open(outFile, 'w+')
for key in resultDict.keys():
objectDict = resultDict[key]
name = resultDict[key]['name']
ref = resultDict[key]['ref']
minor = resultDict[key]['minor'][i]
major = resultDict[key]['major'][i]
uid = resultDict[key]['uid']
data_area = resultDict[key]['data-area'][i]
byte_size = resultDict[key]['byte-size'][i]
core = resultDict[key]['core'][i]
titleStr = ('\n %s, Core %s, Uid %s, Major %s, Minor %s, Data Area %s, byte Size %s, %s \n' % (name, core, uid, major, minor, data_area, byte_size, ref))
tempStr = ''
tempStr = self.generateTextFromDict(objectDict, tempStr, indentLevel=0, index=i)
if self.debug:
print(('Signature: ' + titleStr))
openFile.write(endString)
openFile.write(titleStr)
openFile.write(endString)
openFile.write(tempStr)
openFile.close()
|
function for turning a configuration file into multiple plain text files, each containing all the objects for
a single time stamp in the time-series of the configuration file
Args:
inputFile: String of the path to the configuration file to be used for processing
identifier: String of the identifier for the name of the plain text files
Returns:
|
src/software/TSV/configToText.py
|
generateTextFromConfigFile
|
intel/RAAD
| 0
|
python
|
def generateTextFromConfigFile(self, inputFile='time-series.ini', identifier='Tv2HiTAC', iterations=100):
'\n function for turning a configuration file into multiple plain text files, each containing all the objects for\n a single time stamp in the time-series of the configuration file\n\n Args:\n inputFile: String of the path to the configuration file to be used for processing\n identifier: String of the identifier for the name of the plain text files\n\n Returns:\n\n '
endString = '\n################################################################################################################\n'
config = cF.ConfigParser()
config.read(inputFile)
intermediateDict = DP.preprocessingAPI.loadConfigIntoDict(config, True)
resultDict = DP.preprocessingAPI.transformDict(intermediateDict, True)
for i in range(iterations):
currentTimeUTCString = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S-%f')
outFile = (((((identifier + '-') + str(i)) + '_') + currentTimeUTCString) + '.txt')
openFile = open(outFile, 'w+')
for key in resultDict.keys():
objectDict = resultDict[key]
name = resultDict[key]['name']
ref = resultDict[key]['ref']
minor = resultDict[key]['minor'][i]
major = resultDict[key]['major'][i]
uid = resultDict[key]['uid']
data_area = resultDict[key]['data-area'][i]
byte_size = resultDict[key]['byte-size'][i]
core = resultDict[key]['core'][i]
titleStr = ('\n %s, Core %s, Uid %s, Major %s, Minor %s, Data Area %s, byte Size %s, %s \n' % (name, core, uid, major, minor, data_area, byte_size, ref))
tempStr =
tempStr = self.generateTextFromDict(objectDict, tempStr, indentLevel=0, index=i)
if self.debug:
print(('Signature: ' + titleStr))
openFile.write(endString)
openFile.write(titleStr)
openFile.write(endString)
openFile.write(tempStr)
openFile.close()
|
def generateTextFromConfigFile(self, inputFile='time-series.ini', identifier='Tv2HiTAC', iterations=100):
'\n function for turning a configuration file into multiple plain text files, each containing all the objects for\n a single time stamp in the time-series of the configuration file\n\n Args:\n inputFile: String of the path to the configuration file to be used for processing\n identifier: String of the identifier for the name of the plain text files\n\n Returns:\n\n '
endString = '\n################################################################################################################\n'
config = cF.ConfigParser()
config.read(inputFile)
intermediateDict = DP.preprocessingAPI.loadConfigIntoDict(config, True)
resultDict = DP.preprocessingAPI.transformDict(intermediateDict, True)
for i in range(iterations):
currentTimeUTCString = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S-%f')
outFile = (((((identifier + '-') + str(i)) + '_') + currentTimeUTCString) + '.txt')
openFile = open(outFile, 'w+')
for key in resultDict.keys():
objectDict = resultDict[key]
name = resultDict[key]['name']
ref = resultDict[key]['ref']
minor = resultDict[key]['minor'][i]
major = resultDict[key]['major'][i]
uid = resultDict[key]['uid']
data_area = resultDict[key]['data-area'][i]
byte_size = resultDict[key]['byte-size'][i]
core = resultDict[key]['core'][i]
titleStr = ('\n %s, Core %s, Uid %s, Major %s, Minor %s, Data Area %s, byte Size %s, %s \n' % (name, core, uid, major, minor, data_area, byte_size, ref))
tempStr =
tempStr = self.generateTextFromDict(objectDict, tempStr, indentLevel=0, index=i)
if self.debug:
print(('Signature: ' + titleStr))
openFile.write(endString)
openFile.write(titleStr)
openFile.write(endString)
openFile.write(tempStr)
openFile.close()<|docstring|>function for turning a configuration file into multiple plain text files, each containing all the objects for
a single time stamp in the time-series of the configuration file
Args:
inputFile: String of the path to the configuration file to be used for processing
identifier: String of the identifier for the name of the plain text files
Returns:<|endoftext|>
|
16afa4ec8fec2b5c99aed5e65ad79416a49920ef10484f62b79ef89b331a136e
|
def __init__(self, behaviors_path, news_with_entity_path):
'\n Args:\n behaviors_path: path of behaviors tsv file\n example:\n clicked_news\tcandidate_news\tclicked\n N12142 N55361 N42151 N5313 N38326 N60863 N32104 N36290 N65 N43756 N1686 N54143 N64745 N54637 N56978 N26686 N31733 N31851 N32288 N57578 N39175 N22904 N9874 N7544 N7228 N61247 N39144 N28742 N10369 N12912 N29465 N38587 N49827 N35943\tN11611\t0\n\n news_with_entity_path: path of news_with_entity, map news id to title and entities\n example:\n id\ttitle\tentities\n N1\t[1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\t[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n '
super(Dataset, self).__init__()
self.behaviors = pd.read_table(behaviors_path)
self.behaviors.clicked_news.fillna('', inplace=True)
self.news_with_entity = pd.read_table(news_with_entity_path, index_col='id', converters={'title': literal_eval, 'entities': literal_eval})
|
Args:
behaviors_path: path of behaviors tsv file
example:
clicked_news candidate_news clicked
N12142 N55361 N42151 N5313 N38326 N60863 N32104 N36290 N65 N43756 N1686 N54143 N64745 N54637 N56978 N26686 N31733 N31851 N32288 N57578 N39175 N22904 N9874 N7544 N7228 N61247 N39144 N28742 N10369 N12912 N29465 N38587 N49827 N35943 N11611 0
news_with_entity_path: path of news_with_entity, map news id to title and entities
example:
id title entities
N1 [1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
|
src/dataset.py
|
__init__
|
yusanshi/DKN
| 32
|
python
|
def __init__(self, behaviors_path, news_with_entity_path):
'\n Args:\n behaviors_path: path of behaviors tsv file\n example:\n clicked_news\tcandidate_news\tclicked\n N12142 N55361 N42151 N5313 N38326 N60863 N32104 N36290 N65 N43756 N1686 N54143 N64745 N54637 N56978 N26686 N31733 N31851 N32288 N57578 N39175 N22904 N9874 N7544 N7228 N61247 N39144 N28742 N10369 N12912 N29465 N38587 N49827 N35943\tN11611\t0\n\n news_with_entity_path: path of news_with_entity, map news id to title and entities\n example:\n id\ttitle\tentities\n N1\t[1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\t[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n '
super(Dataset, self).__init__()
self.behaviors = pd.read_table(behaviors_path)
self.behaviors.clicked_news.fillna(, inplace=True)
self.news_with_entity = pd.read_table(news_with_entity_path, index_col='id', converters={'title': literal_eval, 'entities': literal_eval})
|
def __init__(self, behaviors_path, news_with_entity_path):
'\n Args:\n behaviors_path: path of behaviors tsv file\n example:\n clicked_news\tcandidate_news\tclicked\n N12142 N55361 N42151 N5313 N38326 N60863 N32104 N36290 N65 N43756 N1686 N54143 N64745 N54637 N56978 N26686 N31733 N31851 N32288 N57578 N39175 N22904 N9874 N7544 N7228 N61247 N39144 N28742 N10369 N12912 N29465 N38587 N49827 N35943\tN11611\t0\n\n news_with_entity_path: path of news_with_entity, map news id to title and entities\n example:\n id\ttitle\tentities\n N1\t[1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\t[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n '
super(Dataset, self).__init__()
self.behaviors = pd.read_table(behaviors_path)
self.behaviors.clicked_news.fillna(, inplace=True)
self.news_with_entity = pd.read_table(news_with_entity_path, index_col='id', converters={'title': literal_eval, 'entities': literal_eval})<|docstring|>Args:
behaviors_path: path of behaviors tsv file
example:
clicked_news candidate_news clicked
N12142 N55361 N42151 N5313 N38326 N60863 N32104 N36290 N65 N43756 N1686 N54143 N64745 N54637 N56978 N26686 N31733 N31851 N32288 N57578 N39175 N22904 N9874 N7544 N7228 N61247 N39144 N28742 N10369 N12912 N29465 N38587 N49827 N35943 N11611 0
news_with_entity_path: path of news_with_entity, map news id to title and entities
example:
id title entities
N1 [1, 2, 3, 4, 5, 6, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] [0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]<|endoftext|>
|
6e159bd933e831f94d71ade9256a5c2ea605244a897b746fb1d77116cdeb7665
|
def __getitem__(self, idx):
'\n example:\n {\n clicked: 0\n candidate_news:\n {\n "word": [0] * num_words_a_news,\n "entity": [0] * num_words_a_news\n }\n clicked_news:\n [\n {\n "word": [0] * num_words_a_news,\n "entity": [0] * num_words_a_news\n } * num_clicked_news_a_user\n ]\n }\n '
def news2dict(news, df):
return ({'word': df.loc[news].title, 'entity': df.loc[news].entities} if (news in df.index) else {'word': ([0] * Config.num_words_a_news), 'entity': ([0] * Config.num_words_a_news)})
item = {}
row = self.behaviors.iloc[idx]
item['clicked'] = row.clicked
item['candidate_news'] = news2dict(row.candidate_news, self.news_with_entity)
item['clicked_news'] = [news2dict(x, self.news_with_entity) for x in row.clicked_news.split()[:Config.num_clicked_news_a_user]]
padding = {'word': ([0] * Config.num_words_a_news), 'entity': ([0] * Config.num_words_a_news)}
repeated_times = (Config.num_clicked_news_a_user - len(item['clicked_news']))
assert (repeated_times >= 0)
item['clicked_news'].extend(([padding] * repeated_times))
return item
|
example:
{
clicked: 0
candidate_news:
{
"word": [0] * num_words_a_news,
"entity": [0] * num_words_a_news
}
clicked_news:
[
{
"word": [0] * num_words_a_news,
"entity": [0] * num_words_a_news
} * num_clicked_news_a_user
]
}
|
src/dataset.py
|
__getitem__
|
yusanshi/DKN
| 32
|
python
|
def __getitem__(self, idx):
'\n example:\n {\n clicked: 0\n candidate_news:\n {\n "word": [0] * num_words_a_news,\n "entity": [0] * num_words_a_news\n }\n clicked_news:\n [\n {\n "word": [0] * num_words_a_news,\n "entity": [0] * num_words_a_news\n } * num_clicked_news_a_user\n ]\n }\n '
def news2dict(news, df):
return ({'word': df.loc[news].title, 'entity': df.loc[news].entities} if (news in df.index) else {'word': ([0] * Config.num_words_a_news), 'entity': ([0] * Config.num_words_a_news)})
item = {}
row = self.behaviors.iloc[idx]
item['clicked'] = row.clicked
item['candidate_news'] = news2dict(row.candidate_news, self.news_with_entity)
item['clicked_news'] = [news2dict(x, self.news_with_entity) for x in row.clicked_news.split()[:Config.num_clicked_news_a_user]]
padding = {'word': ([0] * Config.num_words_a_news), 'entity': ([0] * Config.num_words_a_news)}
repeated_times = (Config.num_clicked_news_a_user - len(item['clicked_news']))
assert (repeated_times >= 0)
item['clicked_news'].extend(([padding] * repeated_times))
return item
|
def __getitem__(self, idx):
'\n example:\n {\n clicked: 0\n candidate_news:\n {\n "word": [0] * num_words_a_news,\n "entity": [0] * num_words_a_news\n }\n clicked_news:\n [\n {\n "word": [0] * num_words_a_news,\n "entity": [0] * num_words_a_news\n } * num_clicked_news_a_user\n ]\n }\n '
def news2dict(news, df):
return ({'word': df.loc[news].title, 'entity': df.loc[news].entities} if (news in df.index) else {'word': ([0] * Config.num_words_a_news), 'entity': ([0] * Config.num_words_a_news)})
item = {}
row = self.behaviors.iloc[idx]
item['clicked'] = row.clicked
item['candidate_news'] = news2dict(row.candidate_news, self.news_with_entity)
item['clicked_news'] = [news2dict(x, self.news_with_entity) for x in row.clicked_news.split()[:Config.num_clicked_news_a_user]]
padding = {'word': ([0] * Config.num_words_a_news), 'entity': ([0] * Config.num_words_a_news)}
repeated_times = (Config.num_clicked_news_a_user - len(item['clicked_news']))
assert (repeated_times >= 0)
item['clicked_news'].extend(([padding] * repeated_times))
return item<|docstring|>example:
{
clicked: 0
candidate_news:
{
"word": [0] * num_words_a_news,
"entity": [0] * num_words_a_news
}
clicked_news:
[
{
"word": [0] * num_words_a_news,
"entity": [0] * num_words_a_news
} * num_clicked_news_a_user
]
}<|endoftext|>
|
0ab16f392c0ead88c0501d21be0fe57251576f4b2a32f8b639f161d4df9d6dce
|
def __init__(self, default_values=None):
'Create object instance from environment variables and optional set\n of default values.\n\n Parameters\n ----------\n default_values: dict, optional\n Dictionary of default values\n '
if (default_values is None):
default_values = DEFAULT_SETTINGS
self.controller = base.ConfigObject(attributes=[('url', VIZIERWORKER_CONTROLLER_URL, base.STRING)], default_values=default_values)
self.env = base.ConfigObject(attributes=[('identifier', VIZIERWORKER_ENV, base.STRING), ('processor_path', VIZIERWORKER_PROCESSOR_PATH, base.STRING)], default_values=default_values)
self.logs = base.ConfigObject(attributes=[('worker', VIZIERWORKER_LOG_DIR, base.STRING)], default_values=default_values)
|
Create object instance from environment variables and optional set
of default values.
Parameters
----------
default_values: dict, optional
Dictionary of default values
|
vizier/config/worker.py
|
__init__
|
sanchitcop19/web-api-async
| 2
|
python
|
def __init__(self, default_values=None):
    """Create object instance from environment variables and an optional
    set of default values.

    Parameters
    ----------
    default_values: dict, optional
        Dictionary of default values
    """
    settings = DEFAULT_SETTINGS if default_values is None else default_values
    # Controller API endpoint.
    self.controller = base.ConfigObject(
        attributes=[('url', VIZIERWORKER_CONTROLLER_URL, base.STRING)],
        default_values=settings
    )
    # Execution environment identifier and processor search path.
    self.env = base.ConfigObject(
        attributes=[
            ('identifier', VIZIERWORKER_ENV, base.STRING),
            ('processor_path', VIZIERWORKER_PROCESSOR_PATH, base.STRING)
        ],
        default_values=settings
    )
    # Worker log output directory.
    self.logs = base.ConfigObject(
        attributes=[('worker', VIZIERWORKER_LOG_DIR, base.STRING)],
        default_values=settings
    )
|
def __init__(self, default_values=None):
'Create object instance from environment variables and optional set\n of default values.\n\n Parameters\n ----------\n default_values: dict, optional\n Dictionary of default values\n '
if (default_values is None):
default_values = DEFAULT_SETTINGS
self.controller = base.ConfigObject(attributes=[('url', VIZIERWORKER_CONTROLLER_URL, base.STRING)], default_values=default_values)
self.env = base.ConfigObject(attributes=[('identifier', VIZIERWORKER_ENV, base.STRING), ('processor_path', VIZIERWORKER_PROCESSOR_PATH, base.STRING)], default_values=default_values)
self.logs = base.ConfigObject(attributes=[('worker', VIZIERWORKER_LOG_DIR, base.STRING)], default_values=default_values)<|docstring|>Create object instance from environment variables and optional set
of default values.
Parameters
----------
default_values: dict, optional
Dictionary of default values<|endoftext|>
|
5681b01a863d4fbf4c6ffe91920209d6848a7cdf08011bef1d59d31a6bce9add
|
def __init__(self, opt):
'Initialize this dataset class.\n\n Parameters:\n opt (option dicts) -- stores all the experiment options;\n '
self.opt = opt
self.K = opt['K']
self.fonteffects_dir = opt['fonteffects_dir']
self.num_cls = opt['num_cls']
self.clsdict = {}
for (i, source_cls) in enumerate((source_cls for source_cls in natsorted(os.listdir(self.fonteffects_dir)) if (not source_cls.startswith('.')))):
self.clsdict[str(source_cls)] = i
if (i >= (self.num_cls - 1)):
break
self.transform_fonteffects = get_transform(self.opt)
|
Initialize this dataset class.
Parameters:
opt (option dicts) -- stores all the experiment options;
|
codes/data/FontEffectsdataset.py
|
__init__
|
liweileev/FET-GAN
| 47
|
python
|
def __init__(self, opt):
    """Initialize this dataset class.

    Parameters:
        opt (option dicts) -- stores all the experiment options;
    """
    self.opt = opt
    self.K = opt['K']
    self.fonteffects_dir = opt['fonteffects_dir']
    self.num_cls = opt['num_cls']
    # Map the first num_cls visible class directory names (natural sort
    # order, hidden entries skipped) to integer labels.
    self.clsdict = {}
    visible_classes = (d for d in natsorted(os.listdir(self.fonteffects_dir))
                       if not d.startswith('.'))
    for label, cls_name in enumerate(visible_classes):
        self.clsdict[str(cls_name)] = label
        if label >= self.num_cls - 1:
            break
    self.transform_fonteffects = get_transform(self.opt)
|
def __init__(self, opt):
'Initialize this dataset class.\n\n Parameters:\n opt (option dicts) -- stores all the experiment options;\n '
self.opt = opt
self.K = opt['K']
self.fonteffects_dir = opt['fonteffects_dir']
self.num_cls = opt['num_cls']
self.clsdict = {}
for (i, source_cls) in enumerate((source_cls for source_cls in natsorted(os.listdir(self.fonteffects_dir)) if (not source_cls.startswith('.')))):
self.clsdict[str(source_cls)] = i
if (i >= (self.num_cls - 1)):
break
self.transform_fonteffects = get_transform(self.opt)<|docstring|>Initialize this dataset class.
Parameters:
opt (option dicts) -- stores all the experiment options;<|endoftext|>
|
e84941ce98b5f13896d27d15d29437c9937b3384b3ec7bb0ea9741c759a0849a
|
def __getitem__(self, index):
'Return a source image and K effect ref images.\n\n Parameters:\n index (int) -- a random integer for data indexing\n\n Returns a dictionary that contains source, refs\n source (tensor) -- an image in the source domain\n refs (tensor) -- K corresponding ref images in the target domain\n '
idx = index
for (i, source_cls) in enumerate((source_cls for source_cls in natsorted(os.listdir(self.fonteffects_dir))[:self.num_cls] if (not source_cls.startswith('.')))):
for source_name in (source_name for source_name in natsorted(os.listdir(os.path.join(self.fonteffects_dir, source_cls))) if (not source_name.startswith('.'))):
if is_image_file(source_name):
if (idx != 0):
idx -= 1
else:
break
if ((idx == 0) or (i >= (self.num_cls - 1))):
break
source_path = os.path.join(self.fonteffects_dir, source_cls, source_name)
source_label = self.clsdict[source_cls]
other_cls = [cls for cls in natsorted(os.listdir(self.fonteffects_dir))[:self.num_cls] if ((cls != source_cls) and (not cls.startswith('.')))]
ref_cls = random.choice(other_cls)
ref_paths = random.sample(glob.glob(os.path.join(self.fonteffects_dir, ref_cls, '*')), self.K)
refs_label = self.clsdict[ref_cls]
target_path = os.path.join(self.fonteffects_dir, ref_cls, source_name)
target_img = Image.open(target_path).convert('RGB')
target = self.transform_fonteffects(target_img)
source_img = Image.open(source_path).convert('RGB')
source = self.transform_fonteffects(source_img)
refs = torch.zeros(self.K, self.opt['input_nc'], self.opt['crop_size'], self.opt['crop_size'])
for (i, ref_path) in enumerate(ref_paths):
ref_img = Image.open(ref_path).convert('RGB')
ref = self.transform_fonteffects(ref_img)
refs[i] = ref
return {'source': source, 'source_label': source_label, 'target': target, 'refs': refs, 'refs_label': refs_label}
|
Return a source image and K effect ref images.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains source, refs
source (tensor) -- an image in the source domain
refs (tensor) -- K corresponding ref images in the target domain
|
codes/data/FontEffectsdataset.py
|
__getitem__
|
liweileev/FET-GAN
| 47
|
python
|
def __getitem__(self, index):
    """Return a source image and K effect ref images.

    Parameters:
        index (int) -- a random integer for data indexing

    Returns a dictionary that contains source, refs
        source (tensor) -- an image in the source domain
        refs (tensor) -- K corresponding ref images in the target domain
    """
    # Walk the first num_cls class folders in natural-sort order and count
    # down idx once per image file: when idx hits 0, (source_cls,
    # source_name) identify the index-th image of the dataset.
    idx = index
    for (i, source_cls) in enumerate((source_cls for source_cls in natsorted(os.listdir(self.fonteffects_dir))[:self.num_cls] if (not source_cls.startswith('.')))):
        for source_name in (source_name for source_name in natsorted(os.listdir(os.path.join(self.fonteffects_dir, source_cls))) if (not source_name.startswith('.'))):
            if is_image_file(source_name):
                if (idx != 0):
                    idx -= 1
                else:
                    break
        if ((idx == 0) or (i >= (self.num_cls - 1))):
            break
    source_path = os.path.join(self.fonteffects_dir, source_cls, source_name)
    source_label = self.clsdict[source_cls]
    # Pick a different effect class at random and sample K reference images
    # from it (random.sample requires the class to hold at least K files --
    # TODO confirm dataset guarantees this).
    other_cls = [cls for cls in natsorted(os.listdir(self.fonteffects_dir))[:self.num_cls] if ((cls != source_cls) and (not cls.startswith('.')))]
    ref_cls = random.choice(other_cls)
    ref_paths = random.sample(glob.glob(os.path.join(self.fonteffects_dir, ref_cls, '*')), self.K)
    refs_label = self.clsdict[ref_cls]
    # Ground-truth target: the same file name rendered in the reference
    # class (assumes it exists there -- TODO confirm).
    target_path = os.path.join(self.fonteffects_dir, ref_cls, source_name)
    target_img = Image.open(target_path).convert('RGB')
    target = self.transform_fonteffects(target_img)
    source_img = Image.open(source_path).convert('RGB')
    source = self.transform_fonteffects(source_img)
    # Stack the K transformed reference images into one tensor.
    refs = torch.zeros(self.K, self.opt['input_nc'], self.opt['crop_size'], self.opt['crop_size'])
    for (i, ref_path) in enumerate(ref_paths):
        ref_img = Image.open(ref_path).convert('RGB')
        ref = self.transform_fonteffects(ref_img)
        refs[i] = ref
    return {'source': source, 'source_label': source_label, 'target': target, 'refs': refs, 'refs_label': refs_label}
|
def __getitem__(self, index):
'Return a source image and K effect ref images.\n\n Parameters:\n index (int) -- a random integer for data indexing\n\n Returns a dictionary that contains source, refs\n source (tensor) -- an image in the source domain\n refs (tensor) -- K corresponding ref images in the target domain\n '
idx = index
for (i, source_cls) in enumerate((source_cls for source_cls in natsorted(os.listdir(self.fonteffects_dir))[:self.num_cls] if (not source_cls.startswith('.')))):
for source_name in (source_name for source_name in natsorted(os.listdir(os.path.join(self.fonteffects_dir, source_cls))) if (not source_name.startswith('.'))):
if is_image_file(source_name):
if (idx != 0):
idx -= 1
else:
break
if ((idx == 0) or (i >= (self.num_cls - 1))):
break
source_path = os.path.join(self.fonteffects_dir, source_cls, source_name)
source_label = self.clsdict[source_cls]
other_cls = [cls for cls in natsorted(os.listdir(self.fonteffects_dir))[:self.num_cls] if ((cls != source_cls) and (not cls.startswith('.')))]
ref_cls = random.choice(other_cls)
ref_paths = random.sample(glob.glob(os.path.join(self.fonteffects_dir, ref_cls, '*')), self.K)
refs_label = self.clsdict[ref_cls]
target_path = os.path.join(self.fonteffects_dir, ref_cls, source_name)
target_img = Image.open(target_path).convert('RGB')
target = self.transform_fonteffects(target_img)
source_img = Image.open(source_path).convert('RGB')
source = self.transform_fonteffects(source_img)
refs = torch.zeros(self.K, self.opt['input_nc'], self.opt['crop_size'], self.opt['crop_size'])
for (i, ref_path) in enumerate(ref_paths):
ref_img = Image.open(ref_path).convert('RGB')
ref = self.transform_fonteffects(ref_img)
refs[i] = ref
return {'source': source, 'source_label': source_label, 'target': target, 'refs': refs, 'refs_label': refs_label}<|docstring|>Return a source image and K effect ref images.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains source, refs
source (tensor) -- an image in the source domain
refs (tensor) -- K corresponding ref images in the target domain<|endoftext|>
|
c82d03fc16ccbf8241620587cd50d53e25f520e923edc1fc1dd44785cb6e6f44
|
def __len__(self):
'Return the total number of images in the dataset.\n '
len = 0
for (i, source_cls) in enumerate((source_cls for source_cls in natsorted(os.listdir(self.fonteffects_dir)) if (not source_cls.startswith('.')))):
for source_name in (source_name for source_name in natsorted(os.listdir(os.path.join(self.fonteffects_dir, source_cls))) if (not source_name.startswith('.'))):
if is_image_file(source_name):
len += 1
if (i >= (self.num_cls - 1)):
break
return len
|
Return the total number of images in the dataset.
|
codes/data/FontEffectsdataset.py
|
__len__
|
liweileev/FET-GAN
| 47
|
python
|
def __len__(self):
    """Return the total number of images in the dataset.

    Counts image files in the first ``num_cls`` visible class directories
    (natural sort order; hidden entries skipped).
    """
    # Renamed the accumulator: the original shadowed the builtin `len`.
    count = 0
    for i, source_cls in enumerate(
            cls for cls in natsorted(os.listdir(self.fonteffects_dir))
            if not cls.startswith('.')):
        for source_name in os.listdir(os.path.join(self.fonteffects_dir, source_cls)):
            # Sort order is irrelevant for counting, so the inner listing
            # does not need natsorted.
            if not source_name.startswith('.') and is_image_file(source_name):
                count += 1
        if i >= self.num_cls - 1:
            break
    return count
|
def __len__(self):
'\n '
len = 0
for (i, source_cls) in enumerate((source_cls for source_cls in natsorted(os.listdir(self.fonteffects_dir)) if (not source_cls.startswith('.')))):
for source_name in (source_name for source_name in natsorted(os.listdir(os.path.join(self.fonteffects_dir, source_cls))) if (not source_name.startswith('.'))):
if is_image_file(source_name):
len += 1
if (i >= (self.num_cls - 1)):
break
return len<|docstring|>Return the total number of images in the dataset.<|endoftext|>
|
d1abdcecf3880f92e30449bbe772d674c0f454fb86a4d1450a05e97ad4d5b24e
|
def get_box_union(boxes: Boxes):
    """Merge all boxes into a single box.

    Returns the input unchanged when it is empty; otherwise a Boxes of
    length 1 whose corners are the elementwise min of all top-left corners
    and the elementwise max of all bottom-right corners.
    """
    if len(boxes) == 0:
        return boxes
    bt = boxes.tensor
    # Fixed invalid subscript syntax `bt[(:, :2)]` -> `bt[:, :2]`.
    union_bt = torch.cat(
        (torch.min(bt[:, :2], 0).values, torch.max(bt[:, 2:], 0).values)
    ).reshape(1, -1)
    return Boxes(union_bt)
|
Merge all boxes into a single box
|
d2go/data/transforms/box_utils.py
|
get_box_union
|
ppwwyyxx/d2go
| 687
|
python
|
def get_box_union(boxes: Boxes):
if (len(boxes) == 0):
return boxes
bt = boxes.tensor
union_bt = torch.cat((torch.min(bt[(:, :2)], 0).values, torch.max(bt[(:, 2:)], 0).values)).reshape(1, (- 1))
return Boxes(union_bt)
|
def get_box_union(boxes: Boxes):
if (len(boxes) == 0):
return boxes
bt = boxes.tensor
union_bt = torch.cat((torch.min(bt[(:, :2)], 0).values, torch.max(bt[(:, 2:)], 0).values)).reshape(1, (- 1))
return Boxes(union_bt)<|docstring|>Merge all boxes into a single box<|endoftext|>
|
486e1430fb2c2adccb89b669cb07470aaa82589dc401512c86eb15374e79565d
|
def get_box_from_mask(mask: torch.Tensor) -> Tuple[(int, int, int, int)]:
'Find if there are non-zero elements per row/column first and then find\n min/max position of those elements.\n Only support 2d image (h x w)\n Return (x1, y1, w, h) if bbox found, otherwise None\n '
assert (len(mask.shape) == 2), f'Invalid shape {mask.shape}'
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
if ((bool(np.any(rows)) is False) or (bool(np.any(cols)) is False)):
return None
(rmin, rmax) = np.where(rows)[0][[0, (- 1)]]
(cmin, cmax) = np.where(cols)[0][[0, (- 1)]]
assert (cmax >= cmin), f'cmax={cmax}, cmin={cmin}'
assert (rmax >= rmin), f'rmax={rmax}, rmin={rmin}'
return (cmin, rmin, ((cmax - cmin) + 1), ((rmax - rmin) + 1))
|
Find if there are non-zero elements per row/column first and then find
min/max position of those elements.
Only support 2d image (h x w)
Return (x1, y1, w, h) if bbox found, otherwise None
|
d2go/data/transforms/box_utils.py
|
get_box_from_mask
|
ppwwyyxx/d2go
| 687
|
python
|
def get_box_from_mask(mask: torch.Tensor) -> Tuple[(int, int, int, int)]:
    """Find if there are non-zero elements per row/column first and then
    find the min/max position of those elements.

    Only supports a 2d image (h x w).
    Returns (x1, y1, w, h) if a bbox is found, otherwise None.
    """
    assert (len(mask.shape) == 2), f'Invalid shape {mask.shape}'
    row_hits = np.any(mask, axis=1)
    col_hits = np.any(mask, axis=0)
    # All-zero mask: no box to return.
    if not bool(np.any(row_hits)) or not bool(np.any(col_hits)):
        return None
    row_idx = np.where(row_hits)[0]
    col_idx = np.where(col_hits)[0]
    rmin, rmax = row_idx[0], row_idx[-1]
    cmin, cmax = col_idx[0], col_idx[-1]
    assert (cmax >= cmin), f'cmax={cmax}, cmin={cmin}'
    assert (rmax >= rmin), f'rmax={rmax}, rmin={rmin}'
    return (cmin, rmin, (cmax - cmin) + 1, (rmax - rmin) + 1)
|
def get_box_from_mask(mask: torch.Tensor) -> Tuple[(int, int, int, int)]:
'Find if there are non-zero elements per row/column first and then find\n min/max position of those elements.\n Only support 2d image (h x w)\n Return (x1, y1, w, h) if bbox found, otherwise None\n '
assert (len(mask.shape) == 2), f'Invalid shape {mask.shape}'
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
if ((bool(np.any(rows)) is False) or (bool(np.any(cols)) is False)):
return None
(rmin, rmax) = np.where(rows)[0][[0, (- 1)]]
(cmin, cmax) = np.where(cols)[0][[0, (- 1)]]
assert (cmax >= cmin), f'cmax={cmax}, cmin={cmin}'
assert (rmax >= rmin), f'rmax={rmax}, rmin={rmin}'
return (cmin, rmin, ((cmax - cmin) + 1), ((rmax - rmin) + 1))<|docstring|>Find if there are non-zero elements per row/column first and then find
min/max position of those elements.
Only support 2d image (h x w)
Return (x1, y1, w, h) if bbox found, otherwise None<|endoftext|>
|
a6fcf1ad1b298bdb200d1c98aa3c3b7411b6bef13233f31aa20fefb3df311f54
|
def get_min_box_aspect_ratio(bbox_xywh: torch.Tensor, target_aspect_ratio: float) -> torch.Tensor:
'Get a minimal bbox that matches the target_aspect_ratio\n target_aspect_ratio is representation by w/h\n bbox are represented by pixel coordinates'
bbox_xywh = torch.Tensor(bbox_xywh)
(box_w, box_h) = bbox_xywh[2:]
box_ar = (float(box_w) / box_h)
if (box_ar >= target_aspect_ratio):
new_w = box_w
new_h = (float(new_w) / target_aspect_ratio)
else:
new_h = box_h
new_w = (new_h * target_aspect_ratio)
new_wh = torch.Tensor([new_w, new_h])
bbox_center = (bbox_xywh[:2] + (bbox_xywh[2:] / 2.0))
new_xy = (bbox_center - (new_wh / 2.0))
return torch.cat([new_xy, new_wh])
|
Get a minimal bbox that matches the target_aspect_ratio
target_aspect_ratio is representation by w/h
bbox are represented by pixel coordinates
|
d2go/data/transforms/box_utils.py
|
get_min_box_aspect_ratio
|
ppwwyyxx/d2go
| 687
|
python
|
def get_min_box_aspect_ratio(bbox_xywh: torch.Tensor, target_aspect_ratio: float) -> torch.Tensor:
    """Get a minimal bbox that matches the target_aspect_ratio.

    target_aspect_ratio is represented by w/h; bboxes are represented by
    pixel coordinates. The box is only ever grown, around its center.
    """
    bbox_xywh = torch.Tensor(bbox_xywh)
    box_w, box_h = bbox_xywh[2:]
    # Keep whichever dimension already dominates and stretch the other.
    if (float(box_w) / box_h) >= target_aspect_ratio:
        new_wh = torch.Tensor([float(box_w), float(box_w) / target_aspect_ratio])
    else:
        new_wh = torch.Tensor([float(box_h) * target_aspect_ratio, float(box_h)])
    center = bbox_xywh[:2] + bbox_xywh[2:] / 2.0
    return torch.cat([center - new_wh / 2.0, new_wh])
|
def get_min_box_aspect_ratio(bbox_xywh: torch.Tensor, target_aspect_ratio: float) -> torch.Tensor:
'Get a minimal bbox that matches the target_aspect_ratio\n target_aspect_ratio is representation by w/h\n bbox are represented by pixel coordinates'
bbox_xywh = torch.Tensor(bbox_xywh)
(box_w, box_h) = bbox_xywh[2:]
box_ar = (float(box_w) / box_h)
if (box_ar >= target_aspect_ratio):
new_w = box_w
new_h = (float(new_w) / target_aspect_ratio)
else:
new_h = box_h
new_w = (new_h * target_aspect_ratio)
new_wh = torch.Tensor([new_w, new_h])
bbox_center = (bbox_xywh[:2] + (bbox_xywh[2:] / 2.0))
new_xy = (bbox_center - (new_wh / 2.0))
return torch.cat([new_xy, new_wh])<|docstring|>Get a minimal bbox that matches the target_aspect_ratio
target_aspect_ratio is representation by w/h
bbox are represented by pixel coordinates<|endoftext|>
|
56dfd0cf0cf7964de6a01077c0fa1f5f9cdf5d8f6324f328adf5d231db0bfb92
|
def get_box_center(bbox_xywh: torch.Tensor) -> torch.Tensor:
'Get the center of the bbox'
return (torch.Tensor(bbox_xywh[:2]) + (torch.Tensor(bbox_xywh[2:]) / 2.0))
|
Get the center of the bbox
|
d2go/data/transforms/box_utils.py
|
get_box_center
|
ppwwyyxx/d2go
| 687
|
python
|
def get_box_center(bbox_xywh: torch.Tensor) -> torch.Tensor:
    """Get the center of the bbox (xywh, pixel coordinates)."""
    top_left = torch.Tensor(bbox_xywh[:2])
    half_wh = torch.Tensor(bbox_xywh[2:]) / 2.0
    return top_left + half_wh
|
def get_box_center(bbox_xywh: torch.Tensor) -> torch.Tensor:
return (torch.Tensor(bbox_xywh[:2]) + (torch.Tensor(bbox_xywh[2:]) / 2.0))<|docstring|>Get the center of the bbox<|endoftext|>
|
2d07d9ab21253c02071db123d9375f36767a68f754884a984b534bb965597c41
|
def get_bbox_xywh_from_center_wh(bbox_center: torch.Tensor, bbox_wh: torch.Tensor) -> torch.Tensor:
'Get a bbox from bbox center and the width and height'
bbox_wh = torch.Tensor(bbox_wh)
bbox_xy = (torch.Tensor(bbox_center) - (bbox_wh / 2.0))
return torch.cat([bbox_xy, bbox_wh])
|
Get a bbox from bbox center and the width and height
|
d2go/data/transforms/box_utils.py
|
get_bbox_xywh_from_center_wh
|
ppwwyyxx/d2go
| 687
|
python
|
def get_bbox_xywh_from_center_wh(bbox_center: torch.Tensor, bbox_wh: torch.Tensor) -> torch.Tensor:
    """Get a bbox (xywh) from the bbox center and its width/height."""
    wh = torch.Tensor(bbox_wh)
    top_left = torch.Tensor(bbox_center) - wh / 2.0
    return torch.cat([top_left, wh])
|
def get_bbox_xywh_from_center_wh(bbox_center: torch.Tensor, bbox_wh: torch.Tensor) -> torch.Tensor:
bbox_wh = torch.Tensor(bbox_wh)
bbox_xy = (torch.Tensor(bbox_center) - (bbox_wh / 2.0))
return torch.cat([bbox_xy, bbox_wh])<|docstring|>Get a bbox from bbox center and the width and height<|endoftext|>
|
e8cf6ee9bc0d64b63aaa9f3bae8f0192a9ac223012e8613b528834a9165eca9d
|
def get_bbox_xyxy_from_xywh(bbox_xywh: torch.Tensor) -> torch.Tensor:
'Convert the bbox from xywh format to xyxy format\n bbox are represented by pixel coordinates,\n the center of pixels are (x + 0.5, y + 0.5)\n '
return torch.Tensor([bbox_xywh[0], bbox_xywh[1], (bbox_xywh[0] + bbox_xywh[2]), (bbox_xywh[1] + bbox_xywh[3])])
|
Convert the bbox from xywh format to xyxy format
bbox are represented by pixel coordinates,
the center of pixels are (x + 0.5, y + 0.5)
|
d2go/data/transforms/box_utils.py
|
get_bbox_xyxy_from_xywh
|
ppwwyyxx/d2go
| 687
|
python
|
def get_bbox_xyxy_from_xywh(bbox_xywh: torch.Tensor) -> torch.Tensor:
    """Convert the bbox from xywh format to xyxy format.

    bboxes are represented by pixel coordinates; the center of a pixel is
    (x + 0.5, y + 0.5).
    """
    x, y, w, h = bbox_xywh[0], bbox_xywh[1], bbox_xywh[2], bbox_xywh[3]
    return torch.Tensor([x, y, x + w, y + h])
|
def get_bbox_xyxy_from_xywh(bbox_xywh: torch.Tensor) -> torch.Tensor:
'Convert the bbox from xywh format to xyxy format\n bbox are represented by pixel coordinates,\n the center of pixels are (x + 0.5, y + 0.5)\n '
return torch.Tensor([bbox_xywh[0], bbox_xywh[1], (bbox_xywh[0] + bbox_xywh[2]), (bbox_xywh[1] + bbox_xywh[3])])<|docstring|>Convert the bbox from xywh format to xyxy format
bbox are represented by pixel coordinates,
the center of pixels are (x + 0.5, y + 0.5)<|endoftext|>
|
d4c9d14624a5c86081946108f1481778d7491deafb0c3e0242b60af7fc9b717a
|
def get_bbox_xywh_from_xyxy(bbox_xyxy: torch.Tensor) -> torch.Tensor:
'Convert the bbox from xyxy format to xywh format'
return torch.Tensor([bbox_xyxy[0], bbox_xyxy[1], (bbox_xyxy[2] - bbox_xyxy[0]), (bbox_xyxy[3] - bbox_xyxy[1])])
|
Convert the bbox from xyxy format to xywh format
|
d2go/data/transforms/box_utils.py
|
get_bbox_xywh_from_xyxy
|
ppwwyyxx/d2go
| 687
|
python
|
def get_bbox_xywh_from_xyxy(bbox_xyxy: torch.Tensor) -> torch.Tensor:
    """Convert the bbox from xyxy format to xywh format."""
    x1, y1, x2, y2 = bbox_xyxy[0], bbox_xyxy[1], bbox_xyxy[2], bbox_xyxy[3]
    return torch.Tensor([x1, y1, x2 - x1, y2 - y1])
|
def get_bbox_xywh_from_xyxy(bbox_xyxy: torch.Tensor) -> torch.Tensor:
return torch.Tensor([bbox_xyxy[0], bbox_xyxy[1], (bbox_xyxy[2] - bbox_xyxy[0]), (bbox_xyxy[3] - bbox_xyxy[1])])<|docstring|>Convert the bbox from xyxy format to xywh format<|endoftext|>
|
9da2c585c422434f45374c1bef7df3f7a0603179a98b187b4f2908963b9d161b
|
def scale_bbox_center(bbox_xywh: torch.Tensor, target_scale: float) -> torch.Tensor:
'Scale the bbox around the center of the bbox'
box_center = get_box_center(bbox_xywh)
box_wh = (torch.Tensor(bbox_xywh[2:]) * target_scale)
return get_bbox_xywh_from_center_wh(box_center, box_wh)
|
Scale the bbox around the center of the bbox
|
d2go/data/transforms/box_utils.py
|
scale_bbox_center
|
ppwwyyxx/d2go
| 687
|
python
|
def scale_bbox_center(bbox_xywh: torch.Tensor, target_scale: float) -> torch.Tensor:
    """Scale the bbox width/height by target_scale around its center."""
    center = get_box_center(bbox_xywh)
    scaled_wh = torch.Tensor(bbox_xywh[2:]) * target_scale
    return get_bbox_xywh_from_center_wh(center, scaled_wh)
|
def scale_bbox_center(bbox_xywh: torch.Tensor, target_scale: float) -> torch.Tensor:
box_center = get_box_center(bbox_xywh)
box_wh = (torch.Tensor(bbox_xywh[2:]) * target_scale)
return get_bbox_xywh_from_center_wh(box_center, box_wh)<|docstring|>Scale the bbox around the center of the bbox<|endoftext|>
|
7126f53d3064d8658fcea5e019b5d4ce2e800efd884fb2c47505d8e8032fad6c
|
def offset_bbox(bbox_xywh: torch.Tensor, target_offset: float) -> torch.Tensor:
'Offset the bbox based on target_offset'
box_center = get_box_center(bbox_xywh)
new_center = (box_center + torch.Tensor(target_offset))
return get_bbox_xywh_from_center_wh(new_center, bbox_xywh[2:])
|
Offset the bbox based on target_offset
|
d2go/data/transforms/box_utils.py
|
offset_bbox
|
ppwwyyxx/d2go
| 687
|
python
|
def offset_bbox(bbox_xywh: torch.Tensor, target_offset: float) -> torch.Tensor:
    """Translate the bbox center by target_offset, keeping its width/height."""
    shifted_center = get_box_center(bbox_xywh) + torch.Tensor(target_offset)
    return get_bbox_xywh_from_center_wh(shifted_center, bbox_xywh[2:])
|
def offset_bbox(bbox_xywh: torch.Tensor, target_offset: float) -> torch.Tensor:
box_center = get_box_center(bbox_xywh)
new_center = (box_center + torch.Tensor(target_offset))
return get_bbox_xywh_from_center_wh(new_center, bbox_xywh[2:])<|docstring|>Offset the bbox based on target_offset<|endoftext|>
|
7c874b63e8f961c47409b2690e2e6e0cfff38d3834c3a01286b15cd205790601
|
def clip_box_xywh(bbox_xywh: torch.Tensor, image_size_hw: List[int]):
'Clip the bbox based on image_size_hw'
(h, w) = image_size_hw
bbox_xyxy = get_bbox_xyxy_from_xywh(bbox_xywh)
bbox_xyxy[0] = max(bbox_xyxy[0], 0)
bbox_xyxy[1] = max(bbox_xyxy[1], 0)
bbox_xyxy[2] = min(bbox_xyxy[2], w)
bbox_xyxy[3] = min(bbox_xyxy[3], h)
return get_bbox_xywh_from_xyxy(bbox_xyxy)
|
Clip the bbox based on image_size_hw
|
d2go/data/transforms/box_utils.py
|
clip_box_xywh
|
ppwwyyxx/d2go
| 687
|
python
|
def clip_box_xywh(bbox_xywh: torch.Tensor, image_size_hw: List[int]):
    """Clip the bbox (xywh) to lie within an image of size (h, w)."""
    height, width = image_size_hw
    xyxy = get_bbox_xyxy_from_xywh(bbox_xywh)
    # Clamp each corner to the image bounds in place.
    xyxy[0] = max(xyxy[0], 0)
    xyxy[1] = max(xyxy[1], 0)
    xyxy[2] = min(xyxy[2], width)
    xyxy[3] = min(xyxy[3], height)
    return get_bbox_xywh_from_xyxy(xyxy)
|
def clip_box_xywh(bbox_xywh: torch.Tensor, image_size_hw: List[int]):
(h, w) = image_size_hw
bbox_xyxy = get_bbox_xyxy_from_xywh(bbox_xywh)
bbox_xyxy[0] = max(bbox_xyxy[0], 0)
bbox_xyxy[1] = max(bbox_xyxy[1], 0)
bbox_xyxy[2] = min(bbox_xyxy[2], w)
bbox_xyxy[3] = min(bbox_xyxy[3], h)
return get_bbox_xywh_from_xyxy(bbox_xyxy)<|docstring|>Clip the bbox based on image_size_hw<|endoftext|>
|
5a32ff4dc285d2486deb6743d7f10871a592c506fdd6d4f3e21b24bfa1bc8658
|
def average_gradients(tower_grads):
'Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n '
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for (g, _) in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
|
Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
|
train_JPPNet-s2.py
|
average_gradients
|
Vickyilango/LIP_JPPNet
| 326
|
python
|
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer
            list is over individual gradients. The inner list is over the
            gradient calculation for each tower.
    Returns:
        List of pairs of (gradient, variable) where the gradient has been
        averaged across all towers.
    """
    averaged = []
    for grad_and_vars in zip(*tower_grads):
        # Stack each tower's gradient along a new leading axis, then take
        # the mean over that axis.
        stacked = tf.concat(
            axis=0,
            values=[tf.expand_dims(g, 0) for (g, _) in grad_and_vars])
        mean_grad = tf.reduce_mean(stacked, 0)
        # Variables are shared across towers, so the first tower's variable
        # stands in for all of them.
        averaged.append((mean_grad, grad_and_vars[0][1]))
    return averaged
|
def average_gradients(tower_grads):
'Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n '
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for (g, _) in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads<|docstring|>Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.<|endoftext|>
|
1dd05b2103a0d8021c989bf7f6130a5d4f7ce841ad53cc48b0bc8bd63954a2cb
|
def run(wsp):
'\n Generate HTML report from OXASL workspace\n '
report_build_dir = None
if wsp.debug:
report_build_dir = os.path.join(wsp.savedir, 'report_build')
wsp.log.write('\nGenerating HTML report\n')
report_dir = os.path.join(wsp.savedir, 'report')
success = wsp.report.generate_html(report_dir, report_build_dir, log=wsp.log)
if success:
wsp.log.write((' - Report generated in %s\n' % report_dir))
|
Generate HTML report from OXASL workspace
|
oxasl/reporting.py
|
run
|
physimals/oxasl
| 1
|
python
|
def run(wsp):
    """Generate HTML report from OXASL workspace."""
    # Keep the intermediate build files only when running in debug mode.
    build_dir = None
    if wsp.debug:
        build_dir = os.path.join(wsp.savedir, 'report_build')
    wsp.log.write('\nGenerating HTML report\n')
    report_dir = os.path.join(wsp.savedir, 'report')
    if wsp.report.generate_html(report_dir, build_dir, log=wsp.log):
        wsp.log.write(' - Report generated in %s\n' % report_dir)
|
def run(wsp):
'\n \n '
report_build_dir = None
if wsp.debug:
report_build_dir = os.path.join(wsp.savedir, 'report_build')
wsp.log.write('\nGenerating HTML report\n')
report_dir = os.path.join(wsp.savedir, 'report')
success = wsp.report.generate_html(report_dir, report_build_dir, log=wsp.log)
if success:
wsp.log.write((' - Report generated in %s\n' % report_dir))<|docstring|>Generate HTML report from OXASL workspace<|endoftext|>
|
deb12521c162bd1b698333bbaf1eab775763765a5ac2d8dd8c3b761ed84741cb
|
def which(program):
'\n Simple implementation of which to search for executable\n '
def is_exe(fpath):
return (os.path.isfile(fpath) and os.access(fpath, os.X_OK))
(fpath, _fname) = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
|
Simple implementation of which to search for executable
|
oxasl/reporting.py
|
which
|
physimals/oxasl
| 1
|
python
|
def which(program):
'\n \n '
def is_exe(fpath):
return (os.path.isfile(fpath) and os.access(fpath, os.X_OK))
(fpath, _fname) = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
|
def which(program):
'\n \n '
def is_exe(fpath):
return (os.path.isfile(fpath) and os.access(fpath, os.X_OK))
(fpath, _fname) = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None<|docstring|>Simple implementation of which to search for executable<|endoftext|>
|
26bc74685fa81da374921267aff5690028bc72392377a477095d91b0eedfbcdd
|
def main():
'\n Simple command line for testing\n '
report = Report()
page = report.page('test')
page.image('lbox', LightboxImage(*[Image(fname) for fname in sys.argv[1:]]))
report.generate_html('testreport')
|
Simple command line for testing
|
oxasl/reporting.py
|
main
|
physimals/oxasl
| 1
|
python
|
def main():
'\n \n '
report = Report()
page = report.page('test')
page.image('lbox', LightboxImage(*[Image(fname) for fname in sys.argv[1:]]))
report.generate_html('testreport')
|
def main():
'\n \n '
report = Report()
page = report.page('test')
page.image('lbox', LightboxImage(*[Image(fname) for fname in sys.argv[1:]]))
report.generate_html('testreport')<|docstring|>Simple command line for testing<|endoftext|>
|
9cb19b287111ac5a0bf7c4df0b530ee09c0acd9a2c1201912cea90e6c2f0d85c
|
def __init__(self, img, bgimage=None, mask=None, **kwargs):
'\n :param img: ``fsl.data.Image`` instance\n :param bgimage: ``fsl.data.Image`` instance which will be used as a greyscale background\n :param mask: ``fsl.data.Image`` instance which will be used to mask ``img``\n\n Keyword arguments:\n\n :param zeromask: If True, treat zero values as transparent\n :param outline: If True, show image as an outline (assumes image is binarised)\n :param colorbar: If True, display colorbar\n '
self._img = img
self._bgimage = bgimage
self._mask = mask
self._zeromask = kwargs.get('zeromask', True)
self._outline = kwargs.get('outline', False)
self._colorbar = kwargs.get('colorbar', False)
self._clamp_colors = kwargs.get('clamp_colors', True)
self.extension = '.png'
|
:param img: ``fsl.data.Image`` instance
:param bgimage: ``fsl.data.Image`` instance which will be used as a greyscale background
:param mask: ``fsl.data.Image`` instance which will be used to mask ``img``
Keyword arguments:
:param zeromask: If True, treat zero values as transparent
:param outline: If True, show image as an outline (assumes image is binarised)
:param colorbar: If True, display colorbar
|
oxasl/reporting.py
|
__init__
|
physimals/oxasl
| 1
|
python
|
def __init__(self, img, bgimage=None, mask=None, **kwargs):
'\n :param img: ``fsl.data.Image`` instance\n :param bgimage: ``fsl.data.Image`` instance which will be used as a greyscale background\n :param mask: ``fsl.data.Image`` instance which will be used to mask ``img``\n\n Keyword arguments:\n\n :param zeromask: If True, treat zero values as transparent\n :param outline: If True, show image as an outline (assumes image is binarised)\n :param colorbar: If True, display colorbar\n '
self._img = img
self._bgimage = bgimage
self._mask = mask
self._zeromask = kwargs.get('zeromask', True)
self._outline = kwargs.get('outline', False)
self._colorbar = kwargs.get('colorbar', False)
self._clamp_colors = kwargs.get('clamp_colors', True)
self.extension = '.png'
|
def __init__(self, img, bgimage=None, mask=None, **kwargs):
'\n :param img: ``fsl.data.Image`` instance\n :param bgimage: ``fsl.data.Image`` instance which will be used as a greyscale background\n :param mask: ``fsl.data.Image`` instance which will be used to mask ``img``\n\n Keyword arguments:\n\n :param zeromask: If True, treat zero values as transparent\n :param outline: If True, show image as an outline (assumes image is binarised)\n :param colorbar: If True, display colorbar\n '
self._img = img
self._bgimage = bgimage
self._mask = mask
self._zeromask = kwargs.get('zeromask', True)
self._outline = kwargs.get('outline', False)
self._colorbar = kwargs.get('colorbar', False)
self._clamp_colors = kwargs.get('clamp_colors', True)
self.extension = '.png'<|docstring|>:param img: ``fsl.data.Image`` instance
:param bgimage: ``fsl.data.Image`` instance which will be used as a greyscale background
:param mask: ``fsl.data.Image`` instance which will be used to mask ``img``
Keyword arguments:
:param zeromask: If True, treat zero values as transparent
:param outline: If True, show image as an outline (assumes image is binarised)
:param colorbar: If True, display colorbar<|endoftext|>
|
6912097ab46cc1eff135528ae3fde7acf494b77a1e30ec02fc4d6ee817a3cfef
|
def tofile(self, fname):
'\n Write image to a file\n '
if (Figure is None):
warnings.warn('matplotlib not installed - cannot generate images')
return
shape = None
for img in [self._img, self._bgimage, self._mask]:
if (img is None):
continue
if (not isinstance(img, Image)):
raise ValueError(('%s: Images must be instances of fsl.data.Image: %s' % (fname, img)))
if (shape is None):
shape = img.shape
if (img.ndim != 3):
raise ValueError('Images must be 3D')
if (img.shape != shape):
raise ValueError('Images do not have consistent shapes')
(min_slice, max_slice) = self._slicerange(self._img, shape)
num_slices = min(16, ((max_slice - min_slice) + 1))
grid_size = int(math.ceil(math.sqrt(num_slices)))
fig = Figure(figsize=(5, 5), dpi=200)
FigureCanvas(fig)
for nslice in range(num_slices):
axes = fig.add_subplot(grid_size, grid_size, (nslice + 1))
axes.set_yticklabels([])
axes.set_xticklabels([])
axes.set_xticks([])
axes.set_yticks([])
slice_idx = (int((float((((max_slice - min_slice) + 1) * nslice)) / num_slices)) + min_slice)
if self._bgimage:
data = self._bgimage.data[(:, :, slice_idx)].T
axes.imshow(data, cmap='gray')
if self._img:
data = self._img.data[(:, :, slice_idx)].T
data[(~ np.isfinite(data))] = 0
if issubclass(data.dtype.type, np.integer):
cmap = 'Reds'
(vmax, vmin) = (np.max(self._img.data), np.min(self._img.data))
else:
cmap = 'viridis'
(vmax, vmin) = (np.percentile(self._img.data, 99), np.percentile(self._img.data, 1))
if (vmax == vmin):
(vmax, vmin) = (np.max(self._img.data), np.min(self._img.data))
if self._clamp_colors:
data = np.clip(data, vmin, vmax)
if self._outline:
data = (data > 0.5).astype(np.int)
data = (data - scipy.ndimage.morphology.binary_erosion(data, structure=np.ones((3, 3))))
if self._mask:
data = np.ma.masked_array(data, (self._mask.data[(:, :, slice_idx)].T == 0))
elif self._zeromask:
data = np.ma.masked_array(data, (data == 0))
img = axes.imshow(data, vmax=vmax, vmin=vmin, cmap=cmap)
axes.set_ylim(axes.get_ylim()[::(- 1)])
fig.subplots_adjust(wspace=0, hspace=0.05)
if (self._img and self._colorbar):
fig.colorbar(img, ax=fig.axes)
fig.savefig(fname, bbox_inches='tight')
|
Write image to a file
|
oxasl/reporting.py
|
tofile
|
physimals/oxasl
| 1
|
python
|
def tofile(self, fname):
'\n \n '
if (Figure is None):
warnings.warn('matplotlib not installed - cannot generate images')
return
shape = None
for img in [self._img, self._bgimage, self._mask]:
if (img is None):
continue
if (not isinstance(img, Image)):
raise ValueError(('%s: Images must be instances of fsl.data.Image: %s' % (fname, img)))
if (shape is None):
shape = img.shape
if (img.ndim != 3):
raise ValueError('Images must be 3D')
if (img.shape != shape):
raise ValueError('Images do not have consistent shapes')
(min_slice, max_slice) = self._slicerange(self._img, shape)
num_slices = min(16, ((max_slice - min_slice) + 1))
grid_size = int(math.ceil(math.sqrt(num_slices)))
fig = Figure(figsize=(5, 5), dpi=200)
FigureCanvas(fig)
for nslice in range(num_slices):
axes = fig.add_subplot(grid_size, grid_size, (nslice + 1))
axes.set_yticklabels([])
axes.set_xticklabels([])
axes.set_xticks([])
axes.set_yticks([])
slice_idx = (int((float((((max_slice - min_slice) + 1) * nslice)) / num_slices)) + min_slice)
if self._bgimage:
data = self._bgimage.data[(:, :, slice_idx)].T
axes.imshow(data, cmap='gray')
if self._img:
data = self._img.data[(:, :, slice_idx)].T
data[(~ np.isfinite(data))] = 0
if issubclass(data.dtype.type, np.integer):
cmap = 'Reds'
(vmax, vmin) = (np.max(self._img.data), np.min(self._img.data))
else:
cmap = 'viridis'
(vmax, vmin) = (np.percentile(self._img.data, 99), np.percentile(self._img.data, 1))
if (vmax == vmin):
(vmax, vmin) = (np.max(self._img.data), np.min(self._img.data))
if self._clamp_colors:
data = np.clip(data, vmin, vmax)
if self._outline:
data = (data > 0.5).astype(np.int)
data = (data - scipy.ndimage.morphology.binary_erosion(data, structure=np.ones((3, 3))))
if self._mask:
data = np.ma.masked_array(data, (self._mask.data[(:, :, slice_idx)].T == 0))
elif self._zeromask:
data = np.ma.masked_array(data, (data == 0))
img = axes.imshow(data, vmax=vmax, vmin=vmin, cmap=cmap)
axes.set_ylim(axes.get_ylim()[::(- 1)])
fig.subplots_adjust(wspace=0, hspace=0.05)
if (self._img and self._colorbar):
fig.colorbar(img, ax=fig.axes)
fig.savefig(fname, bbox_inches='tight')
|
def tofile(self, fname):
'\n \n '
if (Figure is None):
warnings.warn('matplotlib not installed - cannot generate images')
return
shape = None
for img in [self._img, self._bgimage, self._mask]:
if (img is None):
continue
if (not isinstance(img, Image)):
raise ValueError(('%s: Images must be instances of fsl.data.Image: %s' % (fname, img)))
if (shape is None):
shape = img.shape
if (img.ndim != 3):
raise ValueError('Images must be 3D')
if (img.shape != shape):
raise ValueError('Images do not have consistent shapes')
(min_slice, max_slice) = self._slicerange(self._img, shape)
num_slices = min(16, ((max_slice - min_slice) + 1))
grid_size = int(math.ceil(math.sqrt(num_slices)))
fig = Figure(figsize=(5, 5), dpi=200)
FigureCanvas(fig)
for nslice in range(num_slices):
axes = fig.add_subplot(grid_size, grid_size, (nslice + 1))
axes.set_yticklabels([])
axes.set_xticklabels([])
axes.set_xticks([])
axes.set_yticks([])
slice_idx = (int((float((((max_slice - min_slice) + 1) * nslice)) / num_slices)) + min_slice)
if self._bgimage:
data = self._bgimage.data[(:, :, slice_idx)].T
axes.imshow(data, cmap='gray')
if self._img:
data = self._img.data[(:, :, slice_idx)].T
data[(~ np.isfinite(data))] = 0
if issubclass(data.dtype.type, np.integer):
cmap = 'Reds'
(vmax, vmin) = (np.max(self._img.data), np.min(self._img.data))
else:
cmap = 'viridis'
(vmax, vmin) = (np.percentile(self._img.data, 99), np.percentile(self._img.data, 1))
if (vmax == vmin):
(vmax, vmin) = (np.max(self._img.data), np.min(self._img.data))
if self._clamp_colors:
data = np.clip(data, vmin, vmax)
if self._outline:
data = (data > 0.5).astype(np.int)
data = (data - scipy.ndimage.morphology.binary_erosion(data, structure=np.ones((3, 3))))
if self._mask:
data = np.ma.masked_array(data, (self._mask.data[(:, :, slice_idx)].T == 0))
elif self._zeromask:
data = np.ma.masked_array(data, (data == 0))
img = axes.imshow(data, vmax=vmax, vmin=vmin, cmap=cmap)
axes.set_ylim(axes.get_ylim()[::(- 1)])
fig.subplots_adjust(wspace=0, hspace=0.05)
if (self._img and self._colorbar):
fig.colorbar(img, ax=fig.axes)
fig.savefig(fname, bbox_inches='tight')<|docstring|>Write image to a file<|endoftext|>
|
ca0ed80a7a375b1f7b48f2649014125398f84e3e2c6c2afdc599215a438e37c7
|
def __init__(self, data, xlabel, ylabel, **kwargs):
'\n :param *imgs: One or more ``fsl.data.Image`` instances. Later images will be\n overlaid onto earlier images\n :param zeromask: If True, treat zero values as transparent\n '
self._data = data
self._xlabel = xlabel
self._ylabel = ylabel
self._figsize = kwargs.get('figsize', (6, 3))
self.extension = '.png'
|
:param *imgs: One or more ``fsl.data.Image`` instances. Later images will be
overlaid onto earlier images
:param zeromask: If True, treat zero values as transparent
|
oxasl/reporting.py
|
__init__
|
physimals/oxasl
| 1
|
python
|
def __init__(self, data, xlabel, ylabel, **kwargs):
'\n :param *imgs: One or more ``fsl.data.Image`` instances. Later images will be\n overlaid onto earlier images\n :param zeromask: If True, treat zero values as transparent\n '
self._data = data
self._xlabel = xlabel
self._ylabel = ylabel
self._figsize = kwargs.get('figsize', (6, 3))
self.extension = '.png'
|
def __init__(self, data, xlabel, ylabel, **kwargs):
'\n :param *imgs: One or more ``fsl.data.Image`` instances. Later images will be\n overlaid onto earlier images\n :param zeromask: If True, treat zero values as transparent\n '
self._data = data
self._xlabel = xlabel
self._ylabel = ylabel
self._figsize = kwargs.get('figsize', (6, 3))
self.extension = '.png'<|docstring|>:param *imgs: One or more ``fsl.data.Image`` instances. Later images will be
overlaid onto earlier images
:param zeromask: If True, treat zero values as transparent<|endoftext|>
|
c6426f6f94246e5799aae49b7378f3aea019990afe60fa9ea19ce35c2fca6e83
|
def tofile(self, fname):
'\n Write image to a file\n '
if (Figure is None):
warnings.warn('matplotlib not installed - cannot generate graphs')
return
fig = Figure(figsize=self._figsize, dpi=200)
FigureCanvas(fig)
axes = fig.add_subplot(1, 1, 1)
axes.set_xlabel(self._xlabel)
axes.set_ylabel(self._ylabel)
axes.plot(self._data)
fig.savefig(fname, bbox_inches='tight')
|
Write image to a file
|
oxasl/reporting.py
|
tofile
|
physimals/oxasl
| 1
|
python
|
def tofile(self, fname):
'\n \n '
if (Figure is None):
warnings.warn('matplotlib not installed - cannot generate graphs')
return
fig = Figure(figsize=self._figsize, dpi=200)
FigureCanvas(fig)
axes = fig.add_subplot(1, 1, 1)
axes.set_xlabel(self._xlabel)
axes.set_ylabel(self._ylabel)
axes.plot(self._data)
fig.savefig(fname, bbox_inches='tight')
|
def tofile(self, fname):
'\n \n '
if (Figure is None):
warnings.warn('matplotlib not installed - cannot generate graphs')
return
fig = Figure(figsize=self._figsize, dpi=200)
FigureCanvas(fig)
axes = fig.add_subplot(1, 1, 1)
axes.set_xlabel(self._xlabel)
axes.set_ylabel(self._ylabel)
axes.plot(self._data)
fig.savefig(fname, bbox_inches='tight')<|docstring|>Write image to a file<|endoftext|>
|
052749b1e1f0975870bf5a4e2b6e0f62752c89d0d231ab883eff05e123073d83
|
def image(self, name, img_obj):
'\n Add a block-level image\n '
self._report.add(name, img_obj)
self._content += ('.. image:: %s%s\n\n' % (name, img_obj.extension))
|
Add a block-level image
|
oxasl/reporting.py
|
image
|
physimals/oxasl
| 1
|
python
|
def image(self, name, img_obj):
'\n \n '
self._report.add(name, img_obj)
self._content += ('.. image:: %s%s\n\n' % (name, img_obj.extension))
|
def image(self, name, img_obj):
'\n \n '
self._report.add(name, img_obj)
self._content += ('.. image:: %s%s\n\n' % (name, img_obj.extension))<|docstring|>Add a block-level image<|endoftext|>
|
8187d40d6d6c55ed623a7433f83521d2d4b5333f7be60dc43c0292da5bbeaf41
|
def text(self, txt):
'\n Add a line of text content\n '
self._content += (txt + '\n\n')
|
Add a line of text content
|
oxasl/reporting.py
|
text
|
physimals/oxasl
| 1
|
python
|
def text(self, txt):
'\n \n '
self._content += (txt + '\n\n')
|
def text(self, txt):
'\n \n '
self._content += (txt + '\n\n')<|docstring|>Add a line of text content<|endoftext|>
|
d44f7ece8b2819158283d1ad4f2dfcd012595f633c6afb4ca388d8d4b657b3e1
|
def maths(self, content):
'\n Write mathematical content\n '
self.text('.. math::')
if isinstance(content, six.string_types):
content = content.splitlines()
for line in content:
self._content += ((' ' + line) + '\n')
self._content += '\n'
|
Write mathematical content
|
oxasl/reporting.py
|
maths
|
physimals/oxasl
| 1
|
python
|
def maths(self, content):
'\n \n '
self.text('.. math::')
if isinstance(content, six.string_types):
content = content.splitlines()
for line in content:
self._content += ((' ' + line) + '\n')
self._content += '\n'
|
def maths(self, content):
'\n \n '
self.text('.. math::')
if isinstance(content, six.string_types):
content = content.splitlines()
for line in content:
self._content += ((' ' + line) + '\n')
self._content += '\n'<|docstring|>Write mathematical content<|endoftext|>
|
96a8adb0821aa2a1e9828eef2d90ec82cff9367d3f0af91690a0cb7c8f183982
|
def matrix(self, mat, sig_fig=3):
'\n Add a matrix of numbers\n '
matrix_latex = '\\begin{bmatrix}\n'
for row in mat:
matrix_latex += (' & '.join([self._latex_float(v, sig_fig) for v in row]) + ' \\\\\n')
matrix_latex += '\\end{bmatrix}\n'
self.maths(matrix_latex)
|
Add a matrix of numbers
|
oxasl/reporting.py
|
matrix
|
physimals/oxasl
| 1
|
python
|
def matrix(self, mat, sig_fig=3):
'\n \n '
matrix_latex = '\\begin{bmatrix}\n'
for row in mat:
matrix_latex += (' & '.join([self._latex_float(v, sig_fig) for v in row]) + ' \\\\\n')
matrix_latex += '\\end{bmatrix}\n'
self.maths(matrix_latex)
|
def matrix(self, mat, sig_fig=3):
'\n \n '
matrix_latex = '\\begin{bmatrix}\n'
for row in mat:
matrix_latex += (' & '.join([self._latex_float(v, sig_fig) for v in row]) + ' \\\\\n')
matrix_latex += '\\end{bmatrix}\n'
self.maths(matrix_latex)<|docstring|>Add a matrix of numbers<|endoftext|>
|
6d7761883035b67830e574f92dce31e85a438f0888b18a81d9537d7ccba5de9c
|
def heading(self, txt, level=0):
'\n Add a heading\n '
if (level >= len(self._heading_chars)):
raise ValueError(('Unsupported heading level: %i' % level))
self._content += (txt + '\n')
self._content += ((self._heading_chars[level] * len(txt)) + '\n\n')
|
Add a heading
|
oxasl/reporting.py
|
heading
|
physimals/oxasl
| 1
|
python
|
def heading(self, txt, level=0):
'\n \n '
if (level >= len(self._heading_chars)):
raise ValueError(('Unsupported heading level: %i' % level))
self._content += (txt + '\n')
self._content += ((self._heading_chars[level] * len(txt)) + '\n\n')
|
def heading(self, txt, level=0):
'\n \n '
if (level >= len(self._heading_chars)):
raise ValueError(('Unsupported heading level: %i' % level))
self._content += (txt + '\n')
self._content += ((self._heading_chars[level] * len(txt)) + '\n\n')<|docstring|>Add a heading<|endoftext|>
|
644c5a73ea2b865d82c8163f9d087292c57084f51be4ec234321c180c3b4adc2
|
def table(self, tabdata, name='', headers=None, align=None):
'\n Add a table\n '
self._content += (('.. csv-table:: ' + name) + '\n')
if align:
self._content += ((' :align: ' + align) + '\n')
if headers:
self._content += ((' :header: ' + ','.join([('"%s"' % h) for h in headers])) + '\n')
self._content += '\n'
csvtxt = six.StringIO()
writer = csv.writer(csvtxt)
if six.PY2:
for row in tabdata:
writer.writerow([unicode(s).encode('utf-8') for s in row])
for line in csvtxt.getvalue().splitlines():
self._content += ((' ' + line.decode('utf-8')) + '\n')
else:
for row in tabdata:
writer.writerow([str(s) for s in row])
for line in csvtxt.getvalue().splitlines():
self._content += ((' ' + line) + '\n')
self._content += '\n'
|
Add a table
|
oxasl/reporting.py
|
table
|
physimals/oxasl
| 1
|
python
|
def table(self, tabdata, name=, headers=None, align=None):
'\n \n '
self._content += (('.. csv-table:: ' + name) + '\n')
if align:
self._content += ((' :align: ' + align) + '\n')
if headers:
self._content += ((' :header: ' + ','.join([('"%s"' % h) for h in headers])) + '\n')
self._content += '\n'
csvtxt = six.StringIO()
writer = csv.writer(csvtxt)
if six.PY2:
for row in tabdata:
writer.writerow([unicode(s).encode('utf-8') for s in row])
for line in csvtxt.getvalue().splitlines():
self._content += ((' ' + line.decode('utf-8')) + '\n')
else:
for row in tabdata:
writer.writerow([str(s) for s in row])
for line in csvtxt.getvalue().splitlines():
self._content += ((' ' + line) + '\n')
self._content += '\n'
|
def table(self, tabdata, name=, headers=None, align=None):
'\n \n '
self._content += (('.. csv-table:: ' + name) + '\n')
if align:
self._content += ((' :align: ' + align) + '\n')
if headers:
self._content += ((' :header: ' + ','.join([('"%s"' % h) for h in headers])) + '\n')
self._content += '\n'
csvtxt = six.StringIO()
writer = csv.writer(csvtxt)
if six.PY2:
for row in tabdata:
writer.writerow([unicode(s).encode('utf-8') for s in row])
for line in csvtxt.getvalue().splitlines():
self._content += ((' ' + line.decode('utf-8')) + '\n')
else:
for row in tabdata:
writer.writerow([str(s) for s in row])
for line in csvtxt.getvalue().splitlines():
self._content += ((' ' + line) + '\n')
self._content += '\n'<|docstring|>Add a table<|endoftext|>
|
461671addd2cf118be629a892e3cd8ebc05492ffcc9018eeb0a78076ac2d7737
|
def dicttable(self, dictionary):
"\n Add table based on the contents of a python dictionary.\n\n The columns are 'Key' and 'Value'\n "
tabdata = dictionary.items()
self.table(tabdata, headers=('Key', 'Value'))
|
Add table based on the contents of a python dictionary.
The columns are 'Key' and 'Value'
|
oxasl/reporting.py
|
dicttable
|
physimals/oxasl
| 1
|
python
|
def dicttable(self, dictionary):
"\n Add table based on the contents of a python dictionary.\n\n The columns are 'Key' and 'Value'\n "
tabdata = dictionary.items()
self.table(tabdata, headers=('Key', 'Value'))
|
def dicttable(self, dictionary):
"\n Add table based on the contents of a python dictionary.\n\n The columns are 'Key' and 'Value'\n "
tabdata = dictionary.items()
self.table(tabdata, headers=('Key', 'Value'))<|docstring|>Add table based on the contents of a python dictionary.
The columns are 'Key' and 'Value'<|endoftext|>
|
c767f3224644e9dde967d8a249c1cb671e81d5372f370aefaa9285f26872f293
|
def tofile(self, fname):
'\n Write RST content to a file\n '
with open(fname, 'wb') as rstfile:
rstfile.write(self._content.encode('utf-8'))
|
Write RST content to a file
|
oxasl/reporting.py
|
tofile
|
physimals/oxasl
| 1
|
python
|
def tofile(self, fname):
'\n \n '
with open(fname, 'wb') as rstfile:
rstfile.write(self._content.encode('utf-8'))
|
def tofile(self, fname):
'\n \n '
with open(fname, 'wb') as rstfile:
rstfile.write(self._content.encode('utf-8'))<|docstring|>Write RST content to a file<|endoftext|>
|
72b95393fe2e8236bf1cb43cda2985774204f430c93e0ec0c1ede425683d408b
|
def _latex_float(self, val, sig_fig=3):
'\n Format float in format suitable for Latex - nicked off StackOverflow!\n '
pyformat = ('{0:.%ig}' % sig_fig)
float_str = pyformat.format(val)
if ('e' in float_str):
(base, exponent) = float_str.split('e')
return '{0} \\times 10^{{{1}}}'.format(base, int(exponent))
else:
return float_str
|
Format float in format suitable for Latex - nicked off StackOverflow!
|
oxasl/reporting.py
|
_latex_float
|
physimals/oxasl
| 1
|
python
|
def _latex_float(self, val, sig_fig=3):
'\n \n '
pyformat = ('{0:.%ig}' % sig_fig)
float_str = pyformat.format(val)
if ('e' in float_str):
(base, exponent) = float_str.split('e')
return '{0} \\times 10^{{{1}}}'.format(base, int(exponent))
else:
return float_str
|
def _latex_float(self, val, sig_fig=3):
'\n \n '
pyformat = ('{0:.%ig}' % sig_fig)
float_str = pyformat.format(val)
if ('e' in float_str):
(base, exponent) = float_str.split('e')
return '{0} \\times 10^{{{1}}}'.format(base, int(exponent))
else:
return float_str<|docstring|>Format float in format suitable for Latex - nicked off StackOverflow!<|endoftext|>
|
6725e37b2369b73f18397f8e7c6652b1ea86d332db1af7ac7664d7223d37b4ae
|
def generate(self, dest_dir, build_dir=None, log=sys.stdout, keep_build_dir=False, doctype='html'):
'\n Generate an output report\n '
build_dir = self._build_src(build_dir, log)
try:
if os.path.exists(dest_dir):
if (not os.path.isdir(dest_dir)):
raise ValueError(('Report destination directory %s exists but is not a directory' % dest_dir))
else:
warnings.warn(('Report destination directory %s already exists - removing' % dest_dir))
shutil.rmtree(dest_dir)
os.makedirs(dest_dir)
args = [which('sphinx-build'), '-b', doctype, build_dir, dest_dir]
if (args[0] is not None):
import subprocess
result = subprocess.call(args)
else:
import sphinx
if hasattr(sphinx, 'main'):
result = sphinx.main(args)
else:
import sphinx.cmd.build
result = sphinx.cmd.build.main(args[1:])
except (AttributeError, ImportError, OSError):
log.write('WARNING: sphinx not found, HTML report will not be generated\n')
return False
except Exception as exc:
log.write('WARNING: sphinx failed, HTML report will not be generated\n')
log.write(('Message: %s\n' % str(exc)))
return False
finally:
if (not keep_build_dir):
shutil.rmtree(build_dir)
return True
|
Generate an output report
|
oxasl/reporting.py
|
generate
|
physimals/oxasl
| 1
|
python
|
def generate(self, dest_dir, build_dir=None, log=sys.stdout, keep_build_dir=False, doctype='html'):
'\n \n '
build_dir = self._build_src(build_dir, log)
try:
if os.path.exists(dest_dir):
if (not os.path.isdir(dest_dir)):
raise ValueError(('Report destination directory %s exists but is not a directory' % dest_dir))
else:
warnings.warn(('Report destination directory %s already exists - removing' % dest_dir))
shutil.rmtree(dest_dir)
os.makedirs(dest_dir)
args = [which('sphinx-build'), '-b', doctype, build_dir, dest_dir]
if (args[0] is not None):
import subprocess
result = subprocess.call(args)
else:
import sphinx
if hasattr(sphinx, 'main'):
result = sphinx.main(args)
else:
import sphinx.cmd.build
result = sphinx.cmd.build.main(args[1:])
except (AttributeError, ImportError, OSError):
log.write('WARNING: sphinx not found, HTML report will not be generated\n')
return False
except Exception as exc:
log.write('WARNING: sphinx failed, HTML report will not be generated\n')
log.write(('Message: %s\n' % str(exc)))
return False
finally:
if (not keep_build_dir):
shutil.rmtree(build_dir)
return True
|
def generate(self, dest_dir, build_dir=None, log=sys.stdout, keep_build_dir=False, doctype='html'):
'\n \n '
build_dir = self._build_src(build_dir, log)
try:
if os.path.exists(dest_dir):
if (not os.path.isdir(dest_dir)):
raise ValueError(('Report destination directory %s exists but is not a directory' % dest_dir))
else:
warnings.warn(('Report destination directory %s already exists - removing' % dest_dir))
shutil.rmtree(dest_dir)
os.makedirs(dest_dir)
args = [which('sphinx-build'), '-b', doctype, build_dir, dest_dir]
if (args[0] is not None):
import subprocess
result = subprocess.call(args)
else:
import sphinx
if hasattr(sphinx, 'main'):
result = sphinx.main(args)
else:
import sphinx.cmd.build
result = sphinx.cmd.build.main(args[1:])
except (AttributeError, ImportError, OSError):
log.write('WARNING: sphinx not found, HTML report will not be generated\n')
return False
except Exception as exc:
log.write('WARNING: sphinx failed, HTML report will not be generated\n')
log.write(('Message: %s\n' % str(exc)))
return False
finally:
if (not keep_build_dir):
shutil.rmtree(build_dir)
return True<|docstring|>Generate an output report<|endoftext|>
|
742edd90e219c6e56a844b8875de0ba54b7382e8fd324282b596512bc89eea23
|
def tofile(self, build_dir):
'\n Write the report contents out to a build directory.\n\n This is the RST and image content - not the built HTML/PDF documentation\n '
if (not os.path.exists(build_dir)):
os.makedirs(build_dir)
with open(os.path.join(build_dir, 'index.rst'), 'wb') as indexfile:
indexfile.write(self._encode((self.title + '\n')))
indexfile.write(self._encode((('=' * len(self.title)) + '\n\n')))
if self._include_timings:
self._timings(indexfile)
if self._include_toc:
self._toc(indexfile)
indexfile.write(self._encode(self._content))
for (fname, content) in self._files.items():
try:
content.tofile(os.path.join(build_dir, fname))
except Exception as exc:
import traceback
traceback.print_exc()
warnings.warn(('Error writing report content %s to file: %s' % (fname, exc)))
|
Write the report contents out to a build directory.
This is the RST and image content - not the built HTML/PDF documentation
|
oxasl/reporting.py
|
tofile
|
physimals/oxasl
| 1
|
python
|
def tofile(self, build_dir):
'\n Write the report contents out to a build directory.\n\n This is the RST and image content - not the built HTML/PDF documentation\n '
if (not os.path.exists(build_dir)):
os.makedirs(build_dir)
with open(os.path.join(build_dir, 'index.rst'), 'wb') as indexfile:
indexfile.write(self._encode((self.title + '\n')))
indexfile.write(self._encode((('=' * len(self.title)) + '\n\n')))
if self._include_timings:
self._timings(indexfile)
if self._include_toc:
self._toc(indexfile)
indexfile.write(self._encode(self._content))
for (fname, content) in self._files.items():
try:
content.tofile(os.path.join(build_dir, fname))
except Exception as exc:
import traceback
traceback.print_exc()
warnings.warn(('Error writing report content %s to file: %s' % (fname, exc)))
|
def tofile(self, build_dir):
'\n Write the report contents out to a build directory.\n\n This is the RST and image content - not the built HTML/PDF documentation\n '
if (not os.path.exists(build_dir)):
os.makedirs(build_dir)
with open(os.path.join(build_dir, 'index.rst'), 'wb') as indexfile:
indexfile.write(self._encode((self.title + '\n')))
indexfile.write(self._encode((('=' * len(self.title)) + '\n\n')))
if self._include_timings:
self._timings(indexfile)
if self._include_toc:
self._toc(indexfile)
indexfile.write(self._encode(self._content))
for (fname, content) in self._files.items():
try:
content.tofile(os.path.join(build_dir, fname))
except Exception as exc:
import traceback
traceback.print_exc()
warnings.warn(('Error writing report content %s to file: %s' % (fname, exc)))<|docstring|>Write the report contents out to a build directory.
This is the RST and image content - not the built HTML/PDF documentation<|endoftext|>
|
31193199f1906618e929e7bcb266671c069b3e9f5f5d5f6cd8aff8f27b73555f
|
def page(self, name, overwrite=False, **kwargs):
'\n Add a page to the report. The page is returned for content to be added\n\n :param name: Name of the page.\n :param overwrite: If True, and page already exists with the same ``name``,\n replace content. Otherwise an exception is thrown.\n '
page = ReportPage(self, **kwargs)
self.add(name, page, overwrite)
return page
|
Add a page to the report. The page is returned for content to be added
:param name: Name of the page.
:param overwrite: If True, and page already exists with the same ``name``,
replace content. Otherwise an exception is thrown.
|
oxasl/reporting.py
|
page
|
physimals/oxasl
| 1
|
python
|
def page(self, name, overwrite=False, **kwargs):
'\n Add a page to the report. The page is returned for content to be added\n\n :param name: Name of the page.\n :param overwrite: If True, and page already exists with the same ``name``,\n replace content. Otherwise an exception is thrown.\n '
page = ReportPage(self, **kwargs)
self.add(name, page, overwrite)
return page
|
def page(self, name, overwrite=False, **kwargs):
'\n Add a page to the report. The page is returned for content to be added\n\n :param name: Name of the page.\n :param overwrite: If True, and page already exists with the same ``name``,\n replace content. Otherwise an exception is thrown.\n '
page = ReportPage(self, **kwargs)
self.add(name, page, overwrite)
return page<|docstring|>Add a page to the report. The page is returned for content to be added
:param name: Name of the page.
:param overwrite: If True, and page already exists with the same ``name``,
replace content. Otherwise an exception is thrown.<|endoftext|>
|
b55efcc74112ca234b2e784f4a35c7c5b9c035b0bb2a69a7c72125b99d52e1e3
|
def add(self, name, content, overwrite=False):
    """
    Add content to a report

    :param name: Name of the content.
    :param content: Content object which has ``extension`` attribute and
                    supports ``tofile()`` method
    :param overwrite: If True, and content already exists with the same
                      ``name`` and extension, replace it. Otherwise a unique
                      name is chosen by appending a numeric suffix.

    :return: Name given to the object in the report. If ``overwrite=True``
             this will be the same as ``name``
    """
    fname = name + content.extension
    if not overwrite:
        # Bug fix: idx was never incremented, so each collision appended
        # another '_2' (name_2_2_2...). Now we generate name_2, name_3, ...
        # from the original base name until the filename is unused.
        base_name = name
        idx = 2
        while fname in self._files:
            name = '%s_%i' % (base_name, idx)
            fname = name + content.extension
            idx += 1
    self._files[fname] = content
    if isinstance(content, ReportPage):
        self._contents.append(name)
    if isinstance(content, Report):
        self._contents.append(name + '/index')
    return name
|
Add content to a report
:param name: Name of the content.
:param content: Content object which has ``extension`` attribute and supports ``tofile()`` method
:param overwrite: If True, and content already exists with the same ``name`` and extension,
replace content. Otherwise an exception is thrown.
:return: Name given to the object in the report. If ``overwrite=True`` this will be the same as ``name``
|
oxasl/reporting.py
|
add
|
physimals/oxasl
| 1
|
python
|
def add(self, name, content, overwrite=False):
'\n Add content to a report\n\n :param name: Name of the content.\n :param content: Content object which has ``extension`` attribute and supports ``tofile()`` method\n :param overwrite: If True, and content already exists with the same ``name`` and extension,\n replace content. Otherwise an exception is thrown.\n\n :return: Name given to the object in the report. If ``overwrite=True`` this will be the same as ``name``\n '
fname = (name + content.extension)
if (not overwrite):
idx = 2
while (fname in self._files):
name = (name + ('_%i' % idx))
fname = (name + content.extension)
self._files[fname] = content
if isinstance(content, ReportPage):
self._contents.append(name)
if isinstance(content, Report):
self._contents.append((name + '/index'))
return name
|
def add(self, name, content, overwrite=False):
'\n Add content to a report\n\n :param name: Name of the content.\n :param content: Content object which has ``extension`` attribute and supports ``tofile()`` method\n :param overwrite: If True, and content already exists with the same ``name`` and extension,\n replace content. Otherwise an exception is thrown.\n\n :return: Name given to the object in the report. If ``overwrite=True`` this will be the same as ``name``\n '
fname = (name + content.extension)
if (not overwrite):
idx = 2
while (fname in self._files):
name = (name + ('_%i' % idx))
fname = (name + content.extension)
self._files[fname] = content
if isinstance(content, ReportPage):
self._contents.append(name)
if isinstance(content, Report):
self._contents.append((name + '/index'))
return name<|docstring|>Add content to a report
:param name: Name of the content.
:param content: Content object which has ``extension`` attribute and supports ``tofile()`` method
:param overwrite: If True, and content already exists with the same ``name`` and extension,
replace content. Otherwise an exception is thrown.
:return: Name given to the object in the report. If ``overwrite=True`` this will be the same as ``name``<|endoftext|>
|
b26df17d0c56012175796f63d679e19ec88dbcd1899e9d5210a8610f725283b9
|
def send_mail(send_from, send_to, subject, message, cc=None, server='localhost', use_tls=True, port=587, username='', password=''):
    """Compose and send a plain-text email.

    Arguments:
        send_from (str): from address
        send_to (str): to address
        subject (str): message title
        message (str): message body
        cc (str): cc address, included both in the header and as an
            envelope recipient
        server (str): mail server host name
        use_tls (bool): use TLS mode
        port (int): port number
        username (str): server auth username
        password (str): server auth password
    """
    import smtplib
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.utils import formatdate
    msg = MIMEMultipart()
    msg['From'] = send_from
    msg['To'] = send_to
    recipients = [send_to]
    if cc:
        msg['Cc'] = cc
        # Bug fix: the 'Cc' header alone does not deliver mail; cc addresses
        # must also be in the SMTP envelope recipient list.
        recipients.append(cc)
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = subject
    msg.attach(MIMEText(message))
    smtp = smtplib.SMTP(server, port)
    try:
        if use_tls:
            smtp.ehlo()
            smtp.starttls()
            smtp.ehlo()
        smtp.login(username, password)
        smtp.sendmail(send_from, recipients, msg.as_string())
    finally:
        # Always close the connection, even if login/sendmail fails.
        smtp.quit()
|
Compose and send email with provided info and attachments.
Arguments:
send_from (str): from name
send_to (str): to name
subject (str): message title
message (str): message body
files (list[str]): list of file paths to be attached to email
cc (str): cc name
server (str): mail server host name
use_tls (bool): use TLS mode
port (int): port number
username (str): server auth username
password (str): server auth password
|
airflow/dags/starter.py
|
send_mail
|
yaojiach/docker-airflow-boilerplate
| 1
|
python
|
def send_mail(send_from, send_to, subject, message, cc=None, server='localhost', use_tls=True, port=587, username=, password=):
'Compose and send email with provided info and attachments.\n\n Arguments:\n send_from (str): from name\n send_to (str): to name\n subject (str): message title\n message (str): message body\n files (list[str]): list of file paths to be attached to email\n cc (str): cc name\n server (str): mail server host name\n use_tls (bool): use TLS mode\n port (int): port number\n username (str): server auth username\n password (str): server auth password\n '
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import formatdate
from email import encoders
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = send_to
if cc:
msg['Cc'] = cc
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach(MIMEText(message))
smtp = smtplib.SMTP(server, port)
if use_tls:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(username, password)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.quit()
|
def send_mail(send_from, send_to, subject, message, cc=None, server='localhost', use_tls=True, port=587, username=, password=):
'Compose and send email with provided info and attachments.\n\n Arguments:\n send_from (str): from name\n send_to (str): to name\n subject (str): message title\n message (str): message body\n files (list[str]): list of file paths to be attached to email\n cc (str): cc name\n server (str): mail server host name\n use_tls (bool): use TLS mode\n port (int): port number\n username (str): server auth username\n password (str): server auth password\n '
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import formatdate
from email import encoders
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = send_to
if cc:
msg['Cc'] = cc
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach(MIMEText(message))
smtp = smtplib.SMTP(server, port)
if use_tls:
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(username, password)
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.quit()<|docstring|>Compose and send email with provided info and attachments.
Arguments:
send_from (str): from name
send_to (str): to name
subject (str): message title
message (str): message body
files (list[str]): list of file paths to be attached to email
cc (str): cc name
server (str): mail server host name
use_tls (bool): use TLS mode
port (int): port number
username (str): server auth username
password (str): server auth password<|endoftext|>
|
22578495d51fd50320d1c7172d192fd9656c2260e97018b6eb7e9669eddc12b1
|
def train(config, restore=False):
    """
    Train a model for each configured dataset.

    :param config: type dict: config parameter
    :param restore: type bool, True if resume training from the last checkpoint
    :return: models: type list of model, trained model per dataset
    :return: histories: type list of list of float, metrics evaluating value from each epoch.
    """
    (models, histories) = ([], [])
    # One training run per dataset. pickle_path / pickle_max_shape are zipped
    # along with the datasets but are not referenced inside the loop body.
    for (pickle_path, pickle_max_shape, dataset) in zip(config['filename_tfrec_pickle'], config['filename_max_shape_pickle'], config['dataset']):
        if restore:
            # When resuming, skip datasets until we reach the one recorded in
            # the checkpoint info file.
            with open(((((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/') + 'training_info.pickle'), 'rb') as fp:
                restore_dataset = pickle.load(fp)[dataset]
            # NOTE(review): training_info.pickle is written below with keys
            # 'epoch'/'dataype', so indexing it with the dataset name looks
            # suspect (likely KeyError) -- confirm the intended key.
            if (restore_dataset != dataset):
                continue
            else:
                print('Resume training dataset: ', dataset, '...')
        # Fill in per-dataset shape/channel parameters.
        config = train_config_setting(config, dataset)
        # Train/val split paths were precomputed and pickled ('_bi' variant
        # for body-identification mode).
        if config['read_body_identification']:
            split_filename = (((config['dir_dataset_info'] + '/split_paths_') + dataset) + '_bi.pickle')
        else:
            split_filename = (((config['dir_dataset_info'] + '/split_paths_') + dataset) + '.pickle')
        with open(split_filename, 'rb') as fp:
            paths = pickle.load(fp)
        # Either build a fresh model from the ModelSet factory named in the
        # config, or continue training a previously saved model.
        if (not config['train_premodel']):
            call_model = getattr(ModelSet, config['model'])
            (model, list_metric_names) = call_model(self=ModelSet, config=config)
        else:
            model = load_model_file(config, dataset, compile=True)
        print(model.summary())
        # Periodic full-model checkpointing into the experiment directory.
        if (not os.path.exists(((config['dir_model_checkpoint'] + '/') + config['exp_name']))):
            os.makedirs(((config['dir_model_checkpoint'] + '/') + config['exp_name']))
        checkpoint_path = (((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/cp.hdf5')
        cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_weights_only=False, period=config['save_training_model_period'])
        init_epoch = 0
        if restore:
            # Resume weights and epoch counter, then clear the restore flag so
            # subsequent datasets start from scratch.
            model.load_weights(checkpoint_path)
            with open(((((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/') + 'training_info.pickle'), 'rb') as fp:
                init_epoch = (pickle.load(fp)['epoch'] + 1)
            restore = False
        # Callback that records resume info and appends a human-readable log
        # line every save period. It closes over `config` and `dataset` from
        # this loop iteration.
        class Additional_Saver(tf.keras.callbacks.Callback):
            def on_epoch_end(self, epoch, logs={}):
                if (((epoch % config['save_training_model_period']) == 0) and (epoch != 0)):
                    with open((((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/training_info.pickle'), 'wb') as fp:
                        pickle.dump({'epoch': epoch, 'dataype': dataset}, fp, protocol=pickle.HIGHEST_PROTOCOL)
                    if (not os.path.exists('train_record')):
                        os.makedirs('train_record')
                    file1 = open((((('train_record/' + config['model']) + '_') + dataset) + '.txt'), 'a+')
                    now = datetime.datetime.now()
                    file1.write(((((((((('dataset: ' + dataset) + ', Epoch: ') + str(epoch)) + ', Model: ') + config['model']) + ', time: ') + now.strftime('%Y-%m-%d %H:%M:%S')) + ', pid:') + str(os.getpid())))
                    file1.write('\n')
                    file1.close()
        saver1 = Additional_Saver()
        print('Now training data: ', dataset)
        # k_fold is per-dataset; None means a single train/val run.
        k_fold = config['k_fold'][dataset]
        history_dataset = []
        if (k_fold is not None):
            (model, history) = k_fold_train_process(config, model, k_fold, paths, dataset, cp_callback, init_epoch, saver1)
        else:
            (model, history) = train_process(config, model, paths['path_train_img'], paths['path_train_label'], paths['path_val_img'], paths['path_val_label'], dataset, cp_callback, saver1, k_fold_index=0, init_epoch=init_epoch)
        history_dataset.append(history)
        # Save the final model for this dataset under the experiment name.
        saved_model_path = ((((config['saved_models_dir'] + '/') + config['exp_name']) + '/') + config['model'])
        if (not os.path.exists(saved_model_path)):
            os.makedirs(saved_model_path)
        model.save((((saved_model_path + '/') + dataset) + '.h5'))
        print('Training data ', dataset, 'is finished')
        models.append(model)
        histories.append(history_dataset)
    return (models, histories)
|
Train the dataset from given paths of dataset.
:param config: type dict: config parameter
:param restore: type bool, True if resume training from the last checkpoint
:return: models: type list of model, trained model
:return: histories type list of of list of float, metrics evaluating value from each epoch.
|
train.py
|
train
|
mourmoerl/med_segmentation
| 0
|
python
|
def train(config, restore=False):
'\n Train the dataset from given paths of dataset.\n :param config: type dict: config parameter\n :param restore: type bool, True if resume training from the last checkpoint\n :return: models: type list of model, trained model\n :return: histories type list of of list of float, metrics evaluating value from each epoch.\n '
(models, histories) = ([], [])
for (pickle_path, pickle_max_shape, dataset) in zip(config['filename_tfrec_pickle'], config['filename_max_shape_pickle'], config['dataset']):
if restore:
with open(((((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/') + 'training_info.pickle'), 'rb') as fp:
restore_dataset = pickle.load(fp)[dataset]
if (restore_dataset != dataset):
continue
else:
print('Resume training dataset: ', dataset, '...')
config = train_config_setting(config, dataset)
if config['read_body_identification']:
split_filename = (((config['dir_dataset_info'] + '/split_paths_') + dataset) + '_bi.pickle')
else:
split_filename = (((config['dir_dataset_info'] + '/split_paths_') + dataset) + '.pickle')
with open(split_filename, 'rb') as fp:
paths = pickle.load(fp)
if (not config['train_premodel']):
call_model = getattr(ModelSet, config['model'])
(model, list_metric_names) = call_model(self=ModelSet, config=config)
else:
model = load_model_file(config, dataset, compile=True)
print(model.summary())
if (not os.path.exists(((config['dir_model_checkpoint'] + '/') + config['exp_name']))):
os.makedirs(((config['dir_model_checkpoint'] + '/') + config['exp_name']))
checkpoint_path = (((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/cp.hdf5')
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_weights_only=False, period=config['save_training_model_period'])
init_epoch = 0
if restore:
model.load_weights(checkpoint_path)
with open(((((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/') + 'training_info.pickle'), 'rb') as fp:
init_epoch = (pickle.load(fp)['epoch'] + 1)
restore = False
class Additional_Saver(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if (((epoch % config['save_training_model_period']) == 0) and (epoch != 0)):
with open((((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/training_info.pickle'), 'wb') as fp:
pickle.dump({'epoch': epoch, 'dataype': dataset}, fp, protocol=pickle.HIGHEST_PROTOCOL)
if (not os.path.exists('train_record')):
os.makedirs('train_record')
file1 = open((((('train_record/' + config['model']) + '_') + dataset) + '.txt'), 'a+')
now = datetime.datetime.now()
file1.write(((((((((('dataset: ' + dataset) + ', Epoch: ') + str(epoch)) + ', Model: ') + config['model']) + ', time: ') + now.strftime('%Y-%m-%d %H:%M:%S')) + ', pid:') + str(os.getpid())))
file1.write('\n')
file1.close()
saver1 = Additional_Saver()
print('Now training data: ', dataset)
k_fold = config['k_fold'][dataset]
history_dataset = []
if (k_fold is not None):
(model, history) = k_fold_train_process(config, model, k_fold, paths, dataset, cp_callback, init_epoch, saver1)
else:
(model, history) = train_process(config, model, paths['path_train_img'], paths['path_train_label'], paths['path_val_img'], paths['path_val_label'], dataset, cp_callback, saver1, k_fold_index=0, init_epoch=init_epoch)
history_dataset.append(history)
saved_model_path = ((((config['saved_models_dir'] + '/') + config['exp_name']) + '/') + config['model'])
if (not os.path.exists(saved_model_path)):
os.makedirs(saved_model_path)
model.save((((saved_model_path + '/') + dataset) + '.h5'))
print('Training data ', dataset, 'is finished')
models.append(model)
histories.append(history_dataset)
return (models, histories)
|
def train(config, restore=False):
'\n Train the dataset from given paths of dataset.\n :param config: type dict: config parameter\n :param restore: type bool, True if resume training from the last checkpoint\n :return: models: type list of model, trained model\n :return: histories type list of of list of float, metrics evaluating value from each epoch.\n '
(models, histories) = ([], [])
for (pickle_path, pickle_max_shape, dataset) in zip(config['filename_tfrec_pickle'], config['filename_max_shape_pickle'], config['dataset']):
if restore:
with open(((((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/') + 'training_info.pickle'), 'rb') as fp:
restore_dataset = pickle.load(fp)[dataset]
if (restore_dataset != dataset):
continue
else:
print('Resume training dataset: ', dataset, '...')
config = train_config_setting(config, dataset)
if config['read_body_identification']:
split_filename = (((config['dir_dataset_info'] + '/split_paths_') + dataset) + '_bi.pickle')
else:
split_filename = (((config['dir_dataset_info'] + '/split_paths_') + dataset) + '.pickle')
with open(split_filename, 'rb') as fp:
paths = pickle.load(fp)
if (not config['train_premodel']):
call_model = getattr(ModelSet, config['model'])
(model, list_metric_names) = call_model(self=ModelSet, config=config)
else:
model = load_model_file(config, dataset, compile=True)
print(model.summary())
if (not os.path.exists(((config['dir_model_checkpoint'] + '/') + config['exp_name']))):
os.makedirs(((config['dir_model_checkpoint'] + '/') + config['exp_name']))
checkpoint_path = (((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/cp.hdf5')
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_weights_only=False, period=config['save_training_model_period'])
init_epoch = 0
if restore:
model.load_weights(checkpoint_path)
with open(((((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/') + 'training_info.pickle'), 'rb') as fp:
init_epoch = (pickle.load(fp)['epoch'] + 1)
restore = False
class Additional_Saver(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if (((epoch % config['save_training_model_period']) == 0) and (epoch != 0)):
with open((((config['dir_model_checkpoint'] + '/') + config['exp_name']) + '/training_info.pickle'), 'wb') as fp:
pickle.dump({'epoch': epoch, 'dataype': dataset}, fp, protocol=pickle.HIGHEST_PROTOCOL)
if (not os.path.exists('train_record')):
os.makedirs('train_record')
file1 = open((((('train_record/' + config['model']) + '_') + dataset) + '.txt'), 'a+')
now = datetime.datetime.now()
file1.write(((((((((('dataset: ' + dataset) + ', Epoch: ') + str(epoch)) + ', Model: ') + config['model']) + ', time: ') + now.strftime('%Y-%m-%d %H:%M:%S')) + ', pid:') + str(os.getpid())))
file1.write('\n')
file1.close()
saver1 = Additional_Saver()
print('Now training data: ', dataset)
k_fold = config['k_fold'][dataset]
history_dataset = []
if (k_fold is not None):
(model, history) = k_fold_train_process(config, model, k_fold, paths, dataset, cp_callback, init_epoch, saver1)
else:
(model, history) = train_process(config, model, paths['path_train_img'], paths['path_train_label'], paths['path_val_img'], paths['path_val_label'], dataset, cp_callback, saver1, k_fold_index=0, init_epoch=init_epoch)
history_dataset.append(history)
saved_model_path = ((((config['saved_models_dir'] + '/') + config['exp_name']) + '/') + config['model'])
if (not os.path.exists(saved_model_path)):
os.makedirs(saved_model_path)
model.save((((saved_model_path + '/') + dataset) + '.h5'))
print('Training data ', dataset, 'is finished')
models.append(model)
histories.append(history_dataset)
return (models, histories)<|docstring|>Train the dataset from given paths of dataset.
:param config: type dict: config parameter
:param restore: type bool, True if resume training from the last checkpoint
:return: models: type list of model, trained model
:return: histories type list of of list of float, metrics evaluating value from each epoch.<|endoftext|>
|
d1720630672c49b7fdca1200488d2021b3f6243b5bebcbae20160c7298630647
|
def train_process(config, model, paths_train_img, paths_train_label, paths_val_img, paths_val_label, dataset, cp_callback, saver1, k_fold_index=0, init_epoch=0):
    """Internal helper: build the input pipelines, fit the model, and save
    the training-history plots for one train/validation split."""
    train_data = pipeline(config, paths_train_img, paths_train_label, dataset=dataset)
    val_data = pipeline(config, paths_val_img, paths_val_label, dataset=dataset)
    history = model.fit(
        train_data,
        epochs=config['epochs'] + init_epoch,
        steps_per_epoch=config['train_steps_per_epoch'],
        callbacks=[cp_callback, saver1],
        initial_epoch=init_epoch,
        validation_data=val_data,
        validation_steps=config['val_steps_per_epoch'],
        validation_freq=config['validation_freq'],
        verbose=config['train_verbose_mode'])
    print(history.history)
    save_histories_plot_images(history, config=config, dataset=dataset, mode='train_val', k_fold_index=k_fold_index)
    return (model, history)
|
Internal function
|
train.py
|
train_process
|
mourmoerl/med_segmentation
| 0
|
python
|
def train_process(config, model, paths_train_img, paths_train_label, paths_val_img, paths_val_label, dataset, cp_callback, saver1, k_fold_index=0, init_epoch=0):
ds_train = pipeline(config, paths_train_img, paths_train_label, dataset=dataset)
ds_validation = pipeline(config, paths_val_img, paths_val_label, dataset=dataset)
history = model.fit(ds_train, epochs=(config['epochs'] + init_epoch), steps_per_epoch=config['train_steps_per_epoch'], callbacks=[cp_callback, saver1], initial_epoch=init_epoch, validation_data=ds_validation, validation_steps=config['val_steps_per_epoch'], validation_freq=config['validation_freq'], verbose=config['train_verbose_mode'])
print(history.history)
save_histories_plot_images(history, config=config, dataset=dataset, mode='train_val', k_fold_index=k_fold_index)
return (model, history)
|
def train_process(config, model, paths_train_img, paths_train_label, paths_val_img, paths_val_label, dataset, cp_callback, saver1, k_fold_index=0, init_epoch=0):
ds_train = pipeline(config, paths_train_img, paths_train_label, dataset=dataset)
ds_validation = pipeline(config, paths_val_img, paths_val_label, dataset=dataset)
history = model.fit(ds_train, epochs=(config['epochs'] + init_epoch), steps_per_epoch=config['train_steps_per_epoch'], callbacks=[cp_callback, saver1], initial_epoch=init_epoch, validation_data=ds_validation, validation_steps=config['val_steps_per_epoch'], validation_freq=config['validation_freq'], verbose=config['train_verbose_mode'])
print(history.history)
save_histories_plot_images(history, config=config, dataset=dataset, mode='train_val', k_fold_index=k_fold_index)
return (model, history)<|docstring|>Internal function<|endoftext|>
|
f9c71c918522aae243a333f79ff67497449fcf7f5f41760120580dcefe0bc0fa
|
def train_config_setting(config, dataset):
    """
    Configuring parameter for training
    :param config: type dict: config parameter
    :param dataset: type str: dataset name
    :return: config: type dict: config parameter
    """
    # Body-identification mode uses its own '_bi' max-shape pickle.
    suffix = '_bi.pickle' if config['read_body_identification'] else '.pickle'
    filename_max_shape = config['dir_dataset_info'] + '/max_shape_' + dataset + suffix
    with open(filename_max_shape, 'rb') as fp:
        config['max_shape'] = pickle.load(fp)
    # Default channel counts come from the last axis of the stored shapes.
    config['channel_img_num'] = config['max_shape']['image'][-1]
    config['channel_label_num'] = config['max_shape']['label'][-1]
    input_channels = config['input_channel'][dataset]
    if input_channels is not None:
        # An explicit channel selection overrides the stored shape.
        config['channel_img_num'] = len(input_channels)
    if not config['read_body_identification']:
        output_channels = config['output_channel'][dataset]
        if output_channels is not None:
            config['channel_label_num'] = len(output_channels)
        if config['model_add_background_output']:
            # One extra output channel for the background class.
            config['channel_label_num'] += 1
    print('channel_img,', config['channel_img_num'], 'channel_label,', config['channel_label_num'])
    return config
|
Configuring parameter for training
:param config: type dict: config parameter
:param dataset: type str: dataset name
:return: config: type dict: config parameter
|
train.py
|
train_config_setting
|
mourmoerl/med_segmentation
| 0
|
python
|
def train_config_setting(config, dataset):
'\n Configuring parameter for training\n :param config: type dict: config parameter\n :param dataset: type str: dataset name\n :return: config: type dict: config parameter\n '
if config['read_body_identification']:
filename_max_shape = (((config['dir_dataset_info'] + '/max_shape_') + dataset) + '_bi.pickle')
else:
filename_max_shape = (((config['dir_dataset_info'] + '/max_shape_') + dataset) + '.pickle')
with open(filename_max_shape, 'rb') as fp:
config['max_shape'] = pickle.load(fp)
(config['channel_img_num'], config['channel_label_num']) = (config['max_shape']['image'][(- 1)], config['max_shape']['label'][(- 1)])
if (config['input_channel'][dataset] is not None):
config['channel_img_num'] = len(config['input_channel'][dataset])
if (not config['read_body_identification']):
if (config['output_channel'][dataset] is not None):
config['channel_label_num'] = len(config['output_channel'][dataset])
if config['model_add_background_output']:
config['channel_label_num'] += 1
print('channel_img,', config['channel_img_num'], 'channel_label,', config['channel_label_num'])
return config
|
def train_config_setting(config, dataset):
'\n Configuring parameter for training\n :param config: type dict: config parameter\n :param dataset: type str: dataset name\n :return: config: type dict: config parameter\n '
if config['read_body_identification']:
filename_max_shape = (((config['dir_dataset_info'] + '/max_shape_') + dataset) + '_bi.pickle')
else:
filename_max_shape = (((config['dir_dataset_info'] + '/max_shape_') + dataset) + '.pickle')
with open(filename_max_shape, 'rb') as fp:
config['max_shape'] = pickle.load(fp)
(config['channel_img_num'], config['channel_label_num']) = (config['max_shape']['image'][(- 1)], config['max_shape']['label'][(- 1)])
if (config['input_channel'][dataset] is not None):
config['channel_img_num'] = len(config['input_channel'][dataset])
if (not config['read_body_identification']):
if (config['output_channel'][dataset] is not None):
config['channel_label_num'] = len(config['output_channel'][dataset])
if config['model_add_background_output']:
config['channel_label_num'] += 1
print('channel_img,', config['channel_img_num'], 'channel_label,', config['channel_label_num'])
return config<|docstring|>Configuring parameter for training
:param config: type dict: config parameter
:param dataset: type str: dataset name
:return: config: type dict: config parameter<|endoftext|>
|
8072571cdfd1f4c24d26a8e291a6c87c467414d7c3c6a6b9f2244cb61ed8f94a
|
def k_fold_train_process(config, model, k_fold, paths, dataset, cp_callback, init_epoch, saver1):
    """
    K-fold training

    :param config: type dict: config parameter
    :param model: type tf.keras.Model, training model
    :param k_fold: type int, number of folds
    :param paths: type dict of str: tfrecords path loaded from pickle file.
    :param dataset: type str: name of dataset
    :param cp_callback: type tf.keras.callbacks.ModelCheckpoint, training check point
    :param init_epoch: type int, initial epoch.
    :param saver1: type tf.keras.callbacks.Callback, checkpoint-info saver callback
    :return: model: type tf.keras.Model, trained model
    :return: history: type list, metrics evaluating value from each fold.
    """
    # Bug fix: history was initialised to None, so history.append(...) below
    # raised AttributeError on the very first fold. It must be a list.
    history = []
    list_1 = list(zip(paths['path_train_val_img'], paths['path_train_val_label']))
    random.shuffle(list_1)
    divided_datapath = len(list_1) // k_fold
    # Each fold needs at least one validation sample.
    assert divided_datapath > 0
    for k in range(k_fold):
        # Fold k: one contiguous slice for validation, the rest for training.
        list_val = list_1[k * divided_datapath:(k + 1) * divided_datapath]
        list_train = list_1[0:k * divided_datapath] + list_1[(k + 1) * divided_datapath:len(list_1)]
        print('k_fold', k, ' list_val:', list_val, ' list_train:', list_train)
        [paths_train_img, paths_train_label] = zip(*list_train)
        [paths_val_img, paths_val_label] = zip(*list_val)
        print('Now training data:', dataset, ', k fold: ', k, ' ...')
        if not config['k_fold_merge_model']:
            # Continue the epoch counter across folds so checkpoints/logs
            # from successive folds do not overlap.
            (model, history_curr) = train_process(config, model, paths_train_img, paths_train_label, paths_val_img, paths_val_label, dataset, cp_callback, saver1, k_fold_index=k, init_epoch=(k * config['epochs']) + init_epoch)
            history.append(history_curr)
        else:
            (model, history_curr) = train_process(config, model, paths_train_img, paths_train_label, paths_val_img, paths_val_label, dataset, cp_callback, saver1, k_fold_index=k, init_epoch=init_epoch)
            history.append(history_curr)
        saved_model_path = config['saved_models_dir'] + '/' + config['exp_name'] + '/' + config['model']
        if not os.path.exists(saved_model_path):
            os.makedirs(saved_model_path)
        model.save(saved_model_path + '/' + dataset + 'k_fold_' + str(k) + '.h5')
        if k != (k_fold - 1):
            # Re-initialise the model for the next fold (fresh build from the
            # ModelSet factory, or reload of the pre-trained model).
            if not config['train_premodel']:
                call_model = getattr(ModelSet, config['model'])
                (model, list_metric_names) = call_model(self=ModelSet, config=config)
            else:
                model = load_model_file(config, dataset, compile=True)
    return (model, history)
|
K-fold training
:param config: type dict: config parameter
:param model: type tf.keras.Model, training model
:param paths: type dict of str: tfrecords path loaded from pickle file.
:param dataset: type str: name of dataset
:param cp_callback: type tf.keras.callbacks.ModelCheckpoint, training check point
:param init_epoch: type int, initial epoch.
:return: models: type tf.keras.Model, trained model
:return: history type list of float, metrics evaluating value from each epoch.
|
train.py
|
k_fold_train_process
|
mourmoerl/med_segmentation
| 0
|
python
|
def k_fold_train_process(config, model, k_fold, paths, dataset, cp_callback, init_epoch, saver1):
'\n K-fold training\n\n :param config: type dict: config parameter\n :param model: type tf.keras.Model, training model\n :param paths: type dict of str: tfrecords path loaded from pickle file.\n :param dataset: type str: name of dataset\n :param cp_callback: type tf.keras.callbacks.ModelCheckpoint, training check point\n :param init_epoch: type int, initial epoch.\n :return: models: type tf.keras.Model, trained model\n :return: history type list of float, metrics evaluating value from each epoch.\n '
history = None
list_1 = list(zip(paths['path_train_val_img'], paths['path_train_val_label']))
random.shuffle(list_1)
divided_datapath = (len(list_1) // k_fold)
assert (divided_datapath > 0)
for k in range(k_fold):
list_val = list_1[(k * divided_datapath):((k + 1) * divided_datapath)]
list_train = (list_1[0:(k * divided_datapath)] + list_1[((k + 1) * divided_datapath):len(list_1)])
print('k_fold', k, ' list_val:', list_val, ' list_train:', list_train)
[paths_train_img, paths_train_label] = zip(*list_train)
[paths_val_img, paths_val_label] = zip(*list_val)
print('Now training data:', dataset, ', k fold: ', k, ' ...')
if (not config['k_fold_merge_model']):
(model, history_curr) = train_process(config, model, paths_train_img, paths_train_label, paths_val_img, paths_val_label, dataset, cp_callback, saver1, k_fold_index=k, init_epoch=((k * config['epochs']) + init_epoch))
history.append(history_curr)
else:
(model, history_curr) = train_process(config, model, paths_train_img, paths_train_label, paths_val_img, paths_val_label, dataset, cp_callback, saver1, k_fold_index=k, init_epoch=init_epoch)
history.append(history_curr)
saved_model_path = ((((config['saved_models_dir'] + '/') + config['exp_name']) + '/') + config['model'])
if (not os.path.exists(saved_model_path)):
os.makedirs(saved_model_path)
model.save((((((saved_model_path + '/') + dataset) + 'k_fold_') + str(k)) + '.h5'))
if (k != (k_fold - 1)):
if (not config['train_premodel']):
call_model = getattr(ModelSet, config['model'])
(model, list_metric_names) = call_model(self=ModelSet, config=config)
else:
model = load_model_file(config, dataset, compile=True)
return (model, history)
|
def k_fold_train_process(config, model, k_fold, paths, dataset, cp_callback, init_epoch, saver1):
'\n K-fold training\n\n :param config: type dict: config parameter\n :param model: type tf.keras.Model, training model\n :param paths: type dict of str: tfrecords path loaded from pickle file.\n :param dataset: type str: name of dataset\n :param cp_callback: type tf.keras.callbacks.ModelCheckpoint, training check point\n :param init_epoch: type int, initial epoch.\n :return: models: type tf.keras.Model, trained model\n :return: history type list of float, metrics evaluating value from each epoch.\n '
history = None
list_1 = list(zip(paths['path_train_val_img'], paths['path_train_val_label']))
random.shuffle(list_1)
divided_datapath = (len(list_1) // k_fold)
assert (divided_datapath > 0)
for k in range(k_fold):
list_val = list_1[(k * divided_datapath):((k + 1) * divided_datapath)]
list_train = (list_1[0:(k * divided_datapath)] + list_1[((k + 1) * divided_datapath):len(list_1)])
print('k_fold', k, ' list_val:', list_val, ' list_train:', list_train)
[paths_train_img, paths_train_label] = zip(*list_train)
[paths_val_img, paths_val_label] = zip(*list_val)
print('Now training data:', dataset, ', k fold: ', k, ' ...')
if (not config['k_fold_merge_model']):
(model, history_curr) = train_process(config, model, paths_train_img, paths_train_label, paths_val_img, paths_val_label, dataset, cp_callback, saver1, k_fold_index=k, init_epoch=((k * config['epochs']) + init_epoch))
history.append(history_curr)
else:
(model, history_curr) = train_process(config, model, paths_train_img, paths_train_label, paths_val_img, paths_val_label, dataset, cp_callback, saver1, k_fold_index=k, init_epoch=init_epoch)
history.append(history_curr)
saved_model_path = ((((config['saved_models_dir'] + '/') + config['exp_name']) + '/') + config['model'])
if (not os.path.exists(saved_model_path)):
os.makedirs(saved_model_path)
model.save((((((saved_model_path + '/') + dataset) + 'k_fold_') + str(k)) + '.h5'))
if (k != (k_fold - 1)):
if (not config['train_premodel']):
call_model = getattr(ModelSet, config['model'])
(model, list_metric_names) = call_model(self=ModelSet, config=config)
else:
model = load_model_file(config, dataset, compile=True)
return (model, history)<|docstring|>K-fold training
:param config: type dict: config parameter
:param model: type tf.keras.Model, training model
:param paths: type dict of str: tfrecords path loaded from pickle file.
:param dataset: type str: name of dataset
:param cp_callback: type tf.keras.callbacks.ModelCheckpoint, training check point
:param init_epoch: type int, initial epoch.
:return: models: type tf.keras.Model, trained model
:return: history type list of float, metrics evaluating value from each epoch.<|endoftext|>
|
a39f41b334eb3cc4c5190b15f5b27cc42422dbfc02da2fb51a6b039ed1b7eacd
|
def check_output_folder(var, country_str, msg):
'\n Check if the output folder is valid, if not\n just default to dekstop \n '
if (not var):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
if (country_str == 'Global'):
return os.path.join('/Users', USER, 'Desktop', 'covidify-output')
else:
return os.path.join('/Users', USER, 'Desktop', 'covidify-output-{}'.format(country_str))
else:
return var
|
Check if the output folder is valid, if not
just default to dekstop
|
src/covidify/cli.py
|
check_output_folder
|
barberw-OSU/covidify
| 323
|
python
|
def check_output_folder(var, country_str, msg):
'\n Check if the output folder is valid, if not\n just default to dekstop \n '
if (not var):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
if (country_str == 'Global'):
return os.path.join('/Users', USER, 'Desktop', 'covidify-output')
else:
return os.path.join('/Users', USER, 'Desktop', 'covidify-output-{}'.format(country_str))
else:
return var
|
def check_output_folder(var, country_str, msg):
'\n Check if the output folder is valid, if not\n just default to dekstop \n '
if (not var):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
if (country_str == 'Global'):
return os.path.join('/Users', USER, 'Desktop', 'covidify-output')
else:
return os.path.join('/Users', USER, 'Desktop', 'covidify-output-{}'.format(country_str))
else:
return var<|docstring|>Check if the output folder is valid, if not
just default to dekstop<|endoftext|>
|
aca284208df392bfbc7db6b9d7688f378c9a29a3421e172c227a9f727b0e72bc
|
def check_forecast_days(var, msg):
'\n Default days for forecasting\n '
if (not var):
return DAYS_IN_FUTURE
else:
return var
|
Default days for forecasting
|
src/covidify/cli.py
|
check_forecast_days
|
barberw-OSU/covidify
| 323
|
python
|
def check_forecast_days(var, msg):
'\n \n '
if (not var):
return DAYS_IN_FUTURE
else:
return var
|
def check_forecast_days(var, msg):
'\n \n '
if (not var):
return DAYS_IN_FUTURE
else:
return var<|docstring|>Default days for forecasting<|endoftext|>
|
e9a669fb422bbaadd0bb5a20a9ce5bee923d78a86991e82cb44595d06a713f5e
|
def check_top_countries(var, msg):
'\n Check number of countries for the log plot\n '
if (not var):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
return LOG_TOP_N_COUNTRIES
else:
return var
|
Check number of countries for the log plot
|
src/covidify/cli.py
|
check_top_countries
|
barberw-OSU/covidify
| 323
|
python
|
def check_top_countries(var, msg):
'\n \n '
if (not var):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
return LOG_TOP_N_COUNTRIES
else:
return var
|
def check_top_countries(var, msg):
'\n \n '
if (not var):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
return LOG_TOP_N_COUNTRIES
else:
return var<|docstring|>Check number of countries for the log plot<|endoftext|>
|
fd67c0bec504bd10e48559bca104e03689b47f80750a633c5b3ebebb9aa47d06
|
def check_source_arg(var, msg):
'\n Check if the datasource is valid, if not then just\n default to the john hopkin github repo\n '
if (var is None):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
return 'JHU'
elif (('wiki' in var) or ('JHU' in var)):
return var
else:
print(('%sMESSAGE: %s' % ((' ' * 5), 'invalid source given')))
sys.exit()
|
Check if the datasource is valid, if not then just
default to the john hopkin github repo
|
src/covidify/cli.py
|
check_source_arg
|
barberw-OSU/covidify
| 323
|
python
|
def check_source_arg(var, msg):
'\n Check if the datasource is valid, if not then just\n default to the john hopkin github repo\n '
if (var is None):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
return 'JHU'
elif (('wiki' in var) or ('JHU' in var)):
return var
else:
print(('%sMESSAGE: %s' % ((' ' * 5), 'invalid source given')))
sys.exit()
|
def check_source_arg(var, msg):
'\n Check if the datasource is valid, if not then just\n default to the john hopkin github repo\n '
if (var is None):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
return 'JHU'
elif (('wiki' in var) or ('JHU' in var)):
return var
else:
print(('%sMESSAGE: %s' % ((' ' * 5), 'invalid source given')))
sys.exit()<|docstring|>Check if the datasource is valid, if not then just
default to the john hopkin github repo<|endoftext|>
|
b20b7c545dfea6a5700138332d636006fd487dbe04f33c79c1260df7bc68617e
|
def check_country(country, msg):
'\n Do some regex work on passed country string\n because multi word args are not supported\n '
if (not country):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
return 'Global'
else:
country_str = replace_arg_space(country[0])
return country_str
|
Do some regex work on passed country string
because multi word args are not supported
|
src/covidify/cli.py
|
check_country
|
barberw-OSU/covidify
| 323
|
python
|
def check_country(country, msg):
'\n Do some regex work on passed country string\n because multi word args are not supported\n '
if (not country):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
return 'Global'
else:
country_str = replace_arg_space(country[0])
return country_str
|
def check_country(country, msg):
'\n Do some regex work on passed country string\n because multi word args are not supported\n '
if (not country):
print(('%sMESSAGE: %s' % ((' ' * 5), msg)))
return 'Global'
else:
country_str = replace_arg_space(country[0])
return country_str<|docstring|>Do some regex work on passed country string
because multi word args are not supported<|endoftext|>
|
9e23019d83c5af7319cc23bd14e238d52c6d055c001dca46c16ec583a55e5444
|
@click.group()
def cli():
'\n ☣ COVIDIFY ☣ \n\n - use the most up-to-date data to generate reports of confirmed cases, fatalities and recoveries. \n '
pass
|
☣ COVIDIFY ☣
- use the most up-to-date data to generate reports of confirmed cases, fatalities and recoveries.
|
src/covidify/cli.py
|
cli
|
barberw-OSU/covidify
| 323
|
python
|
@click.group()
def cli():
'\n ☣ COVIDIFY ☣ \n\n - use the most up-to-date data to generate reports of confirmed cases, fatalities and recoveries. \n '
pass
|
@click.group()
def cli():
'\n ☣ COVIDIFY ☣ \n\n - use the most up-to-date data to generate reports of confirmed cases, fatalities and recoveries. \n '
pass<|docstring|>☣ COVIDIFY ☣
- use the most up-to-date data to generate reports of confirmed cases, fatalities and recoveries.<|endoftext|>
|
6eac07aecaf1fe7f03c5835813901e8383788dd9720c512c79fde6176cc15837
|
@cli.command()
@click.option('--output', help=(('Folder to output data and reports [Default: /Users/' + USER) + '/Desktop/covidify-output/]'))
@click.option('--source', help='There are two datasources to choose from, John Hopkins github repo or wikipedia -- options are JHU or wiki respectively [Default: JHU]')
@click.option('--country', help='Filter reports by a country', multiple=True, type=str)
@click.option('--top', help=(('Top N infected countries for log plot. [Default: ' + str(LOG_TOP_N_COUNTRIES)) + ']'))
@click.option('--forecast', help=(('Number of days to forecast cumulative cases in the future. [Default: ' + str(DAYS_IN_FUTURE)) + ']'))
def run(output, source, country, top, forecast):
'\n Generate reports for global cases or refine by country.\n '
country_str = check_country(country, '\x1b[1;31m No country specified, defaulting to global cases \x1b[0;0m')
output = check_output_folder(output, country_str, (('\x1b[1;31m No output directory given, defaulting to /Users/' + USER) + '/Desktop/ \x1b[0;0m'))
source = check_source_arg(source, '\x1b[1;31m No source given, defaulting to John Hopkin CSSE github repo \x1b[0;0m')
top = check_top_countries(top, (('\x1b[1;31m No top countries given, defaulting to top ' + str(LOG_TOP_N_COUNTRIES)) + ' \x1b[0;0m'))
forecast = check_forecast_days(forecast, (('\x1b[1;31m No days for forecasting given, defaulting to ' + str(DAYS_IN_FUTURE)) + ' \x1b[0;0m'))
os.system((((((((((((((env + SCRIPT) + ' ') + env) + ' ') + output) + ' ') + source) + ' ') + country_str) + ' ') + str(top)) + ' ') + str(forecast)))
|
Generate reports for global cases or refine by country.
|
src/covidify/cli.py
|
run
|
barberw-OSU/covidify
| 323
|
python
|
@cli.command()
@click.option('--output', help=(('Folder to output data and reports [Default: /Users/' + USER) + '/Desktop/covidify-output/]'))
@click.option('--source', help='There are two datasources to choose from, John Hopkins github repo or wikipedia -- options are JHU or wiki respectively [Default: JHU]')
@click.option('--country', help='Filter reports by a country', multiple=True, type=str)
@click.option('--top', help=(('Top N infected countries for log plot. [Default: ' + str(LOG_TOP_N_COUNTRIES)) + ']'))
@click.option('--forecast', help=(('Number of days to forecast cumulative cases in the future. [Default: ' + str(DAYS_IN_FUTURE)) + ']'))
def run(output, source, country, top, forecast):
'\n \n '
country_str = check_country(country, '\x1b[1;31m No country specified, defaulting to global cases \x1b[0;0m')
output = check_output_folder(output, country_str, (('\x1b[1;31m No output directory given, defaulting to /Users/' + USER) + '/Desktop/ \x1b[0;0m'))
source = check_source_arg(source, '\x1b[1;31m No source given, defaulting to John Hopkin CSSE github repo \x1b[0;0m')
top = check_top_countries(top, (('\x1b[1;31m No top countries given, defaulting to top ' + str(LOG_TOP_N_COUNTRIES)) + ' \x1b[0;0m'))
forecast = check_forecast_days(forecast, (('\x1b[1;31m No days for forecasting given, defaulting to ' + str(DAYS_IN_FUTURE)) + ' \x1b[0;0m'))
os.system((((((((((((((env + SCRIPT) + ' ') + env) + ' ') + output) + ' ') + source) + ' ') + country_str) + ' ') + str(top)) + ' ') + str(forecast)))
|
@cli.command()
@click.option('--output', help=(('Folder to output data and reports [Default: /Users/' + USER) + '/Desktop/covidify-output/]'))
@click.option('--source', help='There are two datasources to choose from, John Hopkins github repo or wikipedia -- options are JHU or wiki respectively [Default: JHU]')
@click.option('--country', help='Filter reports by a country', multiple=True, type=str)
@click.option('--top', help=(('Top N infected countries for log plot. [Default: ' + str(LOG_TOP_N_COUNTRIES)) + ']'))
@click.option('--forecast', help=(('Number of days to forecast cumulative cases in the future. [Default: ' + str(DAYS_IN_FUTURE)) + ']'))
def run(output, source, country, top, forecast):
'\n \n '
country_str = check_country(country, '\x1b[1;31m No country specified, defaulting to global cases \x1b[0;0m')
output = check_output_folder(output, country_str, (('\x1b[1;31m No output directory given, defaulting to /Users/' + USER) + '/Desktop/ \x1b[0;0m'))
source = check_source_arg(source, '\x1b[1;31m No source given, defaulting to John Hopkin CSSE github repo \x1b[0;0m')
top = check_top_countries(top, (('\x1b[1;31m No top countries given, defaulting to top ' + str(LOG_TOP_N_COUNTRIES)) + ' \x1b[0;0m'))
forecast = check_forecast_days(forecast, (('\x1b[1;31m No days for forecasting given, defaulting to ' + str(DAYS_IN_FUTURE)) + ' \x1b[0;0m'))
os.system((((((((((((((env + SCRIPT) + ' ') + env) + ' ') + output) + ' ') + source) + ' ') + country_str) + ' ') + str(top)) + ' ') + str(forecast)))<|docstring|>Generate reports for global cases or refine by country.<|endoftext|>
|
00439d987b7cb316aa15525ead776c924aaa6e3df1316af8de33f5ac23600ee2
|
@click.option('--countries', help='List countries that have had confirmed cases.', is_flag=True)
@cli.command()
def list(countries):
'\n List all the countries that have confirmed cases.\n '
countries = check_list_flag(countries, '\x1b[1;31m Invalid flag passed. Make sure to use --countries\x1b[0;0m')
if countries:
get_countries()
|
List all the countries that have confirmed cases.
|
src/covidify/cli.py
|
list
|
barberw-OSU/covidify
| 323
|
python
|
@click.option('--countries', help='List countries that have had confirmed cases.', is_flag=True)
@cli.command()
def list(countries):
'\n \n '
countries = check_list_flag(countries, '\x1b[1;31m Invalid flag passed. Make sure to use --countries\x1b[0;0m')
if countries:
get_countries()
|
@click.option('--countries', help='List countries that have had confirmed cases.', is_flag=True)
@cli.command()
def list(countries):
'\n \n '
countries = check_list_flag(countries, '\x1b[1;31m Invalid flag passed. Make sure to use --countries\x1b[0;0m')
if countries:
get_countries()<|docstring|>List all the countries that have confirmed cases.<|endoftext|>
|
6ad18e36511743f637cd7e6f5706b71db9d1ca0f6e6e964cb58e73b3bebac4c2
|
def __init__(self, backbone, transformer, num_classes, num_frames, num_queries, aux_loss=False):
' Initializes the model.\n Parameters:\n backbone: torch module of the backbone to be used. See backbone.py\n transformer: torch module of the transformer architecture. See transformer.py\n num_classes: number of object classes\n num_queries: number of object queries, ie detection slot. This is the maximal number of objects\n VisTR can detect in a video. For ytvos, we recommend 10 queries for each frame, \n thus 360 queries for 36 frames.\n aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n '
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.hidden_dim = hidden_dim
self.class_embed = nn.Linear(hidden_dim, (num_classes + 1))
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.num_frames = num_frames
self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
self.aux_loss = aux_loss
|
Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, ie detection slot. This is the maximal number of objects
VisTR can detect in a video. For ytvos, we recommend 10 queries for each frame,
thus 360 queries for 36 frames.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
|
models/vistr.py
|
__init__
|
rbli-john/VisTR
| 646
|
python
|
def __init__(self, backbone, transformer, num_classes, num_frames, num_queries, aux_loss=False):
' Initializes the model.\n Parameters:\n backbone: torch module of the backbone to be used. See backbone.py\n transformer: torch module of the transformer architecture. See transformer.py\n num_classes: number of object classes\n num_queries: number of object queries, ie detection slot. This is the maximal number of objects\n VisTR can detect in a video. For ytvos, we recommend 10 queries for each frame, \n thus 360 queries for 36 frames.\n aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n '
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.hidden_dim = hidden_dim
self.class_embed = nn.Linear(hidden_dim, (num_classes + 1))
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.num_frames = num_frames
self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
self.aux_loss = aux_loss
|
def __init__(self, backbone, transformer, num_classes, num_frames, num_queries, aux_loss=False):
' Initializes the model.\n Parameters:\n backbone: torch module of the backbone to be used. See backbone.py\n transformer: torch module of the transformer architecture. See transformer.py\n num_classes: number of object classes\n num_queries: number of object queries, ie detection slot. This is the maximal number of objects\n VisTR can detect in a video. For ytvos, we recommend 10 queries for each frame, \n thus 360 queries for 36 frames.\n aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n '
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
hidden_dim = transformer.d_model
self.hidden_dim = hidden_dim
self.class_embed = nn.Linear(hidden_dim, (num_classes + 1))
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.num_frames = num_frames
self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1)
self.backbone = backbone
self.aux_loss = aux_loss<|docstring|>Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of object classes
num_queries: number of object queries, ie detection slot. This is the maximal number of objects
VisTR can detect in a video. For ytvos, we recommend 10 queries for each frame,
thus 360 queries for 36 frames.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.<|endoftext|>
|
ca080c6d16d9c964774b2ef8c7c5f8b9349ee6407ba8d7814a2564d6e9394cd1
|
def forward(self, samples: NestedTensor):
'\xa0The forward expects a NestedTensor, which consists of:\n - samples.tensors: image sequences, of shape [num_frames x 3 x H x W]\n - samples.mask: a binary mask of shape [num_frames x H x W], containing 1 on padded pixels\n\n It returns a dict with the following elements:\n - "pred_logits": the classification logits (including no-object) for all queries.\n Shape= [batch_size x num_queries x (num_classes + 1)]\n - "pred_boxes": The normalized boxes coordinates for all queries, represented as\n (center_x, center_y, height, width). These values are normalized in [0, 1],\n relative to the size of each individual image (disregarding possible padding).\n See PostProcess for information on how to retrieve the unnormalized bounding box.\n - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of\n dictionnaries containing the two above keys for each decoder layer.\n '
if (not isinstance(samples, NestedTensor)):
samples = nested_tensor_from_tensor_list(samples)
(features, pos) = self.backbone(samples)
pos = pos[(- 1)]
(src, mask) = features[(- 1)].decompose()
src_proj = self.input_proj(src)
(n, c, h, w) = src_proj.shape
assert (mask is not None)
src_proj = src_proj.reshape((n // self.num_frames), self.num_frames, c, h, w).permute(0, 2, 1, 3, 4).flatten((- 2))
mask = mask.reshape((n // self.num_frames), self.num_frames, (h * w))
pos = pos.permute(0, 2, 1, 3, 4).flatten((- 2))
hs = self.transformer(src_proj, mask, self.query_embed.weight, pos)[0]
outputs_class = self.class_embed(hs)
outputs_coord = self.bbox_embed(hs).sigmoid()
out = {'pred_logits': outputs_class[(- 1)], 'pred_boxes': outputs_coord[(- 1)]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
return out
|
The forward expects a NestedTensor, which consists of:
- samples.tensors: image sequences, of shape [num_frames x 3 x H x W]
- samples.mask: a binary mask of shape [num_frames x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, height, width). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
|
models/vistr.py
|
forward
|
rbli-john/VisTR
| 646
|
python
|
def forward(self, samples: NestedTensor):
'\xa0The forward expects a NestedTensor, which consists of:\n - samples.tensors: image sequences, of shape [num_frames x 3 x H x W]\n - samples.mask: a binary mask of shape [num_frames x H x W], containing 1 on padded pixels\n\n It returns a dict with the following elements:\n - "pred_logits": the classification logits (including no-object) for all queries.\n Shape= [batch_size x num_queries x (num_classes + 1)]\n - "pred_boxes": The normalized boxes coordinates for all queries, represented as\n (center_x, center_y, height, width). These values are normalized in [0, 1],\n relative to the size of each individual image (disregarding possible padding).\n See PostProcess for information on how to retrieve the unnormalized bounding box.\n - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of\n dictionnaries containing the two above keys for each decoder layer.\n '
if (not isinstance(samples, NestedTensor)):
samples = nested_tensor_from_tensor_list(samples)
(features, pos) = self.backbone(samples)
pos = pos[(- 1)]
(src, mask) = features[(- 1)].decompose()
src_proj = self.input_proj(src)
(n, c, h, w) = src_proj.shape
assert (mask is not None)
src_proj = src_proj.reshape((n // self.num_frames), self.num_frames, c, h, w).permute(0, 2, 1, 3, 4).flatten((- 2))
mask = mask.reshape((n // self.num_frames), self.num_frames, (h * w))
pos = pos.permute(0, 2, 1, 3, 4).flatten((- 2))
hs = self.transformer(src_proj, mask, self.query_embed.weight, pos)[0]
outputs_class = self.class_embed(hs)
outputs_coord = self.bbox_embed(hs).sigmoid()
out = {'pred_logits': outputs_class[(- 1)], 'pred_boxes': outputs_coord[(- 1)]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
return out
|
def forward(self, samples: NestedTensor):
'\xa0The forward expects a NestedTensor, which consists of:\n - samples.tensors: image sequences, of shape [num_frames x 3 x H x W]\n - samples.mask: a binary mask of shape [num_frames x H x W], containing 1 on padded pixels\n\n It returns a dict with the following elements:\n - "pred_logits": the classification logits (including no-object) for all queries.\n Shape= [batch_size x num_queries x (num_classes + 1)]\n - "pred_boxes": The normalized boxes coordinates for all queries, represented as\n (center_x, center_y, height, width). These values are normalized in [0, 1],\n relative to the size of each individual image (disregarding possible padding).\n See PostProcess for information on how to retrieve the unnormalized bounding box.\n - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of\n dictionnaries containing the two above keys for each decoder layer.\n '
if (not isinstance(samples, NestedTensor)):
samples = nested_tensor_from_tensor_list(samples)
(features, pos) = self.backbone(samples)
pos = pos[(- 1)]
(src, mask) = features[(- 1)].decompose()
src_proj = self.input_proj(src)
(n, c, h, w) = src_proj.shape
assert (mask is not None)
src_proj = src_proj.reshape((n // self.num_frames), self.num_frames, c, h, w).permute(0, 2, 1, 3, 4).flatten((- 2))
mask = mask.reshape((n // self.num_frames), self.num_frames, (h * w))
pos = pos.permute(0, 2, 1, 3, 4).flatten((- 2))
hs = self.transformer(src_proj, mask, self.query_embed.weight, pos)[0]
outputs_class = self.class_embed(hs)
outputs_coord = self.bbox_embed(hs).sigmoid()
out = {'pred_logits': outputs_class[(- 1)], 'pred_boxes': outputs_coord[(- 1)]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
return out<|docstring|>The forward expects a NestedTensor, which consists of:
- samples.tensors: image sequences, of shape [num_frames x 3 x H x W]
- samples.mask: a binary mask of shape [num_frames x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, height, width). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.<|endoftext|>
|
899d8244164f8f345f3d80cae9027bdf699c77de608f689f57889828dab2ff31
|
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
' Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n '
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones((self.num_classes + 1))
empty_weight[(- 1)] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
|
Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.
|
models/vistr.py
|
__init__
|
rbli-john/VisTR
| 646
|
python
|
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
' Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n '
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones((self.num_classes + 1))
empty_weight[(- 1)] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
|
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
' Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n '
super().__init__()
self.num_classes = num_classes
self.matcher = matcher
self.weight_dict = weight_dict
self.eos_coef = eos_coef
self.losses = losses
empty_weight = torch.ones((self.num_classes + 1))
empty_weight[(- 1)] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)<|docstring|>Create the criterion.
Parameters:
num_classes: number of object categories, omitting the special no-object category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-object category
losses: list of all the losses to be applied. See get_loss for list of available losses.<|endoftext|>
|
2f837fb5b9b30968c785e13e65c7ddb316d54e9819b701d88228ffc326c1b732
|
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
'Classification loss (NLL)\n targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]\n '
assert ('pred_logits' in outputs)
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t['labels'][J] for (t, (_, J)) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
if log:
losses['class_error'] = (100 - accuracy(src_logits[idx], target_classes_o)[0])
return losses
|
Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
|
models/vistr.py
|
loss_labels
|
rbli-john/VisTR
| 646
|
python
|
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
'Classification loss (NLL)\n targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]\n '
assert ('pred_logits' in outputs)
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t['labels'][J] for (t, (_, J)) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
if log:
losses['class_error'] = (100 - accuracy(src_logits[idx], target_classes_o)[0])
return losses
|
def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
'Classification loss (NLL)\n targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]\n '
assert ('pred_logits' in outputs)
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t['labels'][J] for (t, (_, J)) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
if log:
losses['class_error'] = (100 - accuracy(src_logits[idx], target_classes_o)[0])
return losses<|docstring|>Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]<|endoftext|>
|
ba75bd089b7975e09bf5464302c8f6b4988fe7c4359d87c88e4d2475faca6c13
|
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes\n This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients\n "
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v['labels']) for v in targets], device=device)
card_pred = (pred_logits.argmax((- 1)) != (pred_logits.shape[(- 1)] - 1)).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
|
Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
|
models/vistr.py
|
loss_cardinality
|
rbli-john/VisTR
| 646
|
python
|
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes\n This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients\n "
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v['labels']) for v in targets], device=device)
card_pred = (pred_logits.argmax((- 1)) != (pred_logits.shape[(- 1)] - 1)).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
|
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_boxes):
" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes\n This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients\n "
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v['labels']) for v in targets], device=device)
card_pred = (pred_logits.argmax((- 1)) != (pred_logits.shape[(- 1)] - 1)).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses<|docstring|>Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients<|endoftext|>
|
d9660936cae9acd987268ea0e1e52a38135d5ee304f738d1ecbba66ddad02286
|
def loss_boxes(self, outputs, targets, indices, num_boxes):
'Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.\n '
assert ('pred_boxes' in outputs)
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for (t, (_, i)) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = (loss_bbox.sum() / num_boxes)
loss_giou = (1 - torch.diag(box_ops.generalized_box_iou(box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))))
losses['loss_giou'] = (loss_giou.sum() / num_boxes)
return losses
|
Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
|
models/vistr.py
|
loss_boxes
|
rbli-john/VisTR
| 646
|
python
|
def loss_boxes(self, outputs, targets, indices, num_boxes):
'Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.\n '
assert ('pred_boxes' in outputs)
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for (t, (_, i)) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = (loss_bbox.sum() / num_boxes)
loss_giou = (1 - torch.diag(box_ops.generalized_box_iou(box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))))
losses['loss_giou'] = (loss_giou.sum() / num_boxes)
return losses
|
def loss_boxes(self, outputs, targets, indices, num_boxes):
'Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.\n '
assert ('pred_boxes' in outputs)
idx = self._get_src_permutation_idx(indices)
src_boxes = outputs['pred_boxes'][idx]
target_boxes = torch.cat([t['boxes'][i] for (t, (_, i)) in zip(targets, indices)], dim=0)
loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')
losses = {}
losses['loss_bbox'] = (loss_bbox.sum() / num_boxes)
loss_giou = (1 - torch.diag(box_ops.generalized_box_iou(box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))))
losses['loss_giou'] = (loss_giou.sum() / num_boxes)
return losses<|docstring|>Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.<|endoftext|>
|
5ff00ad3499fc72c16b05d090d40ac03291a5a0fbdf5774000d56dbf86d166c9
|
def loss_masks(self, outputs, targets, indices, num_boxes):
'Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]\n '
assert ('pred_masks' in outputs)
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs['pred_masks']
(target_masks, valid) = nested_tensor_from_tensor_list([t['masks'] for t in targets], split=False).decompose()
target_masks = target_masks.to(src_masks)
src_masks = src_masks[src_idx]
try:
src_masks = interpolate(src_masks[(:, None)], size=target_masks.shape[(- 2):], mode='bilinear', align_corners=False)
src_masks = src_masks[(:, 0)].flatten(1)
target_masks = target_masks[tgt_idx].flatten(1)
except:
src_masks = src_masks.flatten(1)
target_masks = src_masks.clone()
losses = {'loss_mask': sigmoid_focal_loss(src_masks, target_masks, num_boxes), 'loss_dice': dice_loss(src_masks, target_masks, num_boxes)}
return losses
|
Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]
|
models/vistr.py
|
loss_masks
|
rbli-john/VisTR
| 646
|
python
|
def loss_masks(self, outputs, targets, indices, num_boxes):
'Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]\n '
assert ('pred_masks' in outputs)
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs['pred_masks']
(target_masks, valid) = nested_tensor_from_tensor_list([t['masks'] for t in targets], split=False).decompose()
target_masks = target_masks.to(src_masks)
src_masks = src_masks[src_idx]
try:
src_masks = interpolate(src_masks[(:, None)], size=target_masks.shape[(- 2):], mode='bilinear', align_corners=False)
src_masks = src_masks[(:, 0)].flatten(1)
target_masks = target_masks[tgt_idx].flatten(1)
except:
src_masks = src_masks.flatten(1)
target_masks = src_masks.clone()
losses = {'loss_mask': sigmoid_focal_loss(src_masks, target_masks, num_boxes), 'loss_dice': dice_loss(src_masks, target_masks, num_boxes)}
return losses
|
def loss_masks(self, outputs, targets, indices, num_boxes):
'Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]\n '
assert ('pred_masks' in outputs)
src_idx = self._get_src_permutation_idx(indices)
tgt_idx = self._get_tgt_permutation_idx(indices)
src_masks = outputs['pred_masks']
(target_masks, valid) = nested_tensor_from_tensor_list([t['masks'] for t in targets], split=False).decompose()
target_masks = target_masks.to(src_masks)
src_masks = src_masks[src_idx]
try:
src_masks = interpolate(src_masks[(:, None)], size=target_masks.shape[(- 2):], mode='bilinear', align_corners=False)
src_masks = src_masks[(:, 0)].flatten(1)
target_masks = target_masks[tgt_idx].flatten(1)
except:
src_masks = src_masks.flatten(1)
target_masks = src_masks.clone()
losses = {'loss_mask': sigmoid_focal_loss(src_masks, target_masks, num_boxes), 'loss_dice': dice_loss(src_masks, target_masks, num_boxes)}
return losses<|docstring|>Compute the losses related to the masks: the focal loss and the dice loss.
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.