index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
6,700 | e81373c7b9c43b178f0f12382501be8899189660 | #
# Standard tests on the standard set of model outputs
#
import pybamm
import numpy as np
class StandardOutputTests(object):
    """Calls all the tests on the standard output variables."""

    def __init__(self, model, parameter_values, disc, solution):
        # Assign attributes
        self.model = model
        self.parameter_values = parameter_values
        self.disc = disc
        self.solution = solution

        # Chemistry selects which extra test classes apply in test_all.
        # NOTE(review): if the model is neither lithium-ion nor lead-acid,
        # self.chemistry is never set and test_all raises AttributeError.
        if isinstance(self.model, pybamm.lithium_ion.BaseModel):
            self.chemistry = "Lithium-ion"
        elif isinstance(self.model, pybamm.lead_acid.BaseModel):
            self.chemistry = "Lead acid"

        # Only for constant current: classify operating condition from the
        # sign of the applied current (positive current = discharge).
        current_sign = np.sign(parameter_values["Current function [A]"])
        if current_sign == 1:
            self.operating_condition = "discharge"
        elif current_sign == -1:
            self.operating_condition = "charge"
        else:
            self.operating_condition = "off"

    def process_variables(self):
        # Placeholder hook; currently a no-op.
        return

    def run_test_class(self, ClassName):
        """Run all tests from a class 'ClassName'"""
        tests = ClassName(
            self.model,
            self.parameter_values,
            self.disc,
            self.solution,
            self.operating_condition,
        )
        tests.test_all()

    def test_all(self, skip_first_timestep=False):
        """Run every applicable test class.

        `skip_first_timestep` is currently unused — presumably reserved for
        models with an initial transient; TODO confirm intent.
        """
        # Core tests that apply to every chemistry
        self.run_test_class(VoltageTests)
        self.run_test_class(ElectrolyteConcentrationTests)
        self.run_test_class(PotentialTests)
        self.run_test_class(CurrentTests)

        # Particle tests only make sense for lithium-ion models
        if self.chemistry == "Lithium-ion":
            self.run_test_class(ParticleConcentrationTests)

        # Velocity tests only when convection is modelled
        if self.model.options["convection"] != "none":
            self.run_test_class(VelocityTests)
class BaseOutputTest(object):
    """Base class for the output-variable tests: stores the model context and
    precomputes dimensional time, space, and particle grids."""

    def __init__(self, model, param, disc, solution, operating_condition):
        self.model = model
        self.param = param
        self.disc = disc
        self.solution = solution
        self.operating_condition = operating_condition

        # Use dimensional time and space
        self.t = solution.t * model.timescale_eval

        geo = pybamm.geometric_parameters
        L_x = param.evaluate(geo.L_x)
        # Node (cell-centre) coordinates, scaled to dimensional length
        self.x_n = disc.mesh["negative electrode"].nodes * L_x
        self.x_s = disc.mesh["separator"].nodes * L_x
        self.x_p = disc.mesh["positive electrode"].nodes * L_x
        whole_cell = ["negative electrode", "separator", "positive electrode"]
        self.x = disc.mesh.combine_submeshes(*whole_cell).nodes * L_x
        # Edge coordinates — used for flux / current-density variables,
        # which are defined on cell edges
        self.x_n_edge = disc.mesh["negative electrode"].edges * L_x
        self.x_s_edge = disc.mesh["separator"].edges * L_x
        self.x_p_edge = disc.mesh["positive electrode"].edges * L_x
        self.x_edge = disc.mesh.combine_submeshes(*whole_cell).edges * L_x

        # Particle radial grids only exist for lithium-ion models
        if isinstance(self.model, pybamm.lithium_ion.BaseModel):
            R_n = param.evaluate(model.param.R_n_typ)
            R_p = param.evaluate(model.param.R_p_typ)
            self.r_n = disc.mesh["negative particle"].nodes * R_n
            self.r_p = disc.mesh["positive particle"].nodes * R_p
            self.r_n_edge = disc.mesh["negative particle"].edges * R_n
            self.r_p_edge = disc.mesh["positive particle"].edges * R_p

        # Useful parameters: dimensionless electrode thicknesses
        self.l_n = param.evaluate(geo.l_n)
        self.l_p = param.evaluate(geo.l_p)

        # Applied current evaluated at the (dimensionless) solution times
        current_param = self.model.param.current_with_time
        self.i_cell = param.process_symbol(current_param).evaluate(solution.t)
class VoltageTests(BaseOutputTest):
    """Tests on the terminal voltage and its component overpotentials."""

    def __init__(self, model, param, disc, solution, operating_condition):
        super().__init__(model, param, disc, solution, operating_condition)

        # Local and x-averaged reaction overpotentials
        self.eta_r_n = solution["Negative electrode reaction overpotential [V]"]
        self.eta_r_p = solution["Positive electrode reaction overpotential [V]"]
        self.eta_r_n_av = solution[
            "X-averaged negative electrode reaction overpotential [V]"
        ]
        self.eta_r_p_av = solution[
            "X-averaged positive electrode reaction overpotential [V]"
        ]
        self.eta_r_av = solution["X-averaged reaction overpotential [V]"]
        # Other voltage-loss components
        self.eta_sei_av = solution["X-averaged SEI film overpotential [V]"]
        self.eta_e_av = solution["X-averaged electrolyte overpotential [V]"]
        self.delta_phi_s_av = solution["X-averaged solid phase ohmic losses [V]"]
        # Open-circuit potentials and terminal voltage
        self.ocp_n_av = solution[
            "X-averaged negative electrode open circuit potential [V]"
        ]
        self.ocp_p_av = solution[
            "X-averaged positive electrode open circuit potential [V]"
        ]
        self.ocv_av = solution["X-averaged open circuit voltage [V]"]
        self.voltage = solution["Terminal voltage [V]"]

    def test_each_reaction_overpotential(self):
        """Testing that:
        - discharge: eta_r_n > 0, eta_r_p < 0
        - charge: eta_r_n < 0, eta_r_p > 0
        - off: eta_r_n == 0, eta_r_p == 0
        """
        tol = 0.01
        t, x_n, x_p = self.t, self.x_n, self.x_p
        if self.operating_condition == "discharge":
            np.testing.assert_array_less(-self.eta_r_n(t, x_n), tol)
            np.testing.assert_array_less(self.eta_r_p(t, x_p), tol)
        elif self.operating_condition == "charge":
            np.testing.assert_array_less(self.eta_r_n(t, x_n), tol)
            np.testing.assert_array_less(-self.eta_r_p(t, x_p), tol)
        elif self.operating_condition == "off":
            np.testing.assert_array_equal(self.eta_r_n(t, x_n), 0)
            np.testing.assert_array_equal(-self.eta_r_p(t, x_p), 0)

    def test_overpotentials(self):
        """Testing that all are:
        - discharge: . < 0
        - charge: . > 0
        - off: . == 0
        """
        tol = 0.001
        if self.operating_condition == "discharge":
            np.testing.assert_array_less(self.eta_r_av(self.t), tol)
            np.testing.assert_array_less(self.eta_e_av(self.t), tol)
            np.testing.assert_array_less(self.delta_phi_s_av(self.t), tol)
        elif self.operating_condition == "charge":
            np.testing.assert_array_less(-self.eta_r_av(self.t), tol)
            np.testing.assert_array_less(-self.eta_e_av(self.t), tol)
            np.testing.assert_array_less(-self.delta_phi_s_av(self.t), tol)
        elif self.operating_condition == "off":
            np.testing.assert_array_equal(self.eta_r_av(self.t), 0)
            np.testing.assert_array_equal(self.eta_e_av(self.t), 0)
            # For some reason SPM gives delta_phi_s_av ~ 1e-17
            np.testing.assert_array_almost_equal(
                self.delta_phi_s_av(self.t), 0, decimal=16
            )

    def test_ocps(self):
        """Testing that:
        - discharge: ocp_n increases, ocp_p decreases
        - charge: ocp_n decreases, ocp_p increases
        - off: ocp_n, ocp_p constant
        """
        # Compare end of run to the second time point (t[1], not t[0],
        # presumably to skip the initial transient — TODO confirm)
        neg_end_vs_start = self.ocp_n_av(self.t[-1]) - self.ocp_n_av(self.t[1])
        pos_end_vs_start = self.ocp_p_av(self.t[-1]) - self.ocp_p_av(self.t[1])
        if self.operating_condition == "discharge":
            np.testing.assert_array_less(-neg_end_vs_start, 0)
            np.testing.assert_array_less(pos_end_vs_start, 0)
        elif self.operating_condition == "charge":
            np.testing.assert_array_less(neg_end_vs_start, 0)
            np.testing.assert_array_less(-pos_end_vs_start, 0)
        elif self.operating_condition == "off":
            np.testing.assert_array_almost_equal(neg_end_vs_start, 0)
            np.testing.assert_array_almost_equal(pos_end_vs_start, 0)

    def test_ocv(self):
        """Testing that:
        - discharge: ocv decreases
        - charge: ocv increases
        - off: ocv constant
        """
        end_vs_start = self.ocv_av(self.t[-1]) - self.ocv_av(self.t[1])
        if self.operating_condition == "discharge":
            np.testing.assert_array_less(end_vs_start, 0)
        elif self.operating_condition == "charge":
            np.testing.assert_array_less(-end_vs_start, 0)
        elif self.operating_condition == "off":
            np.testing.assert_array_almost_equal(end_vs_start, 0)

    def test_voltage(self):
        """Testing that:
        - discharge: voltage decreases
        - charge: voltage increases
        - off: voltage constant
        """
        end_vs_start = self.voltage(self.t[-1]) - self.voltage(self.t[1])
        if self.operating_condition == "discharge":
            np.testing.assert_array_less(end_vs_start, 0)
        elif self.operating_condition == "charge":
            np.testing.assert_array_less(-end_vs_start, 0)
        elif self.operating_condition == "off":
            np.testing.assert_array_almost_equal(end_vs_start, 0)

    def test_consistent(self):
        """Test voltage components are consistent with one another by ensuring they sum
        correctly"""
        # OCV = OCP difference; reaction overpotential = electrode difference
        np.testing.assert_array_almost_equal(
            self.ocv_av(self.t), self.ocp_p_av(self.t) - self.ocp_n_av(self.t)
        )
        np.testing.assert_array_almost_equal(
            self.eta_r_av(self.t), self.eta_r_p_av(self.t) - self.eta_r_n_av(self.t)
        )
        # Terminal voltage = OCV plus all loss terms
        np.testing.assert_array_almost_equal(
            self.voltage(self.t),
            self.ocv_av(self.t)
            + self.eta_r_av(self.t)
            + self.eta_e_av(self.t)
            + self.delta_phi_s_av(self.t)
            + self.eta_sei_av(self.t),
            decimal=2,
        )

    def test_all(self):
        self.test_each_reaction_overpotential()
        self.test_overpotentials()
        self.test_ocps()
        self.test_ocv()
        self.test_voltage()
        self.test_consistent()
class ParticleConcentrationTests(BaseOutputTest):
    """Tests on the (lithium-ion) particle concentrations and fluxes."""

    def __init__(self, model, param, disc, solution, operating_condition):
        super().__init__(model, param, disc, solution, operating_condition)

        # Full, r-averaged and surface particle concentrations
        self.c_s_n = solution["Negative particle concentration"]
        self.c_s_p = solution["Positive particle concentration"]
        self.c_s_n_rav = solution["R-averaged negative particle concentration"]
        self.c_s_p_rav = solution["R-averaged positive particle concentration"]
        self.c_s_n_surf = solution["Negative particle surface concentration"]
        self.c_s_p_surf = solution["Positive particle surface concentration"]
        # Totals and fluxes for conservation checks
        self.c_s_n_tot = solution["Total lithium in negative electrode [mol]"]
        self.c_s_p_tot = solution["Total lithium in positive electrode [mol]"]
        self.N_s_n = solution["Negative particle flux"]
        self.N_s_p = solution["Positive particle flux"]
        # Lithium lost to side reactions (SEI growth, plating)
        self.c_SEI_n_tot = solution["Loss of lithium to negative electrode SEI [mol]"]
        self.c_SEI_p_tot = solution["Loss of lithium to positive electrode SEI [mol]"]
        self.c_Li_n_tot = solution[
            "Loss of lithium to negative electrode lithium plating [mol]"
        ]
        self.c_Li_p_tot = solution[
            "Loss of lithium to positive electrode lithium plating [mol]"
        ]

    def test_concentration_increase_decrease(self):
        """Test all concentrations in negative particles decrease and all
        concentrations in positive particles increase over a discharge."""
        t, x_n, x_p, r_n, r_p = self.t, self.x_n, self.x_p, self.r_n, self.r_p
        if self.model.options["particle"] in ["quadratic profile", "quartic profile"]:
            # For the assumed polynomial concentration profiles the values
            # can increase/decrease within the particle as the polynomial shifts,
            # so we just check the average instead
            neg_diff = self.c_s_n_rav(t[1:], x_n) - self.c_s_n_rav(t[:-1], x_n)
            pos_diff = self.c_s_p_rav(t[1:], x_p) - self.c_s_p_rav(t[:-1], x_p)
            neg_end_vs_start = self.c_s_n_rav(t[-1], x_n) - self.c_s_n_rav(t[0], x_n)
            pos_end_vs_start = self.c_s_p_rav(t[-1], x_p) - self.c_s_p_rav(t[0], x_p)
        else:
            neg_diff = self.c_s_n(t[1:], x_n, r_n) - self.c_s_n(t[:-1], x_n, r_n)
            pos_diff = self.c_s_p(t[1:], x_p, r_p) - self.c_s_p(t[:-1], x_p, r_p)
            neg_end_vs_start = self.c_s_n(t[-1], x_n, r_n) - self.c_s_n(t[0], x_n, r_n)
            pos_end_vs_start = self.c_s_p(t[-1], x_p, r_p) - self.c_s_p(t[0], x_p, r_p)
        # 1e-16 tolerance allows floating-point noise at machine precision
        if self.operating_condition == "discharge":
            np.testing.assert_array_less(neg_diff, 1e-16)
            np.testing.assert_array_less(-1e-16, pos_diff)
            np.testing.assert_array_less(neg_end_vs_start, 0)
            np.testing.assert_array_less(0, pos_end_vs_start)
        elif self.operating_condition == "charge":
            np.testing.assert_array_less(-1e-16, neg_diff)
            np.testing.assert_array_less(pos_diff, 1e-16)
            np.testing.assert_array_less(0, neg_end_vs_start)
            np.testing.assert_array_less(pos_end_vs_start, 0)
        elif self.operating_condition == "off":
            np.testing.assert_array_almost_equal(neg_diff, 0)
            np.testing.assert_array_almost_equal(pos_diff, 0)
            np.testing.assert_array_almost_equal(neg_end_vs_start, 0)
            np.testing.assert_array_almost_equal(pos_end_vs_start, 0)

    def test_concentration_limits(self):
        """Test that concentrations do not go below 0 or exceed the maximum."""
        t, x_n, x_p, r_n, r_p = self.t, self.x_n, self.x_p, self.r_n, self.r_p
        np.testing.assert_array_less(-self.c_s_n(t, x_n, r_n), 0)
        np.testing.assert_array_less(-self.c_s_p(t, x_p, r_p), 0)
        # Concentrations are dimensionless here, so the maximum is 1
        np.testing.assert_array_less(self.c_s_n(t, x_n, r_n), 1)
        np.testing.assert_array_less(self.c_s_p(t, x_p, r_p), 1)

    def test_conservation(self):
        """Test amount of lithium stored across all particles and in SEI layers is
        constant."""
        self.c_s_tot = (
            self.c_s_n_tot(self.solution.t)
            + self.c_s_p_tot(self.solution.t)
            + self.c_SEI_n_tot(self.solution.t)
            + self.c_SEI_p_tot(self.solution.t)
            + self.c_Li_n_tot(self.solution.t)
            + self.c_Li_p_tot(self.solution.t)
        )
        # Relative change between consecutive time steps should be ~0;
        # the accepted tolerance depends on the model/discretisation used
        diff = (self.c_s_tot[1:] - self.c_s_tot[:-1]) / self.c_s_tot[:-1]
        if "profile" in self.model.options["particle"]:
            np.testing.assert_array_almost_equal(diff, 0, decimal=10)
        elif self.model.options["surface form"] == "differential":
            np.testing.assert_array_almost_equal(diff, 0, decimal=10)
        elif self.model.options["SEI"] == "ec reaction limited":
            np.testing.assert_array_almost_equal(diff, 0, decimal=12)
        else:
            np.testing.assert_array_almost_equal(diff, 0, decimal=15)

    def test_concentration_profile(self):
        """Test that the concentration in the centre of the negative particles is
        greater than the average concentration in the particle and also that the
        concentration on the surface of the negative particle is less than the average
        concentration in the particle. Test opposite is true for the positive
        particle."""
        # TODO: add an output for average particle concentration

    def test_fluxes(self):
        """Test that no flux holds in the centre of the particle. Test that surface
        flux in the negative particles is greater than zero and that the flux in the
        positive particles is less than zero during a discharge."""
        t, x_n, x_p, r_n, r_p = (
            self.t,
            self.x_n,
            self.x_p,
            self.r_n_edge,
            self.r_p_edge,
        )
        if self.model.options["particle"] == "uniform profile":
            # Fluxes are zero everywhere since the concentration is uniform
            np.testing.assert_array_almost_equal(self.N_s_n(t, x_n, r_n), 0)
            np.testing.assert_array_almost_equal(self.N_s_p(t, x_p, r_p), 0)
        else:
            if self.operating_condition == "discharge":
                if self.model.options["particle"] == "quartic profile":
                    # quartic profile has a transient at the beginning where
                    # the concentration "rearranges" giving flux of the opposite
                    # sign, so ignore first three times
                    np.testing.assert_array_less(0, self.N_s_n(t[3:], x_n, r_n[1:]))
                    np.testing.assert_array_less(self.N_s_p(t[3:], x_p, r_p[1:]), 0)
                else:
                    np.testing.assert_array_less(
                        -1e-16, self.N_s_n(t[1:], x_n, r_n[1:])
                    )
                    np.testing.assert_array_less(self.N_s_p(t[1:], x_p, r_p[1:]), 1e-16)
            if self.operating_condition == "charge":
                np.testing.assert_array_less(self.N_s_n(t[1:], x_n, r_n[1:]), 1e-16)
                np.testing.assert_array_less(-1e-16, self.N_s_p(t[1:], x_p, r_p[1:]))
            if self.operating_condition == "off":
                np.testing.assert_array_almost_equal(self.N_s_n(t, x_n, r_n), 0)
                np.testing.assert_array_almost_equal(self.N_s_p(t, x_p, r_p), 0)
        # Flux at the particle centre (first edge) is always zero by symmetry
        np.testing.assert_array_almost_equal(0, self.N_s_n(t, x_n, r_n[0]), decimal=4)
        np.testing.assert_array_almost_equal(0, self.N_s_p(t, x_p, r_p[0]), decimal=4)

    def test_all(self):
        self.test_concentration_increase_decrease()
        self.test_concentration_limits()
        self.test_conservation()
        self.test_concentration_profile()
        self.test_fluxes()
class ElectrolyteConcentrationTests(BaseOutputTest):
    """Tests on the electrolyte concentration and its flux."""

    def __init__(self, model, param, disc, solution, operating_condition):
        super().__init__(model, param, disc, solution, operating_condition)

        # Whole-cell and per-subdomain electrolyte concentrations
        self.c_e = solution["Electrolyte concentration"]
        self.c_e_n = solution["Negative electrolyte concentration"]
        self.c_e_s = solution["Separator electrolyte concentration"]
        self.c_e_p = solution["Positive electrolyte concentration"]
        # X-averaged counterparts (used by the commented-out profile test)
        self.c_e_av = solution["X-averaged electrolyte concentration"]
        self.c_e_n_av = solution["X-averaged negative electrolyte concentration"]
        self.c_e_s_av = solution["X-averaged separator electrolyte concentration"]
        self.c_e_p_av = solution["X-averaged positive electrolyte concentration"]
        self.c_e_tot = solution["Total concentration in electrolyte [mol]"]
        self.N_e_hat = solution["Electrolyte flux"]
        # self.N_e_hat = solution["Reduced cation flux"]

    def test_concentration_limit(self):
        """Test that the electrolyte concentration is always greater than zero."""
        np.testing.assert_array_less(-self.c_e(self.t, self.x), 0)

    def test_conservation(self):
        """Test conservation of species in the electrolyte."""
        # sufficient to check average concentration is constant
        diff = (
            self.c_e_tot(self.solution.t[1:]) - self.c_e_tot(self.solution.t[:-1])
        ) / self.c_e_tot(self.solution.t[:-1])
        np.testing.assert_array_almost_equal(diff, 0)

    def test_concentration_profile(self):
        """Test continuity of the concentration profile. Test average concentration is
        as expected and that the concentration in the negative electrode is greater
        than the average and the concentration in the positive is less than the average
        during a discharge."""
        # TODO: uncomment when have average concentrations
        # small number so that can use array less
        # epsilon = 0.001
        # if self.operating_condition == "discharge":
        #     np.testing.assert_array_less(
        #         -self.c_e_n_av.entries, self.c_e_av.entries + epsilon
        #     )
        #     np.testing.assert_array_less(
        #         self.c_e_p_av.entries, self.c_e_av.entries + epsilon
        #     )
        # elif self.operating_condition == "charge":
        #     np.testing.assert_array_less(
        #         -self.c_e_n_av.entries, self.c_e_av.entries + epsilon
        #     )
        #     np.testing.assert_array_less(
        #         self.c_e_p_av.entries, self.c_e_av.entries + epsilon
        #     )
        # elif self.operating_condition == "off":
        #     np.testing.assert_array_equal(self.c_e_n_av.entries, self.c_e_av.entries)
        #     np.testing.assert_array_equal(self.c_e_s_av.entries, self.c_e_av.entries)
        #     np.testing.assert_array_equal(self.c_e_p_av.entries, self.c_e_av.entries)

    def test_fluxes(self):
        """Test current collector fluxes are zero. Tolerance reduced for surface form
        models (bug in implementation of boundary conditions?)"""
        t, x = self.t, self.x_edge
        np.testing.assert_array_almost_equal(self.N_e_hat(t, x[0]), 0, decimal=3)
        np.testing.assert_array_almost_equal(self.N_e_hat(t, x[-1]), 0, decimal=3)

    def test_splitting(self):
        """Test that when splitting the concentrations and fluxes by negative electrode,
        separator, and positive electrode, we get the correct behaviour: continuous
        solution and recover combined through concatenation."""
        t, x_n, x_s, x_p, x = self.t, self.x_n, self.x_s, self.x_p, self.x
        c_e_combined = np.concatenate(
            (self.c_e_n(t, x_n), self.c_e_s(t, x_s), self.c_e_p(t, x_p)), axis=0
        )
        np.testing.assert_array_equal(self.c_e(t, x), c_e_combined)

    def test_all(self):
        self.test_concentration_limit()
        self.test_conservation()
        self.test_concentration_profile()
        self.test_fluxes()
        self.test_splitting()
class PotentialTests(BaseOutputTest):
    """Tests on electrode/electrolyte potentials and their differences."""

    def __init__(self, model, param, disc, solution, operating_condition):
        super().__init__(model, param, disc, solution, operating_condition)

        # Electrode (solid-phase) potentials
        self.phi_s_n = solution["Negative electrode potential [V]"]
        self.phi_s_p = solution["Positive electrode potential [V]"]
        self.phi_s_n_av = solution["X-averaged negative electrode potential [V]"]
        self.phi_s_p_av = solution["X-averaged positive electrode potential [V]"]
        # Electrolyte potentials
        self.phi_e = solution["Electrolyte potential [V]"]
        self.phi_e_n = solution["Negative electrolyte potential [V]"]
        self.phi_e_s = solution["Separator electrolyte potential [V]"]
        self.phi_e_p = solution["Positive electrolyte potential [V]"]
        self.phi_e_n_av = solution["X-averaged negative electrolyte potential [V]"]
        self.phi_e_p_av = solution["X-averaged positive electrolyte potential [V]"]
        # Surface potential differences (phi_s - phi_e)
        self.delta_phi_n = solution[
            "Negative electrode surface potential difference [V]"
        ]
        self.delta_phi_p = solution[
            "Positive electrode surface potential difference [V]"
        ]
        self.delta_phi_n_av = solution[
            "X-averaged negative electrode surface potential difference [V]"
        ]
        self.delta_phi_p_av = solution[
            "X-averaged positive electrode surface potential difference [V]"
        ]
        # Electrolyte potential gradients, per subdomain and whole cell
        self.grad_phi_e = solution["Gradient of electrolyte potential"]
        self.grad_phi_e_n = solution["Gradient of negative electrolyte potential"]
        self.grad_phi_e_s = solution["Gradient of separator electrolyte potential"]
        self.grad_phi_e_p = solution["Gradient of positive electrolyte potential"]

    def test_negative_electrode_potential_profile(self):
        """Test that negative electrode potential is zero on left boundary. Test
        average negative electrode potential is less than or equal to zero."""
        np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)

    def test_positive_electrode_potential_profile(self):
        """Test average positive electrode potential is less than the positive electrode
        potential on the right current collector."""
        # TODO: add these when have averages

    def test_potential_differences(self):
        """Test that potential differences are the difference between electrode
        potential and electrolyte potential"""
        t, x_n, x_p = self.t, self.x_n, self.x_p
        np.testing.assert_array_almost_equal(
            self.phi_s_n(t, x_n) - self.phi_e_n(t, x_n), self.delta_phi_n(t, x_n)
        )
        np.testing.assert_array_almost_equal(
            self.phi_s_p(t, x_p) - self.phi_e_p(t, x_p),
            self.delta_phi_p(t, x_p),
            decimal=5,
        )

    def test_average_potential_differences(self):
        """Test that average potential differences are the difference between electrode
        potential and electrolyte potential"""
        t = self.t
        np.testing.assert_array_almost_equal(
            self.phi_s_n_av(t) - self.phi_e_n_av(t), self.delta_phi_n_av(t)
        )
        np.testing.assert_array_almost_equal(
            self.phi_s_p_av(t) - self.phi_e_p_av(t), self.delta_phi_p_av(t)
        )

    def test_gradient_splitting(self):
        """Test that the whole-cell electrolyte potential gradient is recovered
        by concatenating the per-subdomain gradients."""
        t, x_n, x_s, x_p, x = self.t, self.x_n, self.x_s, self.x_p, self.x
        grad_phi_e_combined = np.concatenate(
            (
                self.grad_phi_e_n(t, x_n),
                self.grad_phi_e_s(t, x_s),
                self.grad_phi_e_p(t, x_p),
            ),
            axis=0,
        )
        np.testing.assert_array_equal(self.grad_phi_e(t, x), grad_phi_e_combined)

    def test_all(self):
        self.test_negative_electrode_potential_profile()
        self.test_positive_electrode_potential_profile()
        self.test_potential_differences()
        self.test_average_potential_differences()
        # Fix: this test was defined but never invoked by test_all, unlike in
        # the sibling test classes where every test_* method is run.
        self.test_gradient_splitting()
class CurrentTests(BaseOutputTest):
    """Tests on the interfacial, electrode and electrolyte current densities."""

    def __init__(self, model, param, disc, solution, operating_condition):
        super().__init__(model, param, disc, solution, operating_condition)

        # Interfacial and exchange current densities
        self.j = solution["Interfacial current density"]
        self.j0 = solution["Exchange current density"]
        self.j_n = solution["Negative electrode interfacial current density"]
        self.j_p = solution["Positive electrode interfacial current density"]
        self.j_n_av = solution[
            "X-averaged negative electrode interfacial current density"
        ]
        self.j_p_av = solution[
            "X-averaged positive electrode interfacial current density"
        ]
        # SEI side-reaction current densities
        self.j_n_sei = solution["Negative electrode SEI interfacial current density"]
        self.j_p_sei = solution["Positive electrode SEI interfacial current density"]
        self.j_n_sei_av = solution[
            "X-averaged negative electrode SEI interfacial current density"
        ]
        self.j_p_sei_av = solution[
            "X-averaged positive electrode SEI interfacial current density"
        ]
        self.j0_n = solution["Negative electrode exchange current density"]
        self.j0_p = solution["Positive electrode exchange current density"]
        # Electrode and electrolyte current densities
        self.i_s_n = solution["Negative electrode current density"]
        self.i_s_p = solution["Positive electrode current density"]
        self.i_s = solution["Electrode current density"]
        self.i_e = solution["Electrolyte current density"]
        # Surface area to volume ratios (weight the interfacial current)
        self.a_n = solution["Negative electrode surface area to volume ratio"]
        self.a_p = solution["Positive electrode surface area to volume ratio"]

    def test_interfacial_current_average(self):
        """Test that average of the surface area density distribution (in x)
        multiplied by the interfacial current density is equal to the true
        value."""
        # Average over x (axis 0); the total includes the SEI contribution.
        # Sign convention: positive in the negative electrode on discharge.
        np.testing.assert_array_almost_equal(
            np.mean(
                self.a_n(self.t, self.x_n)
                * (self.j_n(self.t, self.x_n) + self.j_n_sei(self.t, self.x_n)),
                axis=0,
            ),
            self.i_cell / self.l_n,
            decimal=4,
        )
        np.testing.assert_array_almost_equal(
            np.mean(
                self.a_p(self.t, self.x_p)
                * (self.j_p(self.t, self.x_p) + self.j_p_sei(self.t, self.x_p)),
                axis=0,
            ),
            -self.i_cell / self.l_p,
            decimal=4,
        )

    def test_conservation(self):
        """Test sum of electrode and electrolyte current densities give the applied
        current density"""
        t, x_n, x_s, x_p = self.t, self.x_n, self.x_s, self.x_p
        current_param = self.model.param.current_with_time
        i_cell = self.param.process_symbol(current_param).evaluate(t=t)
        # Charge conservation: i_s + i_e = i_cell in every subdomain
        for x in [x_n, x_s, x_p]:
            np.testing.assert_array_almost_equal(
                self.i_s(t, x) + self.i_e(t, x), i_cell, decimal=2
            )
        # The concatenated electrode current density matches the per-electrode ones
        np.testing.assert_array_almost_equal(
            self.i_s(t, x_n), self.i_s_n(t, x_n), decimal=3
        )
        np.testing.assert_array_almost_equal(
            self.i_s(t, x_p), self.i_s_p(t, x_p), decimal=3
        )

    def test_current_density_boundaries(self):
        """Test the boundary values of the current densities"""
        t, x_n, x_p = self.t, self.x_n_edge, self.x_p_edge
        current_param = self.model.param.current_with_time
        i_cell = self.param.process_symbol(current_param).evaluate(t=t)
        # All current enters/leaves through the current collectors; the
        # electrode current vanishes at the separator interfaces
        np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[0]), i_cell, decimal=2)
        np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[-1]), 0, decimal=4)
        np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[-1]), i_cell, decimal=3)
        np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[0]), 0, decimal=4)

    def test_all(self):
        self.test_conservation()
        self.test_current_density_boundaries()
        # Skip average current test if capacitance is used, since average interfacial
        # current density will be affected slightly by capacitance effects
        if self.model.options["surface form"] != "differential":
            self.test_interfacial_current_average()
class VelocityTests(BaseOutputTest):
    """Tests on the convection (velocity) variables."""

    def __init__(self, model, param, disc, solution, operating_condition):
        super().__init__(model, param, disc, solution, operating_condition)

        self.v_box = solution["Volume-averaged velocity"]
        self.i_e = solution["Electrolyte current density"]
        self.dVbox_dz = solution["Transverse volume-averaged acceleration"]

    def test_velocity_boundaries(self):
        """Test the volume-averaged velocity is zero at both current collectors."""
        L_x = self.x_edge[-1]
        np.testing.assert_array_almost_equal(self.v_box(self.t, 0), 0, decimal=4)
        np.testing.assert_array_almost_equal(self.v_box(self.t, L_x), 0, decimal=4)

    def test_vertical_velocity(self):
        """Test the transverse acceleration is zero at the walls and negative
        in the cell interior."""
        L_x = self.x_edge[-1]
        np.testing.assert_array_equal(self.dVbox_dz(self.t, 0), 0)
        np.testing.assert_array_less(self.dVbox_dz(self.t, 0.5 * L_x), 0)
        np.testing.assert_array_equal(self.dVbox_dz(self.t, L_x), 0)

    def test_velocity_vs_current(self):
        """Test the velocity is proportional to the electrolyte current density
        in each electrode (proportionality constants beta_n, beta_p)."""
        t, x_n, x_p = self.t, self.x_n, self.x_p

        beta_n = self.model.param.beta_n
        beta_n = self.param.evaluate(beta_n)
        beta_p = self.model.param.beta_p
        beta_p = self.param.evaluate(beta_p)

        np.testing.assert_array_almost_equal(
            self.v_box(t, x_n), beta_n * self.i_e(t, x_n)
        )
        np.testing.assert_array_almost_equal(
            self.v_box(t, x_p), beta_p * self.i_e(t, x_p)
        )

    def test_all(self):
        self.test_velocity_boundaries()
        self.test_vertical_velocity()
        self.test_velocity_vs_current()
|
6,701 | 54e5feee3c8bb35c351361fd3ed4b5e237e5973d | highscores = []
# Maintain a running top-3 high-score table from "score name" lines on stdin.
scores = []
while True:
    # maxsplit=1 keeps multi-word names intact; the original split(' ') plus
    # user[1] dropped everything after the first word of the name.
    score_text, name = input('> ').split(' ', 1)
    scores.append([int(score_text), name])
    # Highest score first (ties broken by name, descending)
    scores.sort(reverse=True)
    # Slicing is safe even when fewer than 3 entries exist, so no length
    # check is needed (the original if/else was redundant).
    highscores = scores[:3]
    print(highscores)
6,702 | 533d0b883a0bbbb148f04826e4c0a2bcc31732e9 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
DIM Station Test
~~~~~~~~~~~~~~~~
Unit test for DIM Station
"""
import unittest
from dimp import ID, NetworkID
class StationTestCase(unittest.TestCase):
    """Tests for DIM station identifiers plus two toy coin-emission simulations."""

    def test_identifier(self):
        """A station ID string parses to a Station-network address and compares
        equal to its source string."""
        print('\n---------------- %s' % self)

        str1 = 'gsp-s001@x77uVYBT1G48CLzW9iwe2dr5jhUNEM772G'
        id1 = ID(str1)
        self.assertEqual(id1.address.network, NetworkID.Station)
        # ID compares equal to the plain string, so list membership holds
        arr1 = [str1]
        self.assertTrue(id1 in arr1)

    def test_btc(self):
        """Simulate BTC-style emission: 50-coin packages, 6*24 per day, reward
        halving every 4 years, 21M total cap."""
        total_money = 2100 * 10000
        package = 50
        print('total BTC: %d, first package: %d' % (total_money, package))
        spent = 0
        order = 0  # packages issued so far
        day = 0
        year = 0
        while (spent + package) <= total_money:
            spent += package
            order += 1
            # 6 packages per hour * 24 hours = one day
            if order % (6 * 24) == 0:
                day += 1
                if day % 365 == 0:
                    year += 1
                    print('year %d, day %d: package=%f, spent=%f' % (year, day, package, spent))
                    # Halve the reward every 4 years
                    if year % 4 == 0:
                        package /= 2.0
        print('BTC OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (year, day, package, spent, (total_money - spent)))

    def test_dimt(self):
        """Simulate DIMT emission: one package per day starting at 2**20,
        halving every 2 years, stopping when the package drops below 1."""
        total_money = 15 * 10000 * 10000
        package = 2 ** 20
        print('total money: %d, first package: %d' % (total_money, package))
        spent = 0
        day = 0
        year = 0
        while (spent + package) <= total_money and package >= 1:
            spent += package
            day += 1
            if day % 365 == 0:
                year += 1
                print('year %d, day %d: package=%f, spent=%f' % (year, day, package, spent))
                # Halve the daily package every 2 years
                if year % 2 == 0:
                    package /= 2.0
        print('DIMT OVER! year=%d, day=%d, pack=%f, spent=%f, left=%f' % (year, day, package, spent, (total_money - spent)))


if __name__ == '__main__':
    unittest.main()
|
6,703 | 0438f92aa9a36eaf1059244ec3be4397381f7a86 | import pyodbc
# Demo: first SQL Server query via pyodbc, showing how fetchone() advances
# the cursor one row at a time.
print("Primera consulta SQL Server")

# Connection settings (SQL Server authentication, remote).
# NOTE(review): credentials are hard-coded for this demo; move them to
# configuration or environment variables in real code.
servidor = r"LOCALHOST\SQLEXPRESS"  # raw string: the backslash is literal
bbdd = "HOSPITAL"
usuario = "SA"
password = "azure"

cadenaconexion = ("DRIVER={ODBC Driver 17 for SQL Server};SERVER=" + servidor
                  + "; DATABASE=" + bbdd + "; UID=" + usuario + "; PWD=" + password)

print("Intentando conectar...")
conexion = pyodbc.connect(cadenaconexion)
print("Conectado!!!")
try:
    # A cursor is created from an open connection; it handles both SELECT
    # and action queries alike.
    cursor = conexion.cursor()
    sql = "select * from dept"
    cursor.execute(sql)
    # Each fetchone() moves the cursor forward one row; once past the last
    # row it returns None. There is no way back short of re-executing the
    # query. Four fetches reproduce the original demonstration (including
    # reading past the end of the result set).
    for _ in range(4):
        row = cursor.fetchone()
        print(row)
    # Always close the cursor after reading
    cursor.close()
finally:
    # Release the connection even if the query fails (the original leaked
    # it on any exception).
    conexion.close()
print("Fin de programa")
6,704 | 82083f16c18db35193fa2aa45bc28c5201962f90 |
import re
# Find one 'p' followed by one or more 'i's; the greedy '+' consumes all
# three i's, so the matched text is "piii".
match = re.search(r'pi+', 'piiig')
# Fix: the original used the Python 2 print statement, which is a
# SyntaxError under Python 3.
print('found', match.group() == "piii")
|
6,705 | f73a3bd7665ac9cc90085fcac2530c93bef69d3d | # coding: utf-8
import sys
#from operator import itemgetter
sysread = sys.stdin.readline
read = sys.stdin.read
from heapq import heappop, heappush
from collections import defaultdict
sys.setrecursionlimit(10**7)
import math
#from itertools import product#accumulate, combinations, product
#import bisect# lower_bound etc
#import numpy as np
#from copy import deepcopy
def run():
    """Read N and pairs (A_i, B_i) from stdin and print, modulo 1e9+7, the
    number of non-empty selections such that no two selected pairs satisfy
    A_i*A_j + B_i*B_j == 0 (i.e. are perpendicular as vectors).

    NOTE(review): structure matches the classic "sardines" counting problem
    (group perpendicular direction classes, multiply independent choices);
    confirm against the original problem statement.
    """
    mod = 1000000007
    N, *AB = map(int, read().split())
    A_B = []
    INF = float('inf')
    zerozero = 0  # pairs with a == b == 0: each conflicts with everything
    for i in range(N):
        a = AB[2*i]
        b = AB[2*i+1]
        if a == 0 and b == 0:
            zerozero += 1
        elif b == 0:
            # Horizontal direction, encoded with a sentinel
            A_B.append((INF, 0))
        elif a == 0:
            # Vertical direction, encoded with a sentinel
            A_B.append((0, INF))
        else:
            # Normalise by gcd; keep the sign of the ratio a/b in v so that
            # (a, b) and (-a, -b) collapse to the same direction
            tmp = math.gcd(a, b)
            if a / b > 0:
                v = 1
            else:
                v = -1
            A_B.append((abs(a//tmp), v * abs(b//tmp)))
    # Group each direction with its perpendicular partner: val_list[0]
    # counts one direction, val_list[1] the perpendicular one.
    comb_dict = defaultdict(lambda: [0, 0])
    for ai, bi in A_B:
        if ai == INF:
            comb_dict[0][1] += 1
        elif bi == INF:
            comb_dict[0][0] += 1
        elif bi < 0:
            comb_dict[(ai, bi)][0] += 1
        else:
            # Rotate by 90 degrees so perpendicular pairs share a key
            comb_dict[(bi, -ai)][1] += 1
    ret = 1
    for _, val_list in comb_dict.items():
        a, b = val_list
        if a == 0 or b == 0:
            # Only one direction present: any subset of it is allowed
            ret *= pow(2, max(a, b), mod)
        else:
            # Choose a subset from one side XOR the other; the empty
            # selection is counted once (hence the -1)
            ret *= pow(2, a, mod) + pow(2, b, mod) - 1
        ret %= mod
    # Remove the all-empty selection (-1); each (0,0) pair contributes one
    # extra single-element selection (+zerozero)
    ret += zerozero - 1
    print(ret % mod)


if __name__ == "__main__":
    run()
6,706 | 808fe8f106eaff00cf0080edb1d8189455c4054b | import numpy as np
def find_saddle_points(A):
    """Return the saddle-point values of payoff matrix ``A``.

    A saddle point is an entry that is simultaneously the minimum of its row
    and the maximum of its column. At most one value is appended per row
    (matching the original's per-row output).

    Fix: the original only tested the *last* column where the row minimum
    occurred, so a saddle point at another minimum position was missed; it
    also redundantly re-searched the row for the minimum it already had.
    """
    B = []
    for i in range(A.shape[0]):
        min_r = np.min(A[i])
        # Check every column where the row minimum occurs
        for j in range(A.shape[1]):
            if A[i][j] == min_r and np.max(A[:, j]) == min_r:
                B.append(min_r)
                break
    print('Saddle points are:', B)
    return B
'''
def check_dominant_rows(A):
B = []
for i in range(A.shape[0]-1):
for j in range(i+1, A.shape[0]):
if (all(np.greater_equal(A[i], A[j]))):
if (B.count(j) == 0):
B.append(j)
elif (all(np.less_equal(A[i], A[j]))):
if (B.count(i) == 0):
B.append(i)
print(B)
A = np.delete(A, (B), axis = 0)
print(A)
return A
def check_dominant_columns(A):
B = []
for i in range(A.shape[1]-1):
for j in range(i+1, A.shape[1]):
if (all(np.greater_equal(A[:,i], A[:,j]))):
if (B.count(i) == 0):
B.append(i)
elif (all(np.less_equal(A[:,i], A[:,j]))):
if (B.count(j) == 0):
B.append(j)
print(B)
A = np.delete(A, (B), axis = 1)
print(A)
return A
'''
def canonical_form(A):
    """Build the simplex tableau for the game matrix ``A``.

    Layout: column 0 is the right-hand side (ones, objective 0), then the
    payoff columns, then a slack-variable identity block; the last row is
    the objective row of -1s.
    """
    num_rows = A.shape[0]
    # Payoff matrix with the objective row of -1s appended underneath
    tableau = np.vstack((A, np.full(A.shape[1], -1)))
    # Slack identity block, extended with a zero row for the objective
    slack = np.vstack((np.eye(num_rows), np.zeros(num_rows)))
    tableau = np.hstack((tableau, slack))
    # Right-hand-side column of ones in front; the objective RHS is zero
    rhs = np.ones((tableau.shape[0], 1))
    tableau = np.hstack((rhs, tableau))
    tableau[-1][0] = 0
    print('Канонический вид матрицы:\n', tableau, '\n')
    return tableau
def select_lead_column(A):
    """Pick the pivot column for the next simplex step.

    Scans the objective (last) row for the most negative coefficient; on
    ties the right-most column wins.  Column 0 holds the right-hand side,
    so the scan starts at column 1.
    """
    best_val = A[-1][1]
    best_col = 1
    for col in range(2, A.shape[1]):
        cand = A[-1][col]
        if cand < 0 and best_val >= cand:
            best_val = cand
            best_col = col
    print('Главный столбец с индексом', best_col)
    return best_col
def select_lead_row(A, col):
    """Pick the pivot row via the minimum-ratio test for pivot column ``col``.

    NOTE(review): assumes at least one row has a positive entry in ``col``
    (otherwise the LP is unbounded and this raises NameError) — same
    precondition as the original implementation.
    """
    # Seed the ratio test with the first row (top-down) whose pivot-column
    # entry is positive.
    for k in range(A.shape[0]):
        if A[k][col] > 0:
            best_ratio = A[k][0] / A[k][col]
            best_row = k
            break
    # Minimum-ratio test over the constraint rows; the objective row is
    # excluded, ties resolved in favour of later rows.
    for k in range(1, A.shape[0] - 1):
        if A[k][col] == 0:
            continue
        ratio = A[k][0] / A[k][col]
        if A[k][col] > 0 and best_ratio >= ratio:
            best_ratio = ratio
            best_row = k
    print('Главная строка с индексом', best_row)
    return best_row
def check_solved(A):
    """Return True when the objective (last) row of the tableau has no
    negative coefficient, i.e. the simplex iteration has converged."""
    for col in range(1, A.shape[1]):
        if A[-1][col] < 0:
            return False
    return True
def conversion(A, c, r):
    """Perform one in-place simplex pivot step on tableau ``A``.

    ``c`` and ``r`` select the pivot element: the pivot row is normalised
    so the pivot becomes 1, then the pivot column is eliminated from every
    other row.
    """
    pivot = A[r][c]
    print('Главный элемент = ', pivot, '\n')
    # Normalise the pivot row (pivot was captured as a scalar above, so
    # dividing the whole row at once is equivalent to the per-entry loop).
    A[r] /= pivot
    # Eliminate the pivot column from all remaining rows.
    for row in range(A.shape[0]):
        if row != r:
            A[row] -= A[r] * A[row][c]
    print(A)
def nash_equilibrium(A):
    """Solve the matrix game with payoff matrix ``A``.

    The matrix is first shifted so all payoffs are strictly positive (the
    optimal strategies are unaffected by a constant shift).  If a saddle
    point exists the game is solved in pure strategies; otherwise the mixed
    strategies are found with the simplex method.
    """
    smallest = A.min()
    if smallest < 0:
        A += abs(smallest) + 1
    if find_saddle_points(A):
        print('Здесь решение в чистых стратегиях')
        return
    n_constraints = A.shape[0]
    tableau = canonical_form(A)
    # Pivot until the objective row is free of negative coefficients.
    while not check_solved(tableau):
        lead_col = select_lead_column(tableau)
        lead_row = select_lead_row(tableau, lead_col)
        conversion(tableau, lead_col, lead_row)
    get_strategies(tableau, n_constraints)
def get_strategies(A, f):
    """Print the game value and the second player's mixed strategy.

    ``f`` is the number of original constraint rows; the strategy is read
    from the slack columns of the final tableau.
    """
    game_value = 1 / A[-1][0]
    second_player = [game_value * A[-1][j] for j in range(f + 1, A.shape[1])]
    # TODO: recover the first player's strategy (Q) from the tableau as well.
    print('Цена игры равна = ', game_value)
    # print('Стратегии первого игрока:', Q)
    print('Стратегии второго игрока:', second_player)
'''
n = int(input())
m = int(input())
A = []
for i in range(n):
row = input().split()
if (len(row) != m):
sys.exit()
for i in range(len(row)):
row[i] = int(row[i])
A.append(row)
A = np.array(A)
A = np.array([[0, 2, 7],
[12, 11, 1]])
A = np.array([[6, 5, 7],
[10, 4, 7],
[13, 10, 4],
[7, 11, 5]])
'''
# Demo: solve a 6x6 game in mixed strategies (prints the game value and the
# second player's strategy via nash_equilibrium / get_strategies).
A = np.array([[4, 0, 6, 2, 2, 1],
              [3, 8, 4, 10, 4, 4],
              [1, 2, 6, 5, 0, 0],
              [6, 6, 4, 4, 10, 3],
              [10, 4, 6, 4, 0, 9],
              [10, 7, 0, 7, 9, 8]])
nash_equilibrium(A)
|
6,707 | 586d39556d2922a288a2bef3bcffbc6f9e3dc39d | import os
import random
import cv2
import numpy as np
from keras.preprocessing.image import img_to_array
import numpy as np
import keras
from scipy import ndimage, misc
def preprocess_image(img):
    """Build a 3-channel training image from the green channel of a BGR image.

    Output channels:
      0 - raw green channel
      1 - CLAHE-equalised green channel
      2 - inverse binary threshold (cutoff 120) of the green channel

    Parameters
    ----------
    img : numpy.ndarray
        BGR image of shape (H, W, 3); cast to uint8 before processing.

    Returns
    -------
    numpy.ndarray
        Keras-ready float array of shape (H, W, 3).
    """
    img = img.astype(np.uint8)
    # Only the green channel is used; blue and red are discarded.
    channel_g = cv2.split(img)[1]
    # NOTE: a ndimage.maximum_filter result used to be computed here and was
    # immediately overwritten by the threshold below; the dead computation
    # (and the commented-out OTSU variant) has been removed.
    _, thresholded = cv2.threshold(channel_g, 120, 255, cv2.THRESH_BINARY_INV)
    clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(11, 11))
    clahe_g = clahe.apply(channel_g)
    image = np.zeros((img.shape[0], img.shape[1], img.shape[2]))
    image[:, :, 0] = channel_g
    image[:, :, 1] = clahe_g
    image[:, :, 2] = thresholded
    return img_to_array(image.astype(np.uint8))
def preprocess_mask(img):
    """Return the first channel of ``img`` as a single-channel uint8 mask.

    Generalised from the original hard-coded ``(256, 256, 1)`` reshape so
    masks of any spatial size are accepted; behaviour is unchanged for
    256x256 input.

    Parameters
    ----------
    img : numpy.ndarray
        Mask image of shape (H, W, C); only channel 0 is kept.

    Returns
    -------
    numpy.ndarray
        uint8 array of shape (H, W, 1).
    """
    img = img.astype(np.uint8)
    return img[:, :, 0].reshape(img.shape[0], img.shape[1], 1)
# img=cv2.imread("/home/team6/Project/MiMM_SBILab/patches/train/images/0/1015.jpg")
# img_result=preprocess_image(img)
# cv2.imwrite("preprocess.jpg",img_result)
|
def strictly_greater_than(value):
    """Classify ``value`` against the thresholds 100 and 10.

    Returns "Greater than 100" for value > 100, "Greater than 10" for
    10 < value <= 100, and "10 or less" otherwise.  The placeholder
    conditions ("if value :") have been completed to match the documented
    messages.
    """
    if value > 100:
        return "Greater than 100"
    elif value > 10:
        return "Greater than 10"
    else:
        return "10 or less"
# Change the value 1 below to experiment with different values
print(strictly_greater_than(1))
|
6,709 | c95eaa09241428f725d4162e0e9f6ed3ce6f8fdd | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from django.contrib import admin
from accounts.models import (UserProfile)
admin.site.register(UserProfile)
admin.site.unregister(User)
class CustomUserAdmin(UserAdmin):
    """Admin for the built-in User model with a trimmed changelist view."""
    # Columns shown in the admin changelist.
    list_display = ('username', 'email', 'is_staff','is_active',)
    # Sidebar filters available in the changelist.
    list_filter = ('is_staff', 'is_superuser', 'is_active',)
admin.site.register(User, CustomUserAdmin) |
6,710 | 9db4bca3e907d70d9696f98506efb6d6042b5723 |
from mcse.core.driver import BaseDriver_
class DimerGridSearch(BaseDriver_):
    """
    Generates all dimer structures that should be considered for a grid search
    to find the best dimer arrangements. Grid search is performed over all
    x,y,z positions for the COM and all orientations of the molecule. Only
    dimers with physically relevant intermolecular distances are kept for the
    user by providing maximum and minimum scaled vdW distances as max_sr and
    min_sr. Grid search can be performed using a single unique molecule or
    two distinct molecules as input.

    This method is parallelized using MPI. The user may launch as many MPI ranks
    as they would like in order to reduce the computational burden for each
    rank and speedup the time-to-solution.

    Arguments
    ---------
    min_sr: float
        Minimum specific radius to use for dimer distance checks.
    max_sr: float
        Maximum specific radius multiplier that is allowed to be the minimum
        distance between two dimers, thereby removing dimers formed from molecules
        that are far away.
    box: float,list
        Box size to search over for x,y,z positions. It is assumed that the first
        molecule of the dimer will be placed at 0,0,0. If the box size is a
        float, a box will be placed at 0,0,0 and will extend by this value in
        all directions. If a list is provided, the box will only extend by these
        lengths in the x,y,z directions respectively, and due to symmetry, in
        the -x,-y,-z directions. Default behavior is that the box size will
        automatically be detected based on the size of the input molecules.
    grid_spacing: float
        Grid spacing to use for x,y,z position spacing
    angle_spacing: float
        Spacing of orientation angles to use for every x,y,z position. Assumed
        to be in degrees.
    cutoff: float
        Distance between COM to neglect from dimer grid search.
    tol: float
        Tolerance used for the rmsd comparison. If the difference between the
        structures is less than tol, then they are considered duplicates.
    vdw: list
        List of all vdw radii for all elements in periodic table
    bonds_kw: dict
        Keyword arguments for Structure.get_bonds method. This is used
        for recognizing molecular connectivity.
    inter_list: list
        List of tuples of elements that should be considered for the distance
        calculations. For example ("Li", "O"). Then, if the distance between the
        Li in one molecule and the O in another molecule is outside the min_sr
        to max_sr range then the dimer system will be removed. This is helpful
        to reduce the search space based on chemical intuition.

    """
    def __init__(self,
                 folder="",
                 min_sr=0.75,
                 max_sr=1.30,
                 box=-1,
                 grid_spacing=2.5,
                 angle_spacing=30,
                 inter_list=[],
                 tol=0.1,
                 vdw=[],
                 bonds_kw={},
                 comm=None):
        # NOTE(review): the mutable default arguments (inter_list, vdw,
        # bonds_kw) are kept for interface compatibility; they are harmless
        # here because the driver is not implemented and never mutates them.
        # NotImplementedError (a subclass of Exception, so existing handlers
        # still catch it) is clearer than the previous bare Exception().
        raise NotImplementedError(
            "DimerGridSearch has not been implemented yet"
        )
6,711 | f4094a81f90cafc9ae76b8cf902221cbdbc4871a | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainMenu.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic5) UI class for the main window.

    The window is a QStackedWidget with four pages: Play (game board +
    move controls), Viewer (step through recorded games), SavedGames
    (save-slot buttons) and MainMenu (navigation buttons).  Do not edit by
    hand beyond comments: regenerating from MainMenu.ui overwrites this file.
    """

    def setupUi(self, MainWindow):
        """Create and lay out all widgets of *MainWindow* (generated code)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(750, 529)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setMinimumSize(QtCore.QSize(600, 400))
        MainWindow.setMaximumSize(QtCore.QSize(1000, 1000))
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
        self.centralwidget.setSizePolicy(sizePolicy)
        self.centralwidget.setMaximumSize(QtCore.QSize(1000, 1000))
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.stackedWidget.sizePolicy().hasHeightForWidth())
        self.stackedWidget.setSizePolicy(sizePolicy)
        self.stackedWidget.setObjectName("stackedWidget")
        # --- Page 0: "Play" — game board, move input and save/undo buttons ---
        self.Play = QtWidgets.QWidget()
        self.Play.setObjectName("Play")
        self.gridLayout_5 = QtWidgets.QGridLayout(self.Play)
        self.gridLayout_5.setObjectName("gridLayout_5")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton_6 = QtWidgets.QPushButton(self.Play)
        self.pushButton_6.setObjectName("pushButton_6")
        self.horizontalLayout.addWidget(self.pushButton_6)
        self.pushButton_4 = QtWidgets.QPushButton(self.Play)
        self.pushButton_4.setObjectName("pushButton_4")
        self.horizontalLayout.addWidget(self.pushButton_4)
        self.gridLayout_5.addLayout(self.horizontalLayout, 6, 1, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(20, 249, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.gridLayout_5.addItem(spacerItem, 5, 1, 1, 1)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.label_6 = QtWidgets.QLabel(self.Play)
        self.label_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_6.setObjectName("label_6")
        self.horizontalLayout_2.addWidget(self.label_6)
        self.spinBox = QtWidgets.QSpinBox(self.Play)
        self.spinBox.setObjectName("spinBox")
        self.horizontalLayout_2.addWidget(self.spinBox)
        self.label_7 = QtWidgets.QLabel(self.Play)
        self.label_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_7.setObjectName("label_7")
        self.horizontalLayout_2.addWidget(self.label_7)
        self.spinBox_2 = QtWidgets.QSpinBox(self.Play)
        self.spinBox_2.setObjectName("spinBox_2")
        self.horizontalLayout_2.addWidget(self.spinBox_2)
        self.gridLayout_5.addLayout(self.horizontalLayout_2, 2, 1, 1, 1)
        self.pushButton_8 = QtWidgets.QPushButton(self.Play)
        self.pushButton_8.setObjectName("pushButton_8")
        self.gridLayout_5.addWidget(self.pushButton_8, 4, 1, 1, 1)
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.label_5 = QtWidgets.QLabel(self.Play)
        self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_5.setObjectName("label_5")
        self.horizontalLayout_3.addWidget(self.label_5)
        self.textEdit = QtWidgets.QTextEdit(self.Play)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
        self.textEdit.setSizePolicy(sizePolicy)
        self.textEdit.setMaximumSize(QtCore.QSize(50, 30))
        self.textEdit.setObjectName("textEdit")
        self.horizontalLayout_3.addWidget(self.textEdit)
        self.gridLayout_5.addLayout(self.horizontalLayout_3, 1, 1, 1, 1)
        self.pushButton_7 = QtWidgets.QPushButton(self.Play)
        self.pushButton_7.setObjectName("pushButton_7")
        self.gridLayout_5.addWidget(self.pushButton_7, 3, 1, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.Play)
        self.label_8.setAlignment(QtCore.Qt.AlignCenter)
        self.label_8.setObjectName("label_8")
        self.gridLayout_5.addWidget(self.label_8, 0, 1, 1, 1)
        # Matplotlib host widget for the game board (mplvl_3 receives the canvas).
        self.mplwindow_3 = QtWidgets.QWidget(self.Play)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.mplwindow_3.sizePolicy().hasHeightForWidth())
        self.mplwindow_3.setSizePolicy(sizePolicy)
        self.mplwindow_3.setToolTipDuration(-22)
        self.mplwindow_3.setObjectName("mplwindow_3")
        self.mplvl_3 = QtWidgets.QVBoxLayout(self.mplwindow_3)
        self.mplvl_3.setObjectName("mplvl_3")
        self.gridLayout_5.addWidget(self.mplwindow_3, 0, 0, 7, 1)
        self.stackedWidget.addWidget(self.Play)
        # --- Page 1: "Viewer" — replay recorded games step by step ---
        self.Viewer = QtWidgets.QWidget()
        self.Viewer.setObjectName("Viewer")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.Viewer)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.mplwindow_2 = QtWidgets.QWidget(self.Viewer)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.mplwindow_2.sizePolicy().hasHeightForWidth())
        self.mplwindow_2.setSizePolicy(sizePolicy)
        self.mplwindow_2.setToolTipDuration(-22)
        self.mplwindow_2.setObjectName("mplwindow_2")
        self.mplvl_2 = QtWidgets.QVBoxLayout(self.mplwindow_2)
        self.mplvl_2.setObjectName("mplvl_2")
        self.gridLayout_4.addWidget(self.mplwindow_2, 0, 0, 3, 1)
        self.checkBox = QtWidgets.QCheckBox(self.Viewer)
        self.checkBox.setObjectName("checkBox")
        self.gridLayout_4.addWidget(self.checkBox, 0, 1, 1, 2, QtCore.Qt.AlignHCenter)
        self.label_4 = QtWidgets.QLabel(self.Viewer)
        self.label_4.setObjectName("label_4")
        self.gridLayout_4.addWidget(self.label_4, 1, 1, 1, 2, QtCore.Qt.AlignHCenter)
        self.BackButton = QtWidgets.QPushButton(self.Viewer)
        self.BackButton.setObjectName("BackButton")
        self.gridLayout_4.addWidget(self.BackButton, 2, 1, 1, 1)
        self.pushButton_2 = QtWidgets.QPushButton(self.Viewer)
        self.pushButton_2.setObjectName("pushButton_2")
        self.gridLayout_4.addWidget(self.pushButton_2, 2, 2, 1, 1)
        self.mplwindow = QtWidgets.QWidget(self.Viewer)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.mplwindow.sizePolicy().hasHeightForWidth())
        self.mplwindow.setSizePolicy(sizePolicy)
        self.mplwindow.setToolTipDuration(-22)
        self.mplwindow.setObjectName("mplwindow")
        self.mplvl = QtWidgets.QVBoxLayout(self.mplwindow)
        self.mplvl.setObjectName("mplvl")
        self.gridLayout_4.addWidget(self.mplwindow, 3, 0, 3, 1)
        self.label_3 = QtWidgets.QLabel(self.Viewer)
        self.label_3.setObjectName("label_3")
        self.gridLayout_4.addWidget(self.label_3, 3, 1, 1, 2)
        self.mplfigs = QtWidgets.QListWidget(self.Viewer)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.mplfigs.sizePolicy().hasHeightForWidth())
        self.mplfigs.setSizePolicy(sizePolicy)
        self.mplfigs.setMaximumSize(QtCore.QSize(200, 16777215))
        self.mplfigs.setObjectName("mplfigs")
        self.gridLayout_4.addWidget(self.mplfigs, 4, 1, 1, 2)
        self.pushButton_5 = QtWidgets.QPushButton(self.Viewer)
        self.pushButton_5.setObjectName("pushButton_5")
        self.gridLayout_4.addWidget(self.pushButton_5, 5, 1, 1, 1)
        self.pushButton_3 = QtWidgets.QPushButton(self.Viewer)
        self.pushButton_3.setObjectName("pushButton_3")
        self.gridLayout_4.addWidget(self.pushButton_3, 5, 2, 1, 1)
        self.stackedWidget.addWidget(self.Viewer)
        # --- Page 2: "SavedGames" — autosave plus three manual save slots ---
        self.SavedGames = QtWidgets.QWidget()
        self.SavedGames.setObjectName("SavedGames")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.SavedGames)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.label_2 = QtWidgets.QLabel(self.SavedGames)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
        self.label_2.setSizePolicy(sizePolicy)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.gridLayout_3.addWidget(self.label_2, 0, 0, 1, 1)
        self.Autosave = QtWidgets.QPushButton(self.SavedGames)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Autosave.sizePolicy().hasHeightForWidth())
        self.Autosave.setSizePolicy(sizePolicy)
        self.Autosave.setObjectName("Autosave")
        self.gridLayout_3.addWidget(self.Autosave, 1, 0, 1, 1)
        self.Save1 = QtWidgets.QPushButton(self.SavedGames)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Save1.sizePolicy().hasHeightForWidth())
        self.Save1.setSizePolicy(sizePolicy)
        self.Save1.setObjectName("Save1")
        self.gridLayout_3.addWidget(self.Save1, 2, 0, 1, 1)
        self.Save2 = QtWidgets.QPushButton(self.SavedGames)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Save2.sizePolicy().hasHeightForWidth())
        self.Save2.setSizePolicy(sizePolicy)
        self.Save2.setObjectName("Save2")
        self.gridLayout_3.addWidget(self.Save2, 3, 0, 1, 1)
        self.Save3 = QtWidgets.QPushButton(self.SavedGames)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Save3.sizePolicy().hasHeightForWidth())
        self.Save3.setSizePolicy(sizePolicy)
        self.Save3.setObjectName("Save3")
        self.gridLayout_3.addWidget(self.Save3, 4, 0, 1, 1)
        self.stackedWidget.addWidget(self.SavedGames)
        # --- Page 3: "MainMenu" — navigation buttons (shown first, see below) ---
        self.MainMenu = QtWidgets.QWidget()
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.MainMenu.sizePolicy().hasHeightForWidth())
        self.MainMenu.setSizePolicy(sizePolicy)
        self.MainMenu.setObjectName("MainMenu")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.MainMenu)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.label = QtWidgets.QLabel(self.MainMenu)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
        self.label.setSizePolicy(sizePolicy)
        self.label.setMaximumSize(QtCore.QSize(16777215, 100))
        self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.gridLayout_2.addWidget(self.label, 0, 1, 1, 2)
        self.NewGame = QtWidgets.QPushButton(self.MainMenu)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.NewGame.sizePolicy().hasHeightForWidth())
        self.NewGame.setSizePolicy(sizePolicy)
        self.NewGame.setMinimumSize(QtCore.QSize(0, 0))
        self.NewGame.setMaximumSize(QtCore.QSize(1000, 100))
        self.NewGame.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.NewGame.setObjectName("NewGame")
        self.gridLayout_2.addWidget(self.NewGame, 1, 1, 1, 2)
        self.Continue = QtWidgets.QPushButton(self.MainMenu)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Continue.sizePolicy().hasHeightForWidth())
        self.Continue.setSizePolicy(sizePolicy)
        self.Continue.setMaximumSize(QtCore.QSize(1000, 100))
        self.Continue.setObjectName("Continue")
        self.gridLayout_2.addWidget(self.Continue, 2, 1, 1, 2)
        self.Records = QtWidgets.QPushButton(self.MainMenu)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Records.sizePolicy().hasHeightForWidth())
        self.Records.setSizePolicy(sizePolicy)
        self.Records.setMaximumSize(QtCore.QSize(1000, 100))
        self.Records.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.Records.setObjectName("Records")
        self.gridLayout_2.addWidget(self.Records, 3, 1, 1, 2)
        self.Options = QtWidgets.QPushButton(self.MainMenu)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.Options.sizePolicy().hasHeightForWidth())
        self.Options.setSizePolicy(sizePolicy)
        self.Options.setMaximumSize(QtCore.QSize(500, 100))
        self.Options.setObjectName("Options")
        self.gridLayout_2.addWidget(self.Options, 4, 1, 1, 1)
        self.QuitGame = QtWidgets.QPushButton(self.MainMenu)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.QuitGame.sizePolicy().hasHeightForWidth())
        self.QuitGame.setSizePolicy(sizePolicy)
        self.QuitGame.setMaximumSize(QtCore.QSize(500, 100))
        self.QuitGame.setObjectName("QuitGame")
        self.gridLayout_2.addWidget(self.QuitGame, 4, 2, 1, 1)
        spacerItem1 = QtWidgets.QSpacerItem(265, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem1, 5, 0, 1, 1)
        spacerItem2 = QtWidgets.QSpacerItem(265, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem2, 5, 3, 1, 1)
        self.stackedWidget.addWidget(self.MainMenu)
        self.gridLayout.addWidget(self.stackedWidget, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu and status bars.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 750, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        # Index 3 == the MainMenu page, so the app starts on the menu.
        self.stackedWidget.setCurrentIndex(3)
        self.QuitGame.clicked.connect(MainWindow.close)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (generated; kept separate for i18n)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton_6.setText(_translate("MainWindow", "Save"))
        self.pushButton_4.setText(_translate("MainWindow", "Main Menu"))
        self.label_6.setText(_translate("MainWindow", "x :"))
        self.label_7.setText(_translate("MainWindow", "y :"))
        self.pushButton_8.setText(_translate("MainWindow", "Undo"))
        self.label_5.setText(_translate("MainWindow", "Next move by:"))
        self.textEdit.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Sans Serif\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Red</p></body></html>"))
        self.pushButton_7.setText(_translate("MainWindow", "Move"))
        self.label_8.setText(_translate("MainWindow", "PvP Mode"))
        self.checkBox.setText(_translate("MainWindow", "Toggle Overlay"))
        self.label_4.setText(_translate("MainWindow", "Step Through Game"))
        self.BackButton.setText(_translate("MainWindow", "Backwards"))
        self.pushButton_2.setText(_translate("MainWindow", "Forwards"))
        self.label_3.setText(_translate("MainWindow", "Recorded Games"))
        self.pushButton_5.setText(_translate("MainWindow", "Play Current"))
        self.pushButton_3.setText(_translate("MainWindow", "Main Menu"))
        self.label_2.setText(_translate("MainWindow", "Saved Games"))
        self.Autosave.setText(_translate("MainWindow", "Autosave"))
        self.Save1.setText(_translate("MainWindow", "Save Slot 1"))
        self.Save2.setText(_translate("MainWindow", "Save Slot 2"))
        self.Save3.setText(_translate("MainWindow", "Save Slot 3"))
        self.label.setText(_translate("MainWindow", "Welcome to FIAR"))
        self.NewGame.setText(_translate("MainWindow", "New Game"))
        self.Continue.setText(_translate("MainWindow", "Continue"))
        self.Records.setText(_translate("MainWindow", "Records"))
        self.Options.setText(_translate("MainWindow", "Options"))
        self.QuitGame.setText(_translate("MainWindow", "Quit Game"))
|
6,712 | bf05a096956ca4f256832e2fc6659d42c5611796 | # Generated by Django 3.1.2 on 2021-02-13 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the ``profile_pic`` model/table.

    Fields: auto ``id`` primary key, a ``user`` name string and an optional
    image uploaded under ``profilepicture/``.
    """
    dependencies = [
        ('post', '0014_profilepic_user'),
    ]
    operations = [
        migrations.CreateModel(
            name='profile_pic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user', models.CharField(max_length=100)),
                ('pic', models.ImageField(blank=True, null=True, upload_to='profilepicture/')),
            ],
        ),
    ]
|
6,713 | b7721e95cfb509a7c0c6ccdffa3a8ca2c6bd6033 | from numpy import array, sum
def comp_point_ref(self, is_set=False):
    """Compute the reference point of the Surface as the mean of the
    middle points of its lines.

    Parameters
    ----------
    self : SurfLine
        A SurfLine object
    is_set: bool
        True to update the point_ref property

    Returns
    -------
    point_ref : complex
        the reference point of the surface
    """
    middles = [line.get_middle() for line in self.get_lines()]
    point_ref = sum(array(middles)) / len(middles)
    if is_set:
        self.point_ref = point_ref
    return point_ref
|
6,714 | 95ab8fce573ef959946d50d9af6e893cb8798917 | """Functions for updating and performing bulk inference using an Keras MPNN model"""
from typing import List, Dict, Tuple
import numpy as np
import tensorflow as tf
from molgym.mpnn.data import convert_nx_to_dict
from molgym.mpnn.layers import custom_objects
from molgym.utils.conversions import convert_smiles_to_nx
# TODO (wardlt): Make this Keras message object usable elsewhere
class MPNNMessage:
    """Package for sending an MPNN model over pickle"""

    def __init__(self, model: tf.keras.Model):
        """
        Args:
            model: Model to be sent
        """
        # Architecture is stored as a JSON string, which pickles cleanly
        self.config = model.to_json()
        # Makes a copy of the weights to ensure they are not memoryview objects
        self.weights = [np.array(v) for v in model.get_weights()]

    def get_model(self) -> tf.keras.Model:
        """Rebuild the Keras model from the stored architecture and weights.

        Returns:
            A new tf.keras.Model equivalent to the one originally packaged
        """
        model = tf.keras.models.model_from_json(self.config, custom_objects=custom_objects)
        model.set_weights(self.weights)
        return model
def _merge_batch(mols: List[dict]) -> dict:
"""Merge a list of molecules into a single batch
Args:
mols: List of molecules in dictionary format
Returns:
Single batch of molecules
"""
# Convert arrays to array
# Stack the values from each array
batch = dict(
(k, np.concatenate([np.atleast_1d(m[k]) for m in mols], axis=0))
for k in mols[0].keys()
)
# Compute the mappings from bond index to graph index
batch_size = len(mols)
mol_id = np.arange(batch_size, dtype=np.int)
batch['node_graph_indices'] = np.repeat(mol_id, batch['n_atom'], axis=0)
batch['bond_graph_indices'] = np.repeat(mol_id, batch['n_bond'], axis=0)
# Compute offsets for the connectivity matrix
offset_values = np.zeros(batch_size, dtype=np.int)
np.cumsum(batch['n_atom'][:-1], out=offset_values[1:])
offsets = np.repeat(offset_values, batch['n_bond'], axis=0)
batch['connectivity'] += np.expand_dims(offsets, 1)
return batch
def evaluate_mpnn(model_msg: MPNNMessage, smiles: List[str],
                  atom_types: List[int], bond_types: List[str], batch_size: int = 128) -> np.ndarray:
    """Run inference on a list of molecules

    Args:
        model_msg: Serialized version of the model
        smiles: List of molecules to evaluate
        atom_types: List of known atom types
        bond_types: List of known bond types
        batch_size: List of molecules to create into matches
    Returns:
        Predicted value for each molecule
    """

    # Rebuild the model (clearing the session avoids graph/state leakage
    # between successive calls in the same process)
    tf.keras.backend.clear_session()
    model = model_msg.get_model()

    # Convert all SMILES strings to batches of molecules
    # TODO (wardlt): Use multiprocessing. Could benefit from a persistent Pool to avoid loading in TF many times
    mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types, bond_types) for s in smiles]
    chunks = [mols[start:start + batch_size] for start in range(0, len(mols), batch_size)]
    batches = [_merge_batch(c) for c in chunks]

    # Feed the batches through the MPNN and stack per-batch outputs into
    # one (n_molecules, n_outputs) array
    outputs = [model.predict_on_batch(b) for b in batches]
    return np.vstack(outputs)
# TODO (wardlt): Move to the MPNN library?
class GraphLoader(tf.keras.utils.Sequence):
    """Keras-compatible data loader for training a graph problem"""

    def __init__(self, smiles: List[str], atom_types: List[int], bond_types: List[str],
                 outputs: List[float], batch_size: int, shuffle: bool = True, random_state: int = None):
        """
        Args:
            smiles: List of molecules
            atom_types: List of known atom types
            bond_types: List of known bond types
            outputs: List of molecular outputs
            batch_size: Number of batches to use to train model
            shuffle: Whether to shuffle after each epoch
            random_state: Random state for the shuffling
        """
        super(GraphLoader, self).__init__()

        # Convert the molecules to MPNN-ready formats
        mols = [convert_nx_to_dict(convert_smiles_to_nx(s), atom_types, bond_types) for s in smiles]
        # Object array of (mol_dict, output) pairs so they shuffle together
        self.entries = np.array(list(zip(mols, outputs)))

        # Other data
        self.batch_size = batch_size
        self.shuffle = shuffle

        # Give it a first shuffle, if needed
        self.rng = np.random.RandomState(random_state)
        if shuffle:
            self.rng.shuffle(self.entries)

    def __getitem__(self, item):
        # Get the desired chunk of entries
        start = item * self.batch_size
        chunk = self.entries[start:start + self.batch_size]

        # Get the molecules and outputs out
        mols, y = zip(*chunk)
        x = _merge_batch(mols)
        return x, np.array(y)

    def __len__(self):
        # Get the number of batches
        train_size = len(self.entries)
        n_batches = train_size // self.batch_size

        # Add a partially-full batch at the end
        if train_size % self.batch_size != 0:
            n_batches += 1
        return n_batches
# TODO (wardlt): Evaluate whether the model stays in memory after training. If so, clear graph?
def update_mpnn(model_msg: MPNNMessage, database: Dict[str, float], num_epochs: int,
                atom_types: List[int], bond_types: List[str], batch_size: int = 512,
                validation_split: float = 0.1, random_state: int = 1, learning_rate: float = 1e-3)\
        -> Tuple[List, dict]:
    """Update a model with new training sets

    Args:
        model_msg: Serialized version of the model
        database: Training dataset of molecule mapped to a property
        num_epochs: Number of epochs to run
        atom_types: List of known atom types
        bond_types: List of known bond types
        batch_size: Number of molecules per training batch
        validation_split: Fraction of molecules used for the training/validation split
        random_state: Seed to the random number generator. Ensures entries do not move between train
            and validation set as the database becomes larger
        learning_rate: Learning rate for the Adam optimizer
    Returns:
        model: Updated weights
        history: Training history
    """

    # Rebuild the model
    tf.keras.backend.clear_session()
    model = model_msg.get_model()
    # FIX: the ``lr`` argument alias is deprecated (and removed in newer
    # Keras releases); pass ``learning_rate`` explicitly.
    model.compile(tf.keras.optimizers.Adam(learning_rate=learning_rate), 'mean_absolute_error')

    # Separate the database into molecules and properties
    smiles, y = zip(*database.items())

    # Make the training and validation splits
    #  Use a random number generator with fixed seed to ensure that the validation
    #  set is never polluted with entries from the training set
    # TODO (wardlt): Replace with passing train and validation separately?
    rng = np.random.RandomState(random_state)
    train_split = rng.rand(len(smiles)) > validation_split

    # Make the loaders
    smiles = np.array(smiles)
    y = np.array(y)
    train_loader = GraphLoader(smiles[train_split], atom_types, bond_types, y[train_split],
                               batch_size=batch_size)
    val_loader = GraphLoader(smiles[~train_split], atom_types, bond_types, y[~train_split],
                             batch_size=batch_size, shuffle=False)

    # Run the desired number of epochs
    # TODO (wardlt): Should we use callbacks to get only the "best model" based on the validation set?
    history = model.fit(train_loader, epochs=num_epochs, validation_data=val_loader, verbose=False)
    return [np.array(v) for v in model.get_weights()], history.history
|
6,715 | daf070291bbf59a7a06b129bbde5fd79b5cd46ad | '''
Created on Mar 19, 2019
@author: malte
'''
import gc
import pickle
from hyperopt import tpe, hp
from hyperopt.base import Trials
from hyperopt.fmin import fmin
from config.globals import BASE_PATH
from domain.features import FEATURES
from evaluate import evaluate
from featuregen.create_set import create_set
from helper.df_ops import train_test_split_idx
import lightgbm as lgbm
import numpy as np
import pandas as pd
#PATH
RAW = 'raw/'
SET = 'sample/'
CONF = {
'train_only': False,
'pop_hidden': False,
'path_pop': BASE_PATH + SET,
'min_pop': None,
'price_hidden': False,
'path_price': BASE_PATH + SET,
'min_occurences': None,
'fillna_mean': False,
'path_session': BASE_PATH + SET,
'path_crawl': BASE_PATH + 'crawled/',
'path_poi': BASE_PATH + SET,
'path_meta': BASE_PATH + 'preprocessed/',
'meta_latent': 'd2v',
'path_latent': BASE_PATH + 'competition/',
}
#KEYS
DSKEY = 'dataset'
TRAILKEY = 'trails-lgbm'
def objective( params ):
    """Hyperopt objective: train one LightGBM ranker/classifier on the cached
    feature set and return the negated MRR@A (hyperopt minimizes).

    params -- sampled hyperparameter dict; mutated in place to add the
    LightGBM application/metric keys depending on params['ltr'].
    """
    train = create_set( base_path=BASE_PATH + SET, conf=CONF, key=DSKEY, redo=False )
    test = train.query('train == 0')
    train.query('train == 1', inplace=True)

    X = train[ FEATURES + ['session_id'] ]
    y = train[ 'label' ]
    del train
    gc.collect()

    # X_train / X_valid hold row index labels, not data frames
    X_train, X_valid = train_test_split_idx( X, y, test_size=0.1, shuffle=params['shuffle'] )
    print( 'shuffled sample ',params['shuffle'] )

    if params['ltr']:
        params['application'] = 'lambdarank'
        params['metric'] = 'ndcg'
        params['eval_at'] = '30'
    else:
        params['application'] = 'binary'
        params['metric'] = 'binary_logloss'

    if params['ltr']:
        # group sizes (one entry per session) are required by lambdarank
        q_train = X.loc[X_train].groupby( ['session_id'] ).size().values.astype(np.float32)
        q_valid = X.loc[X_valid].groupby( ['session_id'] ).size().values.astype(np.float32)
        xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)
        ytrain = y.loc[X_train].values.astype(np.float32)
        del X_train
        gc.collect()
        d_train = lgbm.Dataset( xtrain, label=ytrain, group=q_train, feature_name=FEATURES )
        del q_train, xtrain, ytrain
        gc.collect()
        xval = X.loc[X_valid][FEATURES].values.astype(np.float32)
        yval = y.loc[X_valid].values.astype(np.float32)
        del X_valid
        gc.collect()
        d_valid = lgbm.Dataset( xval, label=yval, group=q_valid, feature_name=FEATURES )
        del q_valid, xval, yval
        gc.collect()
    else:
        xtrain = X.loc[X_train][FEATURES].values.astype(np.float32)
        ytrain = y.loc[X_train].values.astype(np.float32)
        d_train = lgbm.Dataset( xtrain, label=ytrain, feature_name=FEATURES )
        # BUG FIX: original read `del xtrain, xtrain, X_train`, which raises
        # NameError on the second delete and never freed ytrain.
        del xtrain, ytrain, X_train
        gc.collect()
        # BUG FIX: original used X[X_valid] / y[X_valid]; X_valid contains row
        # labels, so .loc is required (matching the ltr branch above).
        xval = X.loc[X_valid][FEATURES].values.astype(np.float32)
        yval = y.loc[X_valid].values.astype(np.float32)
        d_valid = lgbm.Dataset( xval, label=yval, feature_name=FEATURES )
        del xval, yval, X_valid
        gc.collect()

    watchlist = [d_train, d_valid]
    evals_result = {}
    model = lgbm.train( params, train_set=d_train, num_boost_round=10000, valid_sets=watchlist, early_stopping_rounds=int(params['early_stopping']), evals_result=evals_result, verbose_eval=10 )

    # score the held-out (train == 0) rows with the best iteration found
    X_test = test[ FEATURES ]
    y_test = model.predict(X_test, num_iteration=model.best_iteration )
    test['prob'] = y_test
    test = test.sort_values(['session_id','prob'], ascending=False)

    # build per-session recommendation lists in the format evaluate() expects
    solution = pd.DataFrame()
    solution['recommendations'] = test.groupby( 'session_id' ).impressions.apply( list )
    solution['confidences'] = test.groupby( 'session_id' ).prob.apply( list )
    # BUG FIX: original called solution.reset_index(drop=True) and discarded
    # the result, leaving session_id in the index so the merge on the
    # 'session_id' column below could not succeed.
    solution = solution.reset_index()
    solution = solution.merge( test[['session_id', 'user_id', 'timestamp', 'step']].drop_duplicates(keep='last'), on='session_id', how='inner' )

    result = evaluate( solution, base=BASE_PATH, dataset=SET )
    print( result.T )

    del solution, test, X_test, y_test, d_train, d_valid, watchlist
    gc.collect()

    # hyperopt minimizes, so negate the ranking quality metric
    return -1 * result['mrr@A'].values[0]
def main():
    """Run one incremental hyperopt trial and persist the Trials object.

    The search state is pickled next to the dataset, so each call loads the
    previous Trials, raises max_evals by trials_step, evaluates one more
    point and saves the state again -- the search is resumable.
    """
    space = {
        'ltr': hp.choice('ltr', [True]),
        'shuffle': hp.choice('shuffle', [False]),
        'num_leaves': hp.choice('num_leaves', list(np.arange(8, 256, 2, dtype=int) )),
        'max_depth': hp.choice('max_depth', list(np.arange(4, 64, 2, dtype=int) )),
        'max_bin': hp.choice('max_bin', list(np.arange(255, 255*4, 5, dtype=int) )),
        'min_data_in_leaf': hp.choice('min_data_in_leaf', list(np.arange(5, 100, 5, dtype=int) )),
        'learning_rate': hp.uniform('learning_rate', 0.01, 0.3),
        'bagging_fraction': hp.uniform('bagging_fraction', 0.2, 1.0),
        'feature_fraction': hp.uniform('feature_fraction', 0.2, 1.0),
        # NOTE: the hyperopt label 'test_size' does not match the key; it is
        # kept unchanged for compatibility with already-saved Trials files.
        'early_stopping': hp.uniform('test_size', 100, 1000),
    }

    trials_step = 1  # how many additional trials to do per call; 1 = save after each iteration
    max_trials = 1   # initial budget when no saved Trials object exists

    trials_path = BASE_PATH + SET + TRAILKEY + '.hyperopt'
    try:  # try to load an already saved trials object, and increase the max
        # BUG FIX: the original used pickle.load(open(...)) and leaked the
        # file handle; the bare `except:` also swallowed KeyboardInterrupt.
        with open(trials_path, "rb") as f:
            trials = pickle.load(f)
        print("Found saved Trials! Loading...")
        max_trials = len(trials.trials) + trials_step
        print("Rerunning from {} trials to {} (+{}) trials".format(len(trials.trials), max_trials, trials_step))
    except Exception:  # no (readable) saved state -> start a fresh search
        trials = Trials()

    best = fmin(fn=objective,
                space=space,
                algo=tpe.suggest,
                trials=trials,
                max_evals=max_trials)
    print("Best:", best)
    print("Num:", max_trials)

    # save the trials object so the next call can resume
    with open(trials_path, "wb") as f:
        pickle.dump(trials, f)
# Run forever: each main() call performs one more hyperopt evaluation and
# persists the Trials object, so the search can be stopped/resumed anytime.
if __name__ == '__main__':
    while True:
        main()
|
6,716 | 515967656feea176e966de89207f043f9cc20c61 | """Config flow for Philips TV integration."""
from __future__ import annotations
from collections.abc import Mapping
import platform
from typing import Any
from haphilipsjs import ConnectionFailure, PairingFailure, PhilipsTV
import voluptuous as vol
from homeassistant import config_entries, core
from homeassistant.const import (
CONF_API_VERSION,
CONF_HOST,
CONF_PASSWORD,
CONF_PIN,
CONF_USERNAME,
)
from homeassistant.data_entry_flow import FlowResult
from . import LOGGER
from .const import CONF_ALLOW_NOTIFY, CONF_SYSTEM, CONST_APP_ID, CONST_APP_NAME, DOMAIN
USER_SCHEMA = vol.Schema(
{
vol.Required(
CONF_HOST,
): str,
vol.Required(
CONF_API_VERSION,
default=1,
): vol.In([1, 5, 6]),
}
)
async def _validate_input(
    hass: core.HomeAssistant, host: str, api_version: int
) -> PhilipsTV:
    """Connect to the TV, fetch its system info and return the client.

    Raises ConnectionFailure when the TV answers but reports no system data.
    """
    tv = PhilipsTV(host, api_version)
    await tv.getSystem()
    await tv.setTransport(tv.secured_transport)
    if not tv.system:
        raise ConnectionFailure("System data is empty")
    return tv
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Philips TV."""

    VERSION = 1

    def __init__(self) -> None:
        """Initialize flow."""
        super().__init__()
        # Data collected so far for the entry being created/updated.
        self._current: dict[str, Any] = {}
        # Connected TV client, set once validation succeeds.
        self._hub: PhilipsTV | None = None
        # Opaque pairing context returned by pairRequest().
        self._pair_state: Any = None
        # Existing entry when re-authenticating, else None.
        self._entry: config_entries.ConfigEntry | None = None

    async def _async_create_current(self) -> FlowResult:
        # Create a new entry from self._current, or -- on the reauth path --
        # merge the collected data into the existing entry and reload it.
        system = self._current[CONF_SYSTEM]

        if self._entry:
            self.hass.config_entries.async_update_entry(
                self._entry, data=self._entry.data | self._current
            )
            self.hass.async_create_task(
                self.hass.config_entries.async_reload(self._entry.entry_id)
            )
            return self.async_abort(reason="reauth_successful")

        return self.async_create_entry(
            title=f"{system['name']} ({system['serialnumber']})",
            data=self._current,
        )

    async def async_step_pair(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Attempt to pair with device."""
        assert self._hub

        errors: dict[str, str] = {}
        schema = vol.Schema(
            {
                vol.Required(CONF_PIN): str,
            }
        )

        if not user_input:
            # First visit: ask the TV to start pairing (it displays a PIN),
            # then show the form requesting that PIN from the user.
            try:
                self._pair_state = await self._hub.pairRequest(
                    CONST_APP_ID,
                    CONST_APP_NAME,
                    platform.node(),
                    platform.system(),
                    "native",
                )
            except PairingFailure as exc:
                LOGGER.debug(exc)
                return self.async_abort(
                    reason="pairing_failure",
                    description_placeholders={"error_id": exc.data.get("error_id")},
                )
            return self.async_show_form(
                step_id="pair", data_schema=schema, errors=errors
            )

        # Second visit: exchange the entered PIN for credentials.
        try:
            username, password = await self._hub.pairGrant(
                self._pair_state, user_input[CONF_PIN]
            )
        except PairingFailure as exc:
            LOGGER.debug(exc)
            if exc.data.get("error_id") == "INVALID_PIN":
                # Wrong PIN is recoverable: redisplay the form with an error.
                errors[CONF_PIN] = "invalid_pin"
                return self.async_show_form(
                    step_id="pair", data_schema=schema, errors=errors
                )

            return self.async_abort(
                reason="pairing_failure",
                description_placeholders={"error_id": exc.data.get("error_id")},
            )

        self._current[CONF_USERNAME] = username
        self._current[CONF_PASSWORD] = password
        return await self._async_create_current()

    async def async_step_reauth(self, entry_data: Mapping[str, Any]) -> FlowResult:
        """Handle configuration by re-auth."""
        self._entry = self.hass.config_entries.async_get_entry(self.context["entry_id"])
        self._current[CONF_HOST] = entry_data[CONF_HOST]
        self._current[CONF_API_VERSION] = entry_data[CONF_API_VERSION]
        # Reuse the normal user step with host/api version pre-seeded.
        return await self.async_step_user()

    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle the initial step."""
        errors = {}

        if user_input:
            self._current = user_input
            try:
                hub = await _validate_input(
                    self.hass, user_input[CONF_HOST], user_input[CONF_API_VERSION]
                )
            except ConnectionFailure as exc:
                LOGGER.error(exc)
                errors["base"] = "cannot_connect"
            except Exception:  # pylint: disable=broad-except
                LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
            else:
                # Use the TV serial number as the unique id; during reauth the
                # duplicate-id abort is skipped since the entry already exists.
                if serialnumber := hub.system.get("serialnumber"):
                    await self.async_set_unique_id(serialnumber)
                    if self._entry is None:
                        self._abort_if_unique_id_configured()

                self._current[CONF_SYSTEM] = hub.system
                self._current[CONF_API_VERSION] = hub.api_version
                self._hub = hub

                if hub.pairing_type == "digest_auth_pairing":
                    return await self.async_step_pair()
                return await self._async_create_current()

        schema = self.add_suggested_values_to_schema(USER_SCHEMA, self._current)
        return self.async_show_form(step_id="user", data_schema=schema, errors=errors)

    @staticmethod
    @core.callback
    def async_get_options_flow(
        config_entry: config_entries.ConfigEntry,
    ) -> OptionsFlowHandler:
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle an options flow for Philips TV."""
    # NOTE(review): the original docstring said "AEMET" -- a copy/paste
    # leftover from another integration; this flow belongs to Philips TV.

    def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
        """Initialize options flow."""
        self.config_entry = config_entry

    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle options flow."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)

        # Single option: whether the integration may use the TV notify API.
        data_schema = vol.Schema(
            {
                vol.Required(
                    CONF_ALLOW_NOTIFY,
                    default=self.config_entry.options.get(CONF_ALLOW_NOTIFY),
                ): bool,
            }
        )
        return self.async_show_form(step_id="init", data_schema=data_schema)
|
6,717 | 87c27711c0089ca2c7e5c7d0e9edb51b9d4008d9 | # -*- coding: utf-8 -*-
import requests
import csv
from lxml import html
import json
class ycombinatorParser():
    # NOTE(review): indentation was reconstructed. Everything below lives in
    # the class *body*, so the whole scrape executes once at class-definition
    # time; instantiating ycombinatorParser() afterwards does nothing extra.
    # The methods take no `self` and are called as plain functions from the
    # class namespace while the class is being built.

    siteurl = 'https://news.ycombinator.com/'

    def getNextPage(pageurl):
        # Return the href of the "More" link on a listing page, '' when absent.
        response = requests.get(pageurl)
        parsed_body = html.fromstring(response.text)
        nextpage=parsed_body.xpath('//a[@class="morelink"]')
        try:
            nexthref=nextpage[0].get('href')
        except IndexError:
            nexthref = ''
        return nexthref

    def parsePage(parsed_body,rownumber):
        # Extract news rows from one listing page, appending each to nix.json.

        def jsonWriteLine(rownumber,title,autor,url,site):
            # Hand-built JSON line (currently unused; json.dump is used instead).
            line = '{"Rownumber": %d,\n "title": "%s",\n "autor": "%s",\n "url": "%s",\n "site": "%s",\n }\n' %(rownumber,title,autor,url,site)
            #print line
            return line

        def getNews(rownews):
            # Pull title/url/site from a story title cell.
            newsdict = {}
            for news in rownews:
                newsdict["title"] = ''.join(news.xpath('./a/text()'))
                for i in news.xpath('./a'):
                    newsdict["url"] = i.get('href')
                newsdict["site"] = ''.join(news.xpath('./span/a/span/text()'))
            return newsdict

        def getAuthor(rowautor):
            # Pull the author name from a subtext cell.
            authordict = {}
            for author in rowautor:
                authordict["autor"] = ''.join(author.xpath('./a[1]/text()'))
            return authordict

        for row in parsed_body.xpath('//tr'):
            rownews = row.xpath('./td[@class="title"][2]')
            rowautor = row.xpath('./td[@class="subtext"][1]')
            datadict = {}
            rowdata = {}
            if rownews:
                datadict = getNews(rownews)
            if rowautor:
                for author in rowautor:
                    datadict = getAuthor(rowautor)
            if datadict:
                # A story and its author arrive in separate <tr> rows: title/
                # url/site are remembered from the previous iteration and only
                # written out once the author row (KeyError path) is seen.
                autor = ''
                try:
                    title=datadict["title"]
                    url=datadict["url"]
                    site=datadict["site"]
                except KeyError:
                    autor = datadict["autor"]
                if autor:
                    rowdata['rownumber'] = str(rownumber)
                    rowdata['title'] = str(title)
                    rowdata['autor'] = str(autor)
                    rowdata['url'] = str(url)
                    rowdata['site'] = str(site)
                    with open('nix.json',mode='a') as f:
                        json.dump(rowdata,f)
                    #outputfile.write(jsonWriteLine(rownumber,title,autor,url,site))
                    #print jsonWriteLine(rownumber,title,autor,url,site)
                    rownumber += 1
                    if rownumber>2:
                        # NOTE(review): debugging leftover? exit() terminates
                        # the whole process after two stored rows.
                        exit()
        return rownumber

    def __unicode__(self):
        # NOTE(review): self.rowdata is never set on instances, so calling
        # this would raise AttributeError (Python 2 style __unicode__).
        return unicode(self.rowdata)

    # --- scrape driver (runs while the class object is being created) ---
    pageflag = True
    rownumber = 1
    pageparse = siteurl
    with open('nix.json',mode='w') as f:
        json.dump('',f)
    while pageflag:
        response = requests.get(pageparse)
        parsed_body = html.fromstring(response.text)
        rownumber = parsePage(parsed_body,rownumber)-1
        pageparse = siteurl+getNextPage(pageparse)
        if pageparse == siteurl:
            # no "More" link -> last page reached
            pageflag = False
if __name__ == '__main__':
ycombinatorParser() |
6,718 | eb50f50e3c072c2f6e74ff9ef8c2fa2eef782aae | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Preprocess data obtained for training
Cora and Citeseer datasets are supported by our example, the original versions of these datasets are as follows:
@inproceedings{nr,
title={The Network Data Repository with Interactive Graph Analytics and Visualization},
author={Ryan A. Rossi and Nesreen K. Ahmed},
booktitle={AAAI},
url={http://networkrepository.com},
year={2015}
}
In this example, we use dataset splits provided by https://github.com/kimiyoung/planetoid (Zhilin Yang, William W. Cohen, Ruslan Salakhutdinov, [Revisiting Semi-Supervised Learning with Graph Embeddings](https://arxiv.org/abs/1603.08861), ICML 2016).
"""
import numpy as np
import mindspore.dataset as ds
def adj_to_bias(adj):
    """Turn batched adjacency matrices into additive attention biases.

    A self loop is added to every graph, then reachable (one-hop or self)
    pairs get bias 0 while all other pairs get -1e9, which suppresses them
    after a softmax.
    """
    identity = np.eye(adj.shape[1])
    # broadcasting adds the self-loop to every graph in the batch at once
    adj_with_loops = adj + identity[np.newaxis, :, :]
    return -1e9 * (1.0 - adj_with_loops)
def get_biases_features_labels(data_dir):
    """Get biases, features, labels from Dataset.

    Loads the MindSpore GraphData stored at data_dir and returns
    (biases, features, labels_onehot), each with a leading batch axis of 1.
    """
    g = ds.GraphData(data_dir)
    nodes = g.get_all_nodes(0)  # node type 0 = all graph nodes
    nodes_list = nodes.tolist()
    # Node features 1 and 2: NOTE(review) presumably the raw feature vector
    # and the integer class label, in that order -- confirm against the
    # dataset conversion script.
    row_tensor = g.get_node_feature(nodes_list, [1, 2])
    features = row_tensor[0]
    features = features[np.newaxis]  # add batch dimension
    labels = row_tensor[1]
    nodes_num = labels.shape[0]
    class_num = labels.max() + 1
    # one-hot encode the integer labels
    labels_onehot = np.eye(nodes_num, class_num)[labels].astype(np.float32)
    # Build the dense adjacency matrix: column 0 of each neighbor row is the
    # source node id, later columns its neighbors (negatives are padding).
    neighbor = g.get_all_neighbors(nodes_list, 0)
    node_map = {node_id: index for index, node_id in enumerate(nodes_list)}
    adj = np.zeros([nodes_num, nodes_num], dtype=np.float32)
    for index, value in np.ndenumerate(neighbor):
        if value >= 0 and index[1] > 0:
            adj[node_map[neighbor[index[0], 0]], node_map[value]] = 1
    adj = adj[np.newaxis]  # batch dimension for adj_to_bias
    biases = adj_to_bias(adj)
    return biases, features, labels_onehot
def get_mask(total, begin, end):
    """Generate a boolean mask of length `total`, True on [begin, end).

    BUG FIX: the original returned np.array(mask, dtype=np.bool); the np.bool
    alias was deprecated in NumPy 1.20 and removed in 1.24, so this crashed
    with AttributeError on current NumPy. The builtin `bool` is the
    documented replacement and produces the identical np.bool_ dtype.
    """
    mask = np.zeros([total]).astype(np.float32)
    mask[begin:end] = 1
    return np.array(mask, dtype=bool)
def load_and_process(data_dir, train_node_num, eval_node_num, test_node_num):
    """Load the graph dataset and split it into train/validation/test parts.

    Returns (feature, biases, y_train, train_mask, y_val, eval_mask,
    y_test, test_mask); labels outside each split's mask are zeroed and every
    array carries a leading batch axis of 1.
    """
    biases, feature, label = get_biases_features_labels(data_dir)

    total = label.shape[0]
    train_mask = get_mask(total, 0, train_node_num)
    eval_mask = get_mask(total, train_node_num, train_node_num + eval_node_num)
    test_mask = get_mask(total, total - test_node_num, total)

    # For each split, keep only that split's one-hot labels (rest are zero).
    split_labels = []
    for mask in (train_mask, eval_mask, test_mask):
        y = np.zeros(label.shape)
        y[mask, :] = label[mask, :]
        split_labels.append(y[np.newaxis])
    y_train, y_val, y_test = split_labels

    train_mask = train_mask[np.newaxis]
    eval_mask = eval_mask[np.newaxis]
    test_mask = test_mask[np.newaxis]
    return feature, biases, y_train, train_mask, y_val, eval_mask, y_test, test_mask
|
6,719 | d442d5c7afd32dd149bb47fc9c4355409c53dab8 | import array as arr
# from array import * # To remove use of 'arr' everytime.
# Typed array of marks: 'i' means the elements are stored as C signed ints.
studentMarks = arr.array('i', [2, 30, 45, 50, 90])

# arrays support plain indexing, just like lists
print(studentMarks[3])

studentMarks.append(95)

# iterate directly over the values
for mark in studentMarks:
    print(mark)

# iterate again, this time by index with a while loop
i = 0
while i < len(studentMarks):
    print(studentMarks[i])
    i = i + 1
|
6,720 | 8da775bd87bfeab5e30956e62bcdba6c04e26b27 | import json
# numbers=[2,3,5,7,11,13]
# JSON file that stores the previously saved list of numbers.
filename = 'numbers.json'

# Deserialize the file contents back into a Python object.
with open(filename) as fp:
    numbers = json.load(fp)
print(numbers) |
6,721 | ea696329a0cfd558fb592ffaf6339a35e8950a3c | class Solution:
def commonFactors(self, a: int, b: int) -> int:
gcd = math.gcd(a, b)
return sum(a % i == 0 and b % i == 0
for i in range(1, gcd + 1))
|
6,722 | 4a4745f202275e45fd78c12431e355fd59ac964a | class SlackEvent:
    # NOTE(review): every accessor below is an unimplemented stub -- the body
    # is `pass`, so each property currently returns None. Presumably each one
    # is meant to expose the same-named field of a raw Slack event payload;
    # confirm before relying on any of them.

    @property
    def client_msg_id(self):
        pass

    @property
    def type(self):
        pass

    @property
    def subtype(self):
        pass

    @property
    def text(self):
        pass

    @property
    def time_stamp(self):
        pass

    @property
    def channel(self):
        pass

    @property
    def channel_id(self):
        pass

    @property
    def event_time_stamp(self):
        pass

    @property
    def channel_type(self):
        pass

    @property
    def thread_time_stamp(self):
        pass

    @property
    def user(self):
        pass

    @property
    def user_id(self):
        pass

    @property
    def bot_id(self):
        pass

    @property
    def actions(self):
        pass

    @property
    def item(self):
        pass

    @property
    def item_channel(self):
        pass

    @property
    def files(self):
        pass

    @property
    def message(self):
        pass
|
6,723 | d1b025ddbf7d0ad48ff92a098d074820a3eb35ed | #!/usr/bin/python
# encoding:utf-8
from selenium.webdriver.common.by import By
import random
import basePage
# "Store stock-in" button
stock_in = (By.XPATH, "//android.widget.TextView[contains(@text,'门店入库')]")
# "Transfer stock-in" button
transfer_in = (By.XPATH, "//android.widget.TextView[contains(@text,'调拨入库')]")
# "Confirm receipt" button
take_receive = (By.ID, '%s:id/take_receive'%basePage.package_name)
# "View details" link
details_text = (By.ID, '%s:id/details_text'%basePage.package_name)
# Transfer-order "awaiting signature" state label
# BUG FIX: the next three locators were missing the `% basePage.package_name`
# substitution, so the literal resource id "%s:id/..." could never match.
transfer_confirm_state = (By.ID, "%s:id/state"%basePage.package_name)
# "Sign with differences" pop-up dialog
transfer_diff_wizard = (By.ID, "%s:id/multiple_dialog_container"%basePage.package_name)
# "Confirm signature" button (inside the dialog)
text_confirm_button = (By.ID, "%s:id/text_confirm"%basePage.package_name)
# "Receive with differences" button
diff_confirm_button = (By.XPATH, "//android.widget.TextView[contains(@text,'差异收货')]")
# Order state tab -- NOTE: state_num is drawn once at import time, so every
# run importing this module uses the same random tab for its lifetime.
state_num = random.randint(1, 4)
order_of_state = (By.XPATH, "//android.widget.TextView[%s]" % state_num)
# Order-state drop-down
title = (By.ID, '%s:id/title'%basePage.package_name)
# Expand order details
fold_image = (By.ID, '%s:id/fold_image'%basePage.package_name)
# "Advanced search" button
order_search = (By.ID, '%s:id/order_search'%basePage.package_name)
# Execute search
search_query = (By.ID, '%s:id/search_query'%basePage.package_name)
# Transfer-order number input
search_order_no = (By.ID, '%s:id/search_order_no'%basePage.package_name)
# Product code input
search_order_sku = (By.ID, '%s:id/search_order_sku'%basePage.package_name)
# Shipping store/warehouse input
search_order_org = (By.ID, '%s:id/search_order_org'%basePage.package_name)
# Transfer type selector
type_edit = (By.ID, '%s:id/type_edit'%basePage.package_name)
# Transfer type option and its submit button
transfer_options1 = (By.ID, '%s:id/options1'%basePage.package_name)
transfer_options_submit = (By.ID, '%s:id/btnSubmit'%basePage.package_name)
# Date-range check boxes (all / outbound / inbound)
all_check = (By.ID, '%s:id/all_check'%basePage.package_name)
out_check = (By.ID, '%s:id/out_check'%basePage.package_name)
in_check = (By.ID, '%s:id/in_check'%basePage.package_name)
# Operator input
operate_edit = (By.ID, '%s:id/operate_edit'%basePage.package_name)
# "Reset" button
search_clear = (By.ID, '%s:id/search_clear'%basePage.package_name)
# "Cancel" button
search_up_cancel = (By.ID, '%s:id/search_up_cancel'%basePage.package_name)
# Transfer-order "completed" state
order_state = (By.XPATH, "//android.widget.TextView[contains(@text,'已完成')]")
# Transfer-order number label
allocate_name = (By.ID, '%s:id/allocate_name'%basePage.package_name)
# Advanced search: pick the start date
start_at = (By.ID, '%s:id/start_at'%basePage.package_name)
# Advanced search: pick the end date
end_at = (By.ID, '%s:id/end_at'%basePage.package_name)
# Advanced search: pick the day
day = (By.ID, '%s:id/day'%basePage.package_name)

# --- H5 (web view) locators ---
# "Show differences only"
btn_view_diff = (By.CLASS_NAME, 'btn-view-diff')
# Search button
searchIcon = (By.ID, 'searchIcon')
# Search criteria input
input_item = (By.CLASS_NAME, 'input-item')
# Clear search content
icon_delete = (By.XPATH, "//div[@class='keyboard']/div[1]/img[@class='icon-delete']")
# Back
back_btn = (By.XPATH, "//div[@class='icon-back']/img[@alt='<']")
# Save
btn_save = (By.CLASS_NAME, 'btn-save')
# Add manually
add_handle = (By.XPATH, "//div[@class='before-focus']/div[1]")
# Add by scanning a barcode
add_border_node = (By.XPATH, "//div[@class='before-focus']/div[2]")
# Import collected data
loggingimport = (By.XPATH, "//div[@class='before-focus']/div[3]")
# More
btn_more = (By.CLASS_NAME, 'btn-more')
# Clear the list
btn_close_native = (By.CLASS_NAME, 'btn-close-native')
# Tap to edit the received quantity
icon_edit = (By.XPATH, "//table[@class='el-table__body']/tbody[1]/tr[1]/td[3]/div[1]/div[1]/div[2]")
# Received-quantity keypad digit (random 1-9, drawn once at import)
div_num = random.randint(1,9)
num_key = (By.XPATH, "//div[@class='keyboard']/div[2]/div[%s]"%div_num)
num_keys = (By.XPATH, "//div[@class='keyboard']/div[2]")
# Confirm the edited quantity
key_confirm = (By.XPATH, "//div[@class='keyboard']/div[2]/div[12]")
# Order content row
result_item = (By.CLASS_NAME, 'result-item')
|
6,724 | 7262d7a82834b38762616a30d4eac38078e4b616 | # 遍历(循环) 出字符串中的每一个元素
# Demo: walk over every character of a string; characters are addressed by
# zero-based index, and len() gives the character count.
str01 = "大发放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&"

# index 3 holds '而'
a = str01[3]
print(a)
# index 1 holds '发'
print(str01[1])

# number of characters in the string
l01 = len(str01)
print(l01)

str01 = "大放而非asdfasfasdfa,,,,aadfa阿斯顿发水电费&&"
len01 = len(str01)          # length of the (new) string
index_last = len01 - 1      # last valid index is always length - 1

i = 0                       # i is the index of the current character
while i <= index_last:
    print(str01[i])
    i += 1
print()
print("上面的循环结束了 执行到这里")
'''
0 1 2 ..... 34
'''
|
6,725 | 4af05a13264c249be69071447101d684ff97063e | import sys
import numpy as np
import math
import matplotlib.pyplot as plt
import random
def load_files(training, testing):
    """Load USPS-style CSV datasets for logistic regression.

    Each row holds 256 pixel features followed by the class label in the last
    column. Features are scaled into [0, 1] and a bias column (value 0) is
    prepended, giving 257 feature columns per example.

    BUG FIX: the labels were read with `usecols=range(-1)` -- an *empty*
    column selection that only worked because genfromtxt treats a falsy
    usecols as "all columns". The full-file read is now explicit.

    Returns (train_features, train_labels, test_features, test_labels).
    """
    def _load(path):
        # one dataset: scaled features with bias column, plus label vector
        feats = np.genfromtxt(path, usecols=range(256), delimiter=",")
        feats /= 255.0
        feats = np.insert(feats, 0, 0, axis=1)
        labels = np.genfromtxt(path, delimiter=",")[:, -1]
        return feats, labels

    tr_feat, tr_exp = _load(training)
    te_feat, te_exp = _load(testing)
    return tr_feat, tr_exp, te_feat, te_exp
def sigmoid(weight, case):
    """Numerically stable logistic function of the score w.x.

    Returns 1 / (1 + exp(-w.x)). BUG FIX: the original computed
    math.exp(-w.x) directly and caught the OverflowError for scores below
    about -709, substituting math.exp(500); the two-branch form below never
    exponentiates a positive argument, so it cannot overflow at all.
    """
    z = np.dot(weight.T, case)
    if z >= 0:
        return 1.0 / (1.0 + math.exp(-z))
    # z < 0: exp(z) underflows harmlessly towards 0, prediction tends to 0
    ez = math.exp(z)
    return ez / (1.0 + ez)
def check_accuracy(w, x, y):
    """Fraction of examples whose sign-thresholded score matches the label.

    An example is predicted positive when w.x >= 0; it is counted correct
    when that prediction agrees with its 0/1 entry in y.
    """
    hits = 0
    for features, label in zip(x, y):
        score = np.dot(w.T, features)
        if score >= 0.0 and label == 1:
            hits += 1
        elif score < 0.0 and label == 0:
            hits += 1
    return hits / x.shape[0]
def gradient(training_data, training_expected, testing_data, testing_expected, reg_strength=None, iterations=100, learning_rate=0.00005):
    """Batch gradient descent for logistic regression.

    Trains weights on (training_data, training_expected) and records the
    train/test accuracy after every iteration.

    reg_strength -- optional L2 penalty strength (anything not castable to
    float disables regularization, matching the original behavior).
    Returns (training_accuracies, testing_accuracies), one entry per iteration.
    """
    training_accuracies = []
    testing_accuracies = []

    if reg_strength is not None:
        try:
            reg_strength = float(reg_strength)
        except (TypeError, ValueError):
            reg_strength = None

    w = np.zeros(training_data.shape[1])  # one weight per feature (incl. bias col)
    for _ in range(iterations):
        gradient_batch = np.zeros(training_data.shape[1])
        for i in range(training_data.shape[0]):
            predicted = sigmoid(w, training_data[i])
            # accumulate (p - y) * x over the whole batch
            gradient_batch += (predicted - training_expected[i]) * training_data[i]
        if reg_strength is not None:
            # BUG FIX: the gradient of the L2 penalty (lambda/2)*||w||^2 is
            # lambda * w; the original added the scalar lambda * ||w|| to
            # every component instead.
            gradient_batch += reg_strength * w
        w -= learning_rate * gradient_batch
        training_accuracies.append(check_accuracy(
            w, training_data, training_expected))
        testing_accuracies.append(check_accuracy(
            w, testing_data, testing_expected))
    return training_accuracies, testing_accuracies
# ---- script entry: parse argv, train, then plot the accuracy curves ----
args = sys.argv[1:]
# BUG FIX: the original tested len(args) < 2 but reads args[2] (the learning
# rate), so a missing third argument crashed with IndexError instead of
# printing this usage message.
if len(args) < 3:
    print("You must include a training and testing dataset, as well as a learning rate", file=sys.stderr)
    print("Like so: python3 q2_1.py usps_train.csv usps_test.csv learning_rate")
    exit(1)

iterations = list(range(1, 101))  # x-axis: iteration numbers 1..100

training_features, training_expected, test_features, test_expected = load_files(
    args[0], args[1])
training_accuracies, testing_accuracies = gradient(
    training_features, training_expected, test_features, test_expected, learning_rate=float(args[2]))

plt.ylabel("Accuracy")
plt.xlabel("Iteration")
plt.title(f"Accuracy as Function of Iteration Learing Rate = {args[2]}")
plt.plot(iterations, training_accuracies, 'b', label='training')
plt.plot(iterations, testing_accuracies, 'r', label='testing')
plt.legend()
# BUG FIX: savefig must come before show(); after the interactive window is
# closed the canvas is empty, so the saved PNG used to be blank.
plt.savefig("graph_results.png")
plt.show()
|
6,726 | b49e5b40ce1e16f1b7c0bd9509daf94f36c51256 | from app.api import app
#: Bind address and port for the development server.
_HOST = '0.0.0.0'
_PORT = 5001


def main():
    """Start the API server, listening on all interfaces on port 5001."""
    app.run(host=_HOST, port=_PORT)


if __name__ == '__main__':
    main()
|
6,727 | 4cc6a9c48e174b33ed93d7bda159fcc3a7b59d4c | from django.contrib import admin
from .models import Profile, Address
# Expose the account-related models in the Django admin site.
for model in (Profile, Address):
    admin.site.register(model)
|
6,728 | 5e78992df94cbbe441495b7d8fb80104ec000748 | #!/usr/bin/python2
import md5
from pwn import *
import time
LIMIT = 500
TARGET = "shell2017.picoctf.com"
PORT = 46290
FILE = "hash.txt"
def generate_hashes(seed):
    """Build the 1000-link md5 hash chain derived from *seed*.

    Entry k is md5 applied (k+1) times to the seed; the seed itself is not
    included in the returned list.
    """
    chain = []
    link = seed
    for _ in range(1000):
        link = md5.new(link).hexdigest()
        chain.append(link)
    return chain
def find_prev_hash(array, current_hash):
    """Return the chain entry immediately before *current_hash*.

    When current_hash is the first entry, index -1 wraps around and the last
    entry is returned.
    """
    position = array.index(current_hash)
    return array[position - 1]
def skip_intro(conn):
    """Drain the six banner lines the service prints on connect."""
    for _ in range(6):
        conn.recvline()
def skip_question(conn):
    """Drain the four lines of question text before the prompt."""
    for _ in range(4):
        conn.recvline()
def go_to_register(conn):
    # Drain the banner and question text the service prints on connect, then
    # answer the menu prompt with "r" to enter the registration flow.
    #there's a bunch of crap that needs to be skipped for recvline()
    skip_intro(conn)
    skip_question(conn)
    conn.sendline("r")
def extract_hash_id():
    # Register one account with the remote service and capture the
    # (hash id, seed) pair it issues; returns None when validation fails.
    conn = remote(TARGET,PORT)
    go_to_register(conn)
    # The registration line embeds both values: token 7 looks like the hash
    # id and the last token the seed -- NOTE(review): derived from the fixed
    # banner format, confirm against the live service output.
    id_and_seed = conn.recvline().split()
    hash_id = id_and_seed[7].rstrip('\n')
    seed = id_and_seed[-1].rstrip('\n')
    conn.recvline()
    current_hash = conn.recvline().rstrip('\n')
    # Challenge: send the hash *preceding* the shown one in the md5 chain
    # generated from the seed.
    prev_hash = find_prev_hash(generate_hashes(seed),current_hash)
    conn.sendline(prev_hash)
    #Yep is in the validated hash, so we will use this as the success metric
    if "Yep!" in conn.recvline():
        conn.close()
        return (hash_id, seed)
    conn.close()
    return None
def main():
    """Harvest up to 1000 (hash_id, seed) pairs and append them to hash.txt.

    Failed attempts back off for a second; successful but *unvalidated*
    attempts come back as None from extract_hash_id().
    """
    hash_list = []
    for i in range(1000):
        try:
            hash_list.append(extract_hash_id())
        except Exception:
            # connection hiccup -- back off briefly and keep harvesting
            time.sleep(1)
            continue
    # consistency: reuse the FILE constant instead of re-hardcoding the path
    with open(FILE, "a") as fp:
        for tup in hash_list:
            # BUG FIX: extract_hash_id() returns None when the service does
            # not answer "Yep!"; the original crashed on tup[0] for those.
            if tup is None:
                continue
            fp.write("{} {}\n".format(tup[0], tup[1]))
    #I went back into the code to use this function whenever I found a match in my hash text file
    # print(find_prev_hash(generate_hashes("ead81fe8cfe9fda9e4c2093e17e4d024"),"58cb392a127b699c6f22f228e23ae73e"))
# Run the harvester only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|
6,729 | 84a13e3dea885d6c4a5f195dfac51c7110102fc2 | #!/usr/bin/env python3
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
from ev3dev2.sensor.lego import ColorSensor, UltrasonicSensor
from ev3dev2.power import PowerSupply
# initiate color sensors
# the colour sensor needs to be between 1-2 cm away from the surface you are trying to measure. (color mode)
# TODO confirm the mapping
# left/right refer to the robot's facing direction -- TODO confirm wiring
colorSensor_lt = ColorSensor(INPUT_4)
colorSensor_rt = ColorSensor(INPUT_1)
ultrasonicSensor = UltrasonicSensor(INPUT_2)
# Available color-sensor modes: COL-REFLECT COL-AMBIENT COL-COLOR RGB-RAW
colorSensor_mode_default = "COL-COLOR"
colorSensor_lt.mode="COL-COLOR"
colorSensor_rt.mode="COL-COLOR"
# ultrasonic distance readings in centimetres
ultrasonicSensor.mode="US-DIST-CM"
powerSupply = PowerSupply()
def getColorString(color_reading):
    """Map an EV3 color-sensor code to a color name string.

    Codes 2 (blue) and 4 (yellow) are deliberately reported as "white",
    matching the original elif chain; unknown codes come back as their
    string representation.
    """
    names = {
        1: "black",
        2: "white",   # sensor says blue
        3: "green",
        4: "white",   # sensor says yellow
        5: "red",
        6: "white",
        7: "brown",
    }
    if color_reading in names:
        return names[color_reading]
    return str(color_reading)
def getColorReadingInString(sensor_positon):
    """Sample the requested color sensor and return its color name.

    sensor_positon is "left" or "right"; any other value yields None
    (matching the original fall-through behavior).
    """
    sensors = {"left": colorSensor_lt, "right": colorSensor_rt}
    sensor = sensors.get(sensor_positon)
    if sensor is None:
        return None
    return getColorString(sensor.value())
|
6,730 | 787397473c431d2560bf8c488af58e976c1864d0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2021-04-09 06:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the changes_num and display_name columns to the CiBuild model."""

    # must be applied after the reportjob.finished_successfully migration
    dependencies = [
        ('lkft', '0021_reportjob_finished_successfully'),
    ]

    operations = [
        # integer counter, defaulting to 0 for existing rows
        migrations.AddField(
            model_name='cibuild',
            name='changes_num',
            field=models.IntegerField(default=0),
        ),
        # optional human-readable build name (NULL for existing rows)
        migrations.AddField(
            model_name='cibuild',
            name='display_name',
            field=models.CharField(max_length=255, null=True),
        ),
    ]
|
6,731 | b39c783cbaff2915c8864ce0b081b5bf052baee5 | from django.urls import path
from .views import *
# Routes for the views imported above via `from .views import *`.
urlpatterns = [
    path('country',Country_Data,name='country_data'),
    path('tours',Scrape_Data, name='scrape_data'),
    # NOTE(review): route name casing is inconsistent with the others; kept
    # as-is because templates/reverse() may already depend on it.
    path('draws', Draw_Data, name='Draw_data')
]
|
6,732 | 4f21fb4168ed29b9540d3ca2b8cf6ef746c30831 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# http://stackoverflow.com/questions/5276967/python-in-xcode-4
"""tv_write_xyzt2matlab.py: TremVibe Write Accelerometer XYZ and Timestamp to .m file"""
__author__ = "Salvador Aguinaga"
import sys
import MySQLdb
import math
from itertools import groupby
import csv
##########----------##########----------##########----------##########----------
def parse_email_records(row_count, emails):
    '''Group e-mail records by local part (the text before the "@").

    emails is an iterable of DB rows whose first element is an address;
    row_count is unused but kept for interface compatibility.
    Returns {local_part: [address, ...]}.
    '''
    def local_part(address):
        return address.split('@', 1)[0]

    addresses = sorted((row[0] for row in emails), key=local_part)
    grouped = {}
    for key, group in groupby(addresses, key=local_part):
        grouped[key] = list(group)
    return grouped
#db = MySQLdb.connect('192.168.1.112', 'tremcam', 'trem00xB','neurobit')
#cursor = db.cursor()
#
#query = sys.argv[1]
#cursor.execute(query)
#result = cursor.fetchall()
##########----------##########----------##########----------##########----------
if (len(sys.argv) < 1):
print "Usage: ql2matlabcsvdat.py hostname email-address" #`"sql query`""
print " : hostname = cloud server hostname running the mysql service"
exit(0)
else:
email = sys.argv[1]
server = 'localhost'
try:
conn = MySQLdb.Connection(server, 'triaxim', 'tria00xB', 'neurobit')
cursr = conn.cursor()
query = """SELECT accelX, accelY, accelZ, accelTs from TremCamTbl WHERE email =%s"""
cursr.execute(query,email)
conn.commit()
row_count = cursr.rowcount
results = cursr.fetchall()
#print type(results)
#print len(results)
f = open('/tmp/tv_user_accelxyzt.m', 'w')
#for t in results:
#line = ' '.join(str(x) for x in t)
# line = " ".join(map(str, results))
# f.write(line + '\n')
f.write('%% Matlab .m formated file \n')
f.write('%% Accelerometer data for user:%s \n' % (email))
f.write('%% Example how to plot X values of record 6:\n')
f.write('%% plot ((rec6matrix(4,:) - rec6matrix(4,1))/1e+6,rec6matrix(1,:))\n')
rec_no = 0
for record in results:
#f.write('%s %s %s %s\n' % tuple)
#print len(record)
rec_no +=1
f.write('%% record #:%d\n' % rec_no)
#f.write('%% Xvalues\bYvalues\bZvalues\bTimestamp\n')
if len(record) >= 4:
f.write('xVal%d = [%s];\n' % (rec_no,record[0]))
f.write('yVal%d = [%s];\n' % (rec_no,record[1]))
f.write('zVal%d = [%s];\n' % (rec_no,record[2]))
f.write('tVal%d = [%s];\n' % (rec_no,record[3]))
f.write('rec%dmatrix = [xVal%d;yVal%d;zVal%d;tVal%d];\n'%(rec_no,rec_no,rec_no,rec_no,rec_no))
f.close()
# emailDict = parse_email_records(row_count, results)
# with open("/var/www/tremvibe/subject_accel_sensor_dat.csv", "wb") as f:
# csv.writer(f).writerow(['Subject', 'Records'])
# for email in emailDict:
#print email,',',len(emailDict[email])
# csv.writer(f).writerow([email, len(emailDict[email])])
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
sys.exit(1)
finally:
if conn:
conn.close()
|
6,733 | db341c3686c53f1cd9fe98c532f17e872952cbba | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 17:28:48 2018
@author: otalabay
"""
# Integer type codes -- presumably message/state identifiers for a
# traffic/driving-assistance protocol (names suggest local info, lane
# speed management, traffic light status, danger warnings and parking
# management); confirm against the consumer of these constants.
LOCAL_INFO = 1
LSM = 2
TLS = 3
TLS_STOP = 4
DANGER = 5
STOP_DANGER = 6
PM = 7
PM_STOP = 8
6,734 | 56640454efce16e0c873d557ac130775a4a2ad8d | n,m=map(int,input().split())
l=list(map(int,input().split()))
t=0
# result[i] = number of ways to form total i from the first t values in l
# (each value usable any number of times).  n and m are read on the
# previous input line; result ends up with n+1 entries: n zeros plus the
# leading 1 inserted below (one way to make the empty sum).
result=[0 for i in range(0,n)]
result.insert(0,1)
while(t<m):
    #print(t)
    # Unbounded-knapsack update: every total i >= l[t] gains the ways
    # that end with one more copy of value l[t].
    for i in range(l[t],n+1):
        result[i]=result[i]+result[i-l[t]]
    t=t+1
print(result[-1])
0 1 2 3 4
1 [1,1,1,1,1]
2 [1 1 2 2 3]
3 [1 1 2 3 4]
|
6,735 | 9f2105d188ac32a9eef31b21065e9bda13a02995 | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2015] Michał Szczygieł, M4GiK Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
from django import template
register = template.Library()
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform(form):
    """
    Render the given form through the fieldsetForm template with the
    ``required_fields`` flag enabled (presumably the template then marks
    required fields -- confirm against tags/fieldsetForm.html).
    @param form: the form instance to render
    @return: context dict for the inclusion template
    """
    context = dict(form=form, required_fields=True)
    return context
@register.inclusion_tag('tags/fieldsetForm.html')
def show_fieldsetform_nrf(form):
    """
    Render the given form through the fieldsetForm template with the
    ``required_fields`` flag disabled ("nrf" = no required fields).
    @param form: the form instance to render
    @return: context dict for the inclusion template
    """
    context = dict(form=form, required_fields=False)
    return context
@register.inclusion_tag('tags/sendForm.html')
def show_sendform(form):
    """
    Render the given form through the sendForm template with the
    ``required_fields`` flag enabled.
    @param form: the form instance to render
    @return: context dict for the inclusion template
    """
    context = dict(form=form, required_fields=True)
    return context
@register.inclusion_tag('tags/loginForm.html')
def show_loginform(form):
    """
    Render the given login form through the loginForm template with the
    ``required_fields`` flag enabled.
    @param form: the form instance to render
    @return: context dict for the inclusion template
    """
    context = dict(form=form, required_fields=True)
    return context
@register.inclusion_tag('tags/accountForm.html')
def show_accountform(form, is_superuser):
    """
    Render the given account form through the accountForm template; the
    ``required_fields`` flag is disabled and the superuser flag is passed
    through so the template can show admin-only fields.
    @param form: the form instance to render
    @param is_superuser: whether the requesting user is a superuser
    @return: context dict for the inclusion template
    """
    context = dict(form=form, required_fields=False, is_superuser=is_superuser)
    return context
|
6,736 | 34db3c9998e1d7647dd954e82e18147504cc74fc | """
@version:
author:yunnaidan
@time: 2019/07/22
@file: download_mseed.py
@function:
"""
from obspy.clients.fdsn import Client
from obspy.core import UTCDateTime
import numpy as np
import obspy
import os
import re
import time
import glob
import shutil
import platform
import subprocess
import multiprocessing
def load_stations(filename):
    """Parse a station list file into [network, station, channel] triples.

    The file is comma-separated with a header line
    (Network,Station,Channel,Latitude,Longitude); the header is skipped
    and only the first three fields of each row are kept.

    :param filename: path to the station list file
    :return: list of [net, sta, chan] string lists
    """
    with open(filename, 'r') as f:
        sta_data = f.readlines()
    sta_list = []
    # Skip the header row; plain str.split replaces the re.split calls
    # (no regex needed for a fixed one-character delimiter).
    for sta_info in sta_data[1:]:
        fields = sta_info.split(',')
        sta_list.append(fields[:3])
    return sta_list
def set_folders(out_path, startday, endday):
    """Create <out_path>/<YYYY>/<YYYYMMDD> directories for every day in
    the inclusive range [startday, endday].

    :param out_path: root output directory
    :param startday: first day (UTCDateTime-like: .year/.month/.day, +seconds)
    :param endday: last day, inclusive
    :return: None
    """
    current = startday
    while current <= endday:
        yyyy = str(current.year).zfill(4)
        stamp = yyyy + str(current.month).zfill(2) + str(current.day).zfill(2)
        target = os.path.join(out_path, yyyy, stamp)
        if not os.path.exists(target):
            os.makedirs(target)
        # Advance by one day (86400 seconds).
        current = current + 86400
    return None
def obspy_download(
        client,
        net_name,
        sta_name,
        chn_name,
        starttime,
        endtime,
        out_path,
        time_thre=10):
    """Download one channel's waveform window to <out_path>/<YYYY>/<YYYYMMDD>.

    Incremental: does nothing if the .mseed file already exists.  Retries
    up to ``time_thre`` times, pausing 0.5 s between failed attempts, and
    appends a 'No data' line to download.log when every attempt fails.

    :param client: obspy FDSN client
    :param time_thre: maximum number of attempts
    :return: None
    """
    year_folder = str(starttime.year)
    day_folder = str(starttime.year).zfill(
        4) + str(starttime.month).zfill(2) + str(starttime.day).zfill(2)
    out_folder = os.path.join(out_path, year_folder, day_folder)
    outfile = os.path.join(
        out_folder, net_name + '.' + sta_name + '.' + chn_name + '.mseed')
    # Incremental download
    if not os.path.exists(outfile):
        t = 0
        flag = False
        while not flag and t < time_thre:
            try:
                client.get_waveforms(
                    network=net_name,
                    station=sta_name,
                    location='--',
                    channel=chn_name,
                    starttime=starttime,
                    endtime=endtime,
                    filename=outfile)
                flag = True
            except BaseException:
                # Best-effort retry: wait only when the request failed.
                # (Previously the 0.5 s sleep also ran after a success,
                # adding half a second to every download.)
                time.sleep(0.5)
            t += 1
        if not flag:
            with open('download.log', 'a') as f:
                f.write('No data: ' + outfile + '\n')
    return None
def obspy_download_parallel(
        data_center,
        startday,
        endday,
        sta_file,
        out_path,
        cores=1):
    """Download one file per station/channel for every day in
    [startday, endday] from an FDSN data center, logging progress to
    download.log.

    NOTE(review): despite the name, only the serial (cores == 1) path is
    implemented; any other value of `cores` silently skips the downloads.
    """
    set_folders(out_path, startday, endday)
    sta_list = load_stations(sta_file)
    with open('download.log', 'a') as f:
        f.write('>>> ' + str(time.localtime(time.time())) + '\n')
        f.write('The number of stations is: ' + str(len(sta_list)) + '\n')
    day = startday
    while day <= endday:
        t_b = time.time()
        with open('download.log', 'a') as f:
            f.write('Day: ' + str(day) + '\n')
        print(day)
        starttime = day
        endtime = day + 86400  # one day in seconds
        # A fresh client is created for every day.
        client = Client(data_center)
        if cores == 1:
            for i in range(len(sta_list)):
                sta = sta_list[i]
                print (sta)
                net_name = sta[0]
                sta_name = sta[1]
                chan_name = sta[2]
                obspy_download(
                    client,
                    net_name,
                    sta_name,
                    chan_name,
                    starttime,
                    endtime,
                    out_path)
        else:
            # Parallel path not implemented.
            pass
        t_e = time.time()
        with open('download.log', 'a') as f:
            f.write('Using time: ' + str(t_e - t_b) + '\n')
        day = day + 86400
    return None
def stp_run_download(sta_list, download_date, out_path):
    """Fetch one day of miniSEED for all stations by driving the
    interactive `stp` command-line client through a stdin pipe, then move
    the produced files into <out_path>/<year>/<yyyymmdd>.

    Stations whose files already exist under <out_path>_old are skipped
    (incremental download).
    """
    with open('download.log', 'a') as f:
        f.write(str(download_date) + '\n')
    tb = time.time()
    year = str(download_date.year).zfill(4)
    month = str(download_date.month).zfill(2)
    day = str(download_date.day).zfill(2)
    day_folder = year + month + day
    out_folder = os.path.join(out_path, year, day_folder)
    out_folder_old = os.path.join(out_path + '_old', year, day_folder)
    # Spawn stp and feed it a command script on stdin.
    p = subprocess.Popen(['stp'], stdin=subprocess.PIPE)
    s = "MSEED \n"
    for i in range(len(sta_list)):
        sta = sta_list[i]
        net_name = sta[0]
        sta_name = sta[1]
        chan_name = sta[2]
        # Skip stations already downloaded into the "_old" tree.
        out_sta_file = glob.glob(
            os.path.join(
                out_folder_old, '*%s.%s.%s*' %
                (net_name, sta_name, chan_name)))
        if len(out_sta_file) == 0:
            s += "WIN {} {} {} {}/{}/{},00:00:00 +1d \n".format(
                net_name, sta_name, chan_name, year, month, day)
    s += "quit \n"
    p.communicate(s.encode())
    # stp writes files named <yyyymmdd>... into the CWD; move them home.
    out_files = glob.glob('%s%s%s*.*' % (year, month, day))
    for out_file in out_files:
        shutil.move(out_file, out_folder)
    te = time.time()
    with open('download.log', 'a') as f:
        f.write('Using time: ' + str(te - tb) + '\n')
def stp_download_parallel(startday, endday, sta_file, out_path, cores=1):
    '''
    :param startday: obspy.core.utcdatetime.UTCDateTime
    :param endday: obspy.core.utcdatetime.UTCDateTime
    :param sta_file: Network,Station,Channel,Latitude,Longitude
    :param out_path:
    :param cores:
    :return:
    '''
    if os.path.exists('download.log'):
        os.remove('download.log')
    with open('download.log', 'a') as f:
        f.write('>>> ' + str(time.localtime(time.time())) + '\n')
    set_folders(out_path, startday, endday)
    sta_list = load_stations(sta_file)
    # NOTE(review): this pool is created but never used or closed (the
    # parallel code below is commented out), so `cores` idle worker
    # processes are spawned for nothing -- the loop below is serial.
    pool = multiprocessing.Pool(processes=cores)
    tasks = []
    day = startday
    while day <= endday:
        print(day)
        # tasks.append((sta_list, day, out_path))
        stp_run_download(sta_list, day, out_path)
        day = day + 86400
    '''
    # chunksize is how many tasks will be processed by one processor
    rs = pool.starmap_async(stp_run_download, tasks, chunksize=1)
    # close() & join() is necessary
    # No more work
    pool.close()
    # simple progress bar
    while (True):
        remaining = rs._number_left
        print("finished:{0}/{1}".format(len(tasks) - remaining, len(tasks)),
              end='\r')  # '\r' means remove the last line
        if (rs.ready()):
            break
        time.sleep(0.5)
    # Wait for completion
    pool.join()
    '''
    return None
if __name__ == '__main__':
    # Project roots on the two machines this script runs on: Darwin is
    # the local laptop, Linux the remote server.
    LOCAL_PATH = '/Users/yunnaidan/Project/Dynamic_Triggering/Workspace/Central_California'
    REMOTE_PATH = '/home/yunnd/Workspace/Dynamic_triggering/Central_California'
    if platform.system() == 'Darwin':
        ROOT_PATH = LOCAL_PATH
    if platform.system() == 'Linux':
        ROOT_PATH = REMOTE_PATH
    startday = UTCDateTime('2009-01-03')
    endday = UTCDateTime('2009-01-05')
    sta_file = os.path.join(
        ROOT_PATH,
        'data/station_info/stations_CI_selected_for_download_BH.txt')
    out_path = os.path.join(ROOT_PATH, 'data/time_series/raw_data/mseed')
    data_center = 'SCEDC'
    # Serial FDSN download; the stp-based alternative is kept commented out.
    obspy_download_parallel(
        data_center,
        startday,
        endday,
        sta_file,
        out_path,
        cores=1)
    # stp_download_parallel(startday, endday, sta_file, out_path, cores=15)
    pass
|
6,737 | 7da8a074704b1851ac352477ef72a4c11cea1a0b | #_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-#
# PROJECT : RegCEl - Registro para el Consumo Eléctrico #
# VERSION : 1.2 #
# AUTHOR : Yunior Barceló Chávez barceloch@gmail.com #
# DATE : 9/01/2021 #
#_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-#
"""
This file contains different customized widgets
Availabe classes:
-----------------
- HoverOneLineListItem
- LabelForList
- LabelForListStudent
- AdminInfoLabel
- AdminInfoEditField
- CustomRecycleView
"""
from kivymd.uix.list import OneLineListItem
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.recycleview import RecycleView
from kivy.uix.bubble import Bubble, BubbleButton
from hoverable import HoverBehavior
from kivy.uix.floatlayout import FloatLayout
from kivy.app import App
class LabelForList(Label):
    """
    Universal label used in list items across this application.
    The class body is empty: presumably styling/behaviour is attached in
    the corresponding .kv rules -- confirm in the kv files.
    """
    pass
class TotalsInfoLabel(BoxLayout):
    """
    Customized `Label` to show personal/credential informations of the admin
    NOTE(review): docstring looks copy-pasted; the name suggests it shows
    totals information -- confirm against the .kv rule for this class.
    """
    pass
class PanelInfoLabel(BoxLayout):
    """
    Customized `Label` to show personal/credential informations of the admin
    NOTE(review): docstring looks copy-pasted; the name suggests it shows
    panel information -- confirm against the .kv rule for this class.
    """
    pass
class CustomBubbleButton(BubbleButton):
    """Keypad button for the numeric bubble: inserts its own character at
    the cursor of the register screen's input field, or deletes one
    character when its text is "<-" (backspace)."""

    def add_text(self):
        field = App.get_running_app().root.ids.registerScreen.ids.input_field
        index = field.cursor[0] - 1
        if self.text != "<-":
            # Insert this button's character after the cursor position.
            field.text = field.text[:index + 1] + self.text + field.text[index + 1:]
            field.cursor = (index + 2, 0)
        else:
            # Backspace: remove the character before the cursor, unless
            # the cursor is already at the very start of the field.
            if index != -1 and field.cursor != (0, 0):
                field.text = field.text[:index] + field.text[index + 1:]
            field.cursor = (index, 0)
class NumericKeyboard(Bubble):
    """Bubble containing a numeric keypad of CustomBubbleButtons for the
    register screen's input field.  The `numeric_keyboard_layout`
    container is presumably declared in the .kv rule -- confirm."""
    def on_touch_up(self, touch):
        # Dismiss the keypad when the touch lands outside both the bubble
        # and its parent, and drop focus from the input field.
        app= App.get_running_app()
        if not self.collide_point(*touch.pos) and not self.parent.collide_point(*touch.pos):
            self.parent.remove_widget(self.parent.bubb)
            app.root.ids.registerScreen.ids.input_field.focus=False
            # `bubb` on the parent doubles as the "keyboard shown" flag.
            delattr(app.root.ids.registerScreen.ids.input_field.parent, 'bubb')
    def __init__(self, **kwargs):
        super(NumericKeyboard, self).__init__(**kwargs)
        self.create_bubble_button()
    def create_bubble_button(self):
        # Keypad layout; '' is a disabled spacer and '<-' is backspace.
        numeric_keypad = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0', '', '<-']
        for x in numeric_keypad:
            if x == '':
                bubb_btn = CustomBubbleButton(disabled=True, text=str(x),font_name='zekton__.ttf', bold=True, font_size="20sp")
            else:
                bubb_btn = CustomBubbleButton(text=str(x),font_name='zekton__.ttf', bold=True, font_size="20sp")
            self.numeric_keyboard_layout.add_widget(bubb_btn)
class ShowInputBubble(FloatLayout):
    """Float layout that pops up a NumericKeyboard bubble above itself."""
    def show_bubble(self, *l):
        # Create the keypad only once; the `bubb` attribute doubles as
        # the "keyboard is shown" flag (removed again by NumericKeyboard).
        if not hasattr(self, 'bubb'):
            self.bubb = NumericKeyboard()
            self.bubb.arrow_pos = "top_mid"
            self.add_widget(self.bubb)
6,738 | 168a76fd3bb43afe26a6a217e90f48704b4f2042 | #!/usr/bin/env python3
import os
import requests
# This is the main url of the BSE API
# THIS WILL CHANGE TO HTTPS IN THE FUTURE
# HTTPS IS RECOMMENDED
# Canonical BSE endpoint used when no override is supplied.
main_bse_url = "http://basissetexchange.org"
# This allows for overriding the URL via an environment variable
# Feel free to just use the base_url below
base_url = os.environ.get('BSE_API_URL', main_bse_url)
def print_results(r):
    """Print the body of a BSE response and fail loudly on a bad status.

    Always prints ``r.text`` (it carries the error detail on failure),
    then raises RuntimeError for any non-200 status code.
    """
    body = r.text
    print(body)
    if r.status_code == 200:
        return
    raise RuntimeError("Could not obtain data from the BSE. Check the error information above")
############################################
# Change the user agent and 'from' headers
############################################
# Change these to something more
# descriptive if you would like. This lets us know
# how many different people/groups are using the site
# Valid email is COMPLETELY OPTIONAL. Put whatever
# you would like in there, or leave it as is. If you
# do put your email there, we will never give it
# away or email you, except in case we think errors in
# your script are causing us problems.
headers = {
    'User-Agent': 'BSE Example Python Script',
    'From': 'bse@molssi.org'
}
###############################################################
# Get the def2-QZVPD basis for all elements in nwchem format
# Note that basis set names and formats are not case sensitive
###############################################################
r = requests.get(base_url + '/api/basis/def2-qzvpd/format/nwchem',
                 headers=headers
                 )
print_results(r)
##########################################################################
# Get the cc-pvqz basis for hydrogen, carbon and nitrogen in psi4 format
##########################################################################
# Elements can be passed a variety of ways. Here, I'm just
# passing a list of Z numbers. See elements.py for other ways
# you can specify elements
params = {'elements': [1, 6, 7]}
r = requests.get(base_url + '/api/basis/cc-pvqz/format/psi4',
                 params=params,
                 headers=headers
                 )
print_results(r)
6,739 | ac31cba94ee8ff7a2903a675954c937c567b5a56 |
def encrypt(key,plaintext):
    """Caesar-encrypt `plaintext` by shifting letters `key` places.

    Handles both upper- and lower-case ASCII letters; the original only
    wrapped correctly for A-Z (it compared every character against 'Z',
    so lower-case letters past 'z' - key were mangled).  Non-letter
    characters are still dropped, matching the original behaviour, and
    the modulo arithmetic makes any integer key (including > 26 or
    negative) work.
    """
    ciphertext=""
    for ch in plaintext:
        if ch.isalpha():
            # Shift within the letter's own case band.
            base = ord("A") if ch.isupper() else ord("a")
            ciphertext += chr((ord(ch) - base + key) % 26 + base)
    return ciphertext
def decrypt(key,ciphertext):
    """Caesar-decrypt `ciphertext` by shifting letters back `key` places.

    Handles both upper- and lower-case ASCII letters; the original only
    wrapped correctly for A-Z (it compared every character against 'A',
    so lower-case input was mangled).  Non-letter characters are still
    dropped, matching the original behaviour, and the modulo arithmetic
    makes any integer key work.
    """
    plaintext=""
    for ch in ciphertext:
        if ch.isalpha():
            # Shift within the letter's own case band.
            base = ord("A") if ch.isupper() else ord("a")
            plaintext += chr((ord(ch) - base - key) % 26 + base)
    return plaintext
|
6,740 | f15f49a29f91181d0aaf66b19ce9616dc7576be8 | # Generated by Django 3.1.7 on 2021-04-16 05:56
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    # Auto-generated migration: registers 'Transactions' as a custom
    # manager name on the transaction model (no schema change).
    dependencies = [
        ('Checkbook', '0002_auto_20210415_2250'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='transaction',
            managers=[
                ('Transactions', django.db.models.manager.Manager()),
            ],
        ),
    ]
|
6,741 | 46cdea08cab620ea099ad7fa200782717249b91b | #
# PySNMP MIB module SYSLOG-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SYSLOG-TC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:31:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, ObjectIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Bits, Gauge32, MibIdentifier, iso, ModuleIdentity, NotificationType, Counter32, Counter64, IpAddress, mib_2 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "ObjectIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Bits", "Gauge32", "MibIdentifier", "iso", "ModuleIdentity", "NotificationType", "Counter32", "Counter64", "IpAddress", "mib-2")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity for SYSLOG-TC-MIB (textual conventions for syslog
# management; the class references below cite RFC 5424 tables).
syslogTCMIB = ModuleIdentity((1, 3, 6, 1, 2, 1, 173))
syslogTCMIB.setRevisions(('2009-03-30 00:00',))
if mibBuilder.loadTexts: syslogTCMIB.setLastUpdated('200903300000Z')
if mibBuilder.loadTexts: syslogTCMIB.setOrganization('IETF Syslog Working Group')
class SyslogFacility(TextualConvention, Integer32):
    # Textual convention for the syslog facility code (0-23), mirroring
    # Table 1 of RFC 5424.  Generated by pysmi -- do not hand-edit logic.
    reference = 'The Syslog Protocol (RFC5424): Table 1'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))
    namedValues = NamedValues(("kern", 0), ("user", 1), ("mail", 2), ("daemon", 3), ("auth", 4), ("syslog", 5), ("lpr", 6), ("news", 7), ("uucp", 8), ("cron", 9), ("authpriv", 10), ("ftp", 11), ("ntp", 12), ("audit", 13), ("console", 14), ("cron2", 15), ("local0", 16), ("local1", 17), ("local2", 18), ("local3", 19), ("local4", 20), ("local5", 21), ("local6", 22), ("local7", 23))
class SyslogSeverity(TextualConvention, Integer32):
    # Textual convention for the syslog severity code (0=emerg..7=debug),
    # mirroring Table 2 of RFC 5424.  Generated by pysmi.
    reference = 'The Syslog Protocol (RFC5424): Table 2'
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))
    namedValues = NamedValues(("emerg", 0), ("alert", 1), ("crit", 2), ("err", 3), ("warning", 4), ("notice", 5), ("info", 6), ("debug", 7))
# Register the module identity and both textual conventions with pysnmp.
mibBuilder.exportSymbols("SYSLOG-TC-MIB", syslogTCMIB=syslogTCMIB, SyslogFacility=SyslogFacility, PYSNMP_MODULE_ID=syslogTCMIB, SyslogSeverity=SyslogSeverity)
|
6,742 | 6e0d09bd0c9d1d272f727817cec65b81f83d02f5 | containerized: "docker://quay.io/snakemake/containerize-testimage:1.0"
# Minimal rule used to test conda-env containerization: it only needs
# bcftools to exist inside the environment.
rule a:
    output:
        "test.out"
    conda:
        "env.yaml"
    # bcftools without arguments prints usage to stderr and exits non-zero;
    # "|| true" keeps the rule green while capturing that output.
    shell:
        "bcftools 2> {output} || true"
|
6,743 | 54d714d1e4d52911bcadf3800e7afcc2c9a615a5 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 7 03:41:18 2020
@author: owlthekasra
"""
import methods as md
import add_label as al
import numpy as np
import pandas as pd
import random
# Recording directories: sb = sine bass (heard), ns = no sound,
# sbt = sine bass thought (imagined stimulus).
sb_rd_1 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/trials_2'
sb_rd_2 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/trials_3'
sb_rd_3 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass/extra_deleted_metadata'
ns_rd_1 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/no_sound/trials_1'
ns_rd_2 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/no_sound/trials_2'
sbt_rd_1 = '/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/sine_bass_thought/trials_1'
# NOTE(review): DataFrame.append was removed in pandas 2.0; these three
# lines need pd.concat on a modern pandas install.
df_sine_bass_trials = al.get_long_dataframe(sb_rd_1).append(al.get_long_dataframe(sb_rd_2))
df_no_sound_trials = al.get_long_dataframe(ns_rd_1).append(al.get_long_dataframe(ns_rd_2))
df_sine_bass_thought_trials = al.get_long_dataframe(sbt_rd_1)
# Earlier labelled-load pipeline, kept for reference; note that names like
# `bg`, `sound`, `nosound` used further below are only defined here.
# _, df_sine_bass_extra = al.get_all_dataframes(sb_rd_3, 1)
# _, df_sine_bass_trials_2 = al.get_all_dataframes(sb_rd_1, 1)
# _, df_sine_bass_trials_3 = al.get_all_dataframes(sb_rd_2, 1)
# _, df_no_sound_trials_1 = al.get_all_dataframes(ns_rd_1, 0)
# _, df_no_sound_trials_2 = al.get_all_dataframes(ns_rd_2, 0)
# _, df_sine_bass_thought_trials_1 = al.get_all_dataframes(sbt_rd_1, 2)
# diff_labels = [df_sine_bass_thought_trials_1, df_sine_bass_extra, df_sine_bass_trials_2, df_sine_bass_trials_3, df_no_sound_trials_1, df_no_sound_trials_2]
# big_frame = pd.concat(diff_labels, ignore_index=True)
# bg = big_frame.iloc[:, :513]
# sound = bg[bg["label"]==1].iloc[:,1:]
# nosound = bg[bg["label"]==0].iloc[:,1:]
# imagesound = bg[bg["label"]==2].iloc[:,1:]
def get_X_and_y(df, start=1):
    """Split a labelled DataFrame into features and labels.

    :param df: pandas DataFrame containing a 'label' column
    :param start: first positional column of the feature block
    :return: tuple (X, y) where X holds the columns from `start` onward
        and y is the one-column 'label' frame
    """
    y = df[['label']]
    # Bug fix: plain `df[:, start:]` raises TypeError on a DataFrame --
    # positional slicing requires .iloc.
    X = df.iloc[:, start:]
    return (X, y)
def subtract_moving_average(df, n=50):
    """Subtract a +/-n moving average (within each row) from every cell.

    NOTE(review): the window lower bound is max(1, i - n), so positional
    column 0 never contributes to any window and cell 0's window can be
    empty (yielding NaN).  This mirrors the original behaviour; if
    0-based windows were intended, change the 1 to 0.

    :param df: input DataFrame (numeric)
    :param n: half-width of the moving-average window
    :return: new DataFrame of the same shape (integer-labelled axes)
    """
    k = n
    n_rows = len(df)
    n_cols = len(df.columns)
    bgnorm = pd.DataFrame(np.zeros((n_rows, n_cols)))
    for j in range(0, n_rows):
        row = df.iloc[j, :]  # hoisted: constant for the whole inner loop
        for i in range(0, n_cols):
            # Positional window around column i, clipped to the row.
            indices = range(max(1, i - k), min(i + k, n_cols))
            avg = row.iloc[indices].mean()
            # (per-cell debug print removed: it dominated runtime)
            bgnorm.iloc[j, i] = df.iloc[j, i] - avg
    return bgnorm
# #preprocess thought sine wave only
# y_values_thought = df_sine_bass_thought_trials_1.iloc[:, 0]
# X_values_thought = df_sine_bass_thought_trials_1.iloc[:, 132:660]
# df_thought = pd.concat([y_values_thought, X_values_thought], axis=1, ignore_index=True)
snd = pd.DataFrame()
# sn2 = sound.reset_index().iloc[:,1:].T
# NOTE(review): `sound` is only defined in the commented-out block above;
# this loop (laying 4-row trial blocks side by side) fails as-is.
for i in range(0, int(len(sound)/4)):
    snd = pd.concat([snd, sound.iloc[i*4:i*4+4, :]], axis = 1)
#separate channels into different dataframes
# NOTE(review): `bg` is likewise defined only in the commented-out block.
# Rows are interleaved 4-channel samples; take every 4th row per channel.
bg1 = bg.iloc[::4, :]
bg2 = bg.iloc[1::4, :]
bg3 = bg.iloc[2::4, :]
bg4 = bg.iloc[3::4, :]
bigX = bg.iloc[:, 1:]
bigy = bg.iloc[:,0]
#subtracting average of each row
# NOTE(review): `lab` is undefined at this point; bigX is immediately
# rebound, discarding the assignment above.
bigX = lab.iloc[:, 1:]
len(bigX.columns)
len(bigX)
bgnorm = subtract_moving_average(bigX)
bgnormlab = pd.concat([bigy, bgnorm], axis=1)
bgnormlab.to_csv('bgnormalized3600x517.csv')
# Reload the cached normalized matrix instead of recomputing it.
bgnormlab = pd.read_csv('/Users/owlthekasra/Documents/Code/Python/AudioStimulus/data/csv/bgnormalized3600x517.csv')
j3 = pd.DataFrame()
def get_mean_down_df(df, nchan=4):
    """Collapse every consecutive block of `nchan` rows into one row of
    column means (e.g. averaging the channels of each trial).

    Leftover rows (when len(df) is not a multiple of nchan) are dropped,
    matching the original behaviour.

    :param df: input DataFrame
    :param nchan: number of consecutive rows per block
    :return: DataFrame with one mean row per complete block
    """
    blocks = []
    for i in range(1, len(df) + 1):
        if (i % nchan) == 0:
            block_mean = df.iloc[range(i - nchan, i), :].mean()
            blocks.append(block_mean.to_frame().T)
    if not blocks:
        # No complete block: return an empty frame, as the original did.
        return pd.DataFrame()
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    return pd.concat(blocks, ignore_index=True)
# Scratch exploration of the grouping logic above.
j5 = range(0,8)
j1 = bigX.iloc[j5, :]
bgnormavg = get_mean_down_df(bgnormlab)
# NOTE(review): Series.append was removed in pandas 2.0; this line needs
# pd.concat.  It also mixes a Series with a DataFrame -- confirm intent.
lab = bgnormavg.iloc[:,-1]
lab = lab.append(bgnormavg.iloc[:,:-1])
# Inspect the mean of columns 397-416 of the second row.
indices = range(397, 417)
j1 = lab.iloc[1, :].iloc[1:]
j2 = j1.iloc[indices]
j3 = j2.mean()
random.seed(100)
# bssss = bgnormavg.drop(columns=['Unnamed: 0'])
# Shuffle channel-1 trials, then split 650 train / remainder validation.
main = bg1.sample(frac=1)
main = main.reset_index()
main = main.iloc[:, 1:]
train = main.iloc[:650]
val = main.iloc[650:]
# main2 = main.sample(frac=1)
# NOTE(review): X_train/y_train are taken from `main` (the full set), not
# from `train` -- the validation rows leak into training; confirm intent.
X_train = main.iloc[:, 1:]
y_train = main['label']
X_val = val.iloc[:, 1:]
y_val = val['label']
model, acc, pred, y_test = md.fitPredictValSet(X_train, y_train, X_val, y_val, 'tree')
print(indices)
6,744 | 8a848eece6a3ed07889ba208068de4bfa0ad0bbf | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for test classes."""
import contextlib
import os
import re
import unittest
import webtest
from core.domain import config_domain
from core.domain import exp_domain
from core.domain import exp_services
from core.platform import models
current_user_services = models.Registry.import_current_user_services()
import feconf
import main
import json
# Regex used to extract the CSRF token embedded in rendered page bodies.
CSRF_REGEX = (
    r'csrf_token: JSON\.parse\(\'\\\"([A-Za-z0-9/=_-]+)\\\"\'\)')
# Prefix to append to all lines printed by tests to the console.
LOG_LINE_PREFIX = 'LOG_INFO_TEST: '
def empty_environ():
    """Reset the App Engine request environment variables to a blank,
    logged-out state (no user, not an admin, localhost:8080 server)."""
    defaults = {
        'AUTH_DOMAIN': 'example.com',
        'SERVER_NAME': 'localhost',
        'HTTP_HOST': 'localhost',
        'SERVER_PORT': '8080',
        'USER_EMAIL': '',
        'USER_ID': '',
        'USER_IS_ADMIN': '0',
    }
    os.environ.update(defaults)
    # Derived after the update so it always reflects HTTP_HOST:SERVER_PORT.
    os.environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
        os.environ['HTTP_HOST'], os.environ['SERVER_PORT'])
class TestBase(unittest.TestCase):
    """Base class for all tests."""
    # Allow long diffs in assertion-failure messages.
    maxDiff = 2500
    # Username used by register_editor() when none is supplied.
    DEFAULT_USERNAME = 'defaultusername'
    def setUp(self):
        # Platform-specific subclasses must override (see AppEngineTestBase).
        raise NotImplementedError
    def tearDown(self):
        # Platform-specific subclasses must override.
        raise NotImplementedError
    def log_line(self, line):
        """Print the line with a prefix that can be identified by the
        script that calls the test.
        """
        print '%s%s' % (LOG_LINE_PREFIX, line)
    def _delete_all_models(self):
        # Platform-specific subclasses must override to wipe the datastore.
        raise NotImplementedError
    def login(self, email, is_super_admin=False):
        """Simulate a login by setting the env vars the users service reads."""
        os.environ['USER_EMAIL'] = email
        os.environ['USER_ID'] = self.get_user_id_from_email(email)
        os.environ['USER_IS_ADMIN'] = '1' if is_super_admin else '0'
    def logout(self):
        """Clear the simulated login state set by login()."""
        os.environ['USER_EMAIL'] = ''
        os.environ['USER_ID'] = ''
        os.environ['USER_IS_ADMIN'] = '0'
    def shortDescription(self):
        """Additional information logged during unit test invocation."""
        # Suppress default logging of docstrings.
        return None
    def get_expected_login_url(self, slug):
        """Returns the expected login URL."""
        return current_user_services.create_login_url(slug)
    def get_expected_logout_url(self, slug):
        """Returns the expected logout URL."""
        return current_user_services.create_logout_url(slug)
    def _parse_json_response(self, json_response, expect_errors=False):
        """Convert a JSON server response to an object (such as a dict)."""
        if not expect_errors:
            self.assertEqual(json_response.status_int, 200)
        self.assertEqual(
            json_response.content_type, 'application/javascript')
        self.assertTrue(json_response.body.startswith(feconf.XSSI_PREFIX))
        # Strip the anti-XSSI prefix before decoding the JSON payload.
        return json.loads(json_response.body[len(feconf.XSSI_PREFIX):])
    def get_json(self, url):
        """Get a JSON response, transformed to a Python object."""
        json_response = self.testapp.get(url)
        self.assertEqual(json_response.status_int, 200)
        return self._parse_json_response(json_response, expect_errors=False)
    def post_json(self, url, payload, csrf_token=None, expect_errors=False,
                  expected_status_int=200, upload_files=None):
        """Post an object to the server by JSON; return the received object."""
        # The handler expects the JSON body wrapped in a 'payload' form field.
        data = {'payload': json.dumps(payload)}
        if csrf_token:
            data['csrf_token'] = csrf_token
        json_response = self.testapp.post(
            str(url), data, expect_errors=expect_errors,
            upload_files=upload_files)
        self.assertEqual(json_response.status_int, expected_status_int)
        return self._parse_json_response(
            json_response, expect_errors=expect_errors)
    def put_json(self, url, payload, csrf_token=None, expect_errors=False,
                 expected_status_int=200):
        """Put an object to the server by JSON; return the received object."""
        data = {'payload': json.dumps(payload)}
        if csrf_token:
            data['csrf_token'] = csrf_token
        json_response = self.testapp.put(
            str(url), data, expect_errors=expect_errors)
        self.assertEqual(json_response.status_int, expected_status_int)
        return self._parse_json_response(
            json_response, expect_errors=expect_errors)
    def get_csrf_token_from_response(self, response):
        """Retrieve the CSRF token from a GET response."""
        return re.search(CSRF_REGEX, response.body).group(1)
    def register_editor(self, email, username=None):
        """Register a user with the given username as an editor."""
        if username is None:
            username = self.DEFAULT_USERNAME
        self.login(email)
        # Fetch the prerequisites page first to obtain a CSRF token.
        response = self.testapp.get(feconf.EDITOR_PREREQUISITES_URL)
        csrf_token = self.get_csrf_token_from_response(response)
        response = self.testapp.post(feconf.EDITOR_PREREQUISITES_DATA_URL, {
            'csrf_token': csrf_token,
            'payload': json.dumps({
                'username': username,
                'agreed_to_terms': True
            })
        })
        self.assertEqual(response.status_int, 200)
        self.logout()
    def set_admins(self, admin_emails):
        """Set the ADMIN_EMAILS property."""
        # Config properties can only be saved by a logged-in super admin.
        self.login('superadmin@example.com', is_super_admin=True)
        response = self.testapp.get('/admin')
        csrf_token = self.get_csrf_token_from_response(response)
        self.post_json('/adminhandler', {
            'action': 'save_config_properties',
            'new_config_property_values': {
                config_domain.ADMIN_EMAILS.name: admin_emails,
            }
        }, csrf_token)
        self.logout()
    def set_moderators(self, moderator_emails):
        """Set the MODERATOR_EMAILS property."""
        self.login('superadmin@example.com', is_super_admin=True)
        response = self.testapp.get('/admin')
        csrf_token = self.get_csrf_token_from_response(response)
        self.post_json('/adminhandler', {
            'action': 'save_config_properties',
            'new_config_property_values': {
                config_domain.MODERATOR_EMAILS.name: moderator_emails,
            }
        }, csrf_token)
        self.logout()
    def get_current_logged_in_user_id(self):
        # Mirrors what login() stored in the environment.
        return os.environ['USER_ID']
    def get_user_id_from_email(self, email):
        return current_user_services.get_user_id_from_email(email)
    def save_new_default_exploration(self,
            exploration_id, owner_id, title='A title'):
        """Saves a new default exploration written by owner_id.
        Returns the exploration domain object.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            exploration_id, title, 'A category')
        exp_services.save_new_exploration(owner_id, exploration)
        return exploration
    def save_new_valid_exploration(
            self, exploration_id, owner_id, title='A title'):
        """Saves a new strictly-validated exploration.
        Returns the exploration domain object.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            exploration_id, title, 'A category')
        # Point the initial state at END and set an objective so the
        # exploration passes strict validation.
        exploration.states[exploration.init_state_name].widget.handlers[
            0].rule_specs[0].dest = feconf.END_DEST
        exploration.objective = 'An objective'
        exp_services.save_new_exploration(owner_id, exploration)
        return exploration
    @contextlib.contextmanager
    def swap(self, obj, attr, newvalue):
        """Swap an object's attribute value within the context of a
        'with' statement. The object can be anything that supports
        getattr and setattr, such as class instances, modules, ...
        Example usage:
        import math
        with self.swap(math, "sqrt", lambda x: 42):
            print math.sqrt(16.0) # prints 42
        print math.sqrt(16.0) # prints 4 as expected.
        """
        original = getattr(obj, attr)
        setattr(obj, attr, newvalue)
        try:
            yield
        finally:
            # Always restore the original attribute, even on exceptions.
            setattr(obj, attr, original)
class AppEngineTestBase(TestBase):
    """Base class for tests requiring App Engine services."""
    def _delete_all_models(self):
        # Deleting every key wipes the (stubbed) datastore between tests.
        from google.appengine.ext import ndb
        ndb.delete_multi(ndb.Query().iter(keys_only=True))
    def setUp(self):
        empty_environ()
        from google.appengine.datastore import datastore_stub_util
        from google.appengine.ext import testbed
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        # Configure datastore policy to emulate instantaneously and globally
        # consistent HRD.
        policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
            probability=1)
        # Declare any relevant App Engine service stubs here.
        self.testbed.init_user_stub()
        self.testbed.init_memcache_stub()
        self.testbed.init_datastore_v3_stub(consistency_policy=policy)
        self.testbed.init_taskqueue_stub()
        # Keep a handle to the taskqueue stub for the helpers below.
        self.taskqueue_stub = self.testbed.get_stub(
            testbed.TASKQUEUE_SERVICE_NAME)
        self.testbed.init_urlfetch_stub()
        self.testbed.init_files_stub()
        self.testbed.init_blobstore_stub()
        # Set up the app to be tested.
        self.testapp = webtest.TestApp(main.app)
    def tearDown(self):
        self.logout()
        self._delete_all_models()
        self.testbed.deactivate()
    def count_jobs_in_taskqueue(self):
        return len(self.taskqueue_stub.get_filtered_tasks())
    def process_and_flush_pending_tasks(self):
        """Run queued tasks to completion, including any tasks they enqueue."""
        from google.appengine.ext import deferred
        tasks = self.taskqueue_stub.get_filtered_tasks()
        self.taskqueue_stub.FlushQueue('default')
        # Loop until the queue stays empty: executing tasks may enqueue more.
        while tasks:
            for task in tasks:
                if task.url == '/_ah/queue/deferred':
                    deferred.run(task.payload)
                else:
                    # All other tasks are expected to be mapreduce ones.
                    headers = {
                        key: str(val) for key, val in task.headers.iteritems()
                    }
                    headers['Content-Length'] = str(len(task.payload or ''))
                    response = self.testapp.post(
                        url=str(task.url), params=(task.payload or ''),
                        headers=headers)
                    if response.status_code != 200:
                        raise RuntimeError(
                            'MapReduce task to URL %s failed' % task.url)
            tasks = self.taskqueue_stub.get_filtered_tasks()
            self.taskqueue_stub.FlushQueue('default')
# Select the concrete test base for the configured platform; only Google
# App Engine is supported at present.
if feconf.PLATFORM == 'gae':
    GenericTestBase = AppEngineTestBase
else:
    raise Exception('Invalid platform: expected one of [\'gae\']')
|
6,745 | 4d066a189bf5151534e0227e67cdc2eed5cd387c | #!/usr/bin/python
# view_rows.py - Fetch and display the rows from a MySQL database query
# import the MySQLdb and sys modules
# katja seltmann April 16, 2013 to run on arthropod data in scan symbiota database
import MySQLdb
import sys
#connection information from mysql
#test database
# NOTE(review): host/user/password/db are blank placeholders -- fill in
# real credentials before running.  The module-level connection/cursor
# are shared by every function below.
connect = MySQLdb.connect("", user="", passwd="", db="")
cursor = connect.cursor ()
def InsertMysql(family, genus, specificEpithet):
    """Insert one (family, genus, specificEpithet) row into sums2.

    Uses driver-side parameter binding instead of Python string
    interpolation, so values containing quotes no longer break the
    statement and SQL injection is impossible.  Commits on success;
    rolls back (silently, matching the original) on any error.
    """
    try:
        cursor.execute(
            """INSERT INTO sums2 (occid,family,genus,specificEpithet,coleventsLat,georeferenced) VALUES(NULL,%s,%s,%s,NULL,NULL);""",
            (family, genus, specificEpithet))
        connect.commit()
    except:
        # Best-effort: preserve the original swallow-and-rollback behaviour.
        connect.rollback()
def ExecuteMysql():
    """Copy every distinct (family, genus, specificEpithet) triple from
    omoccurrences into sums2 via InsertMysql."""
    cursor.execute ("""select distinct family,genus,specificEpithet from omoccurrences where specificEpithet != 'UNKNOWN_NULL'""")
    for family, genus, specificEpithet in cursor.fetchall():
        InsertMysql(family, genus, specificEpithet)
def ColeventsNOLAT():
    """For each sums2 row, count collecting events without coordinates
    (decimalLatitude = '0.0000') in omoccurrences and store the count in
    sums2.coleventsLat.

    Fixes two defects in the original: the fetchone() result was indexed
    BEFORE the `if data:` guard (crashing when no row came back), and the
    inner queries were built via string formatting (SQL injection via taxon
    names containing quotes).
    """
    cursor.execute ("""select occid,concat(family, genus, specificEpithet) from sums2""")
    data = cursor.fetchall()
    for x in data:
        concat_string = x[1]
        occid = str(x[0])
        cursor.execute(
            "select count(distinct locality,county,stateProvince,municipality,year,month,day) as colevent "
            "from omoccurrences where decimalLatitude = '0.0000' "
            "and concat(family,genus,specificEpithet) = %s",
            (concat_string,))
        row = cursor.fetchone()
        if row:  # guard BEFORE indexing — fetchone() may return None
            colevent = row[0]
            try:
                cursor.execute(
                    "update sums2 set coleventsLat = %s where occid = %s",
                    (colevent, occid))
                connect.commit()
            except Exception:
                connect.rollback()
def GeoCoordinated():
    """For up to 20 un-georeferenced sums2 rows, count distinct
    coordinate/date combinations in omoccurrences and store the count in
    sums2.georeferenced.

    NOTE(review): Python 2 only (print statements); the update statement is
    built by string formatting and is injection-prone — parameterize it if
    this script is ever revived.
    """
    cursor.execute ("""select sums2.occid,omoccurrences.nameOMOConcat,count(distinct decimalLatitude,decimalLongitude,year,month,day) from omoccurrences join sums2 on omoccurrences.nameOMOConcat = sums2.nameConcat where decimalLatitude !='0.0000' and georeferenced is NULL group by omoccurrences.nameOMOConcat limit 20""")
    data = cursor.fetchall()
    for x in data:
        occid = x[0]
        georefenced = x[2]
        concat_string = x[1]
        # Progress/debug output.
        print occid
        print concat_string
        print georefenced
        if x:
            try:
                cursor.execute ("""update sums2 set georeferenced = '%s' where occid = '%s';"""% (georefenced,occid))
                connect.commit()
            except:
                connect.rollback()
#ExecuteMysql()
#ColeventsNOLAT()
GeoCoordinated()
connect.close()
# cursor.execute ("""select occid,nameConcat from sums2 where georeferenced is NULL""")
# data = cursor.fetchall()
# for x in data:
# concat_string = x[1]
# print concat_string
# occid = str(x[0])
# cursor.execute ("""select count(distinct decimalLatitude,decimalLongitude,year,month,day) as locality from omoccurrences where decimalLatitude !='0.0000' and concat(family,genus,specificEpithet) =""" + "'" + concat_string + "'")
# data = cursor.fetchone()
# georefenced = data[0]
# if data:
# try:
# cursor.execute ("""update sums2 set georeferenced = '%s' where occid = '%s';"""% (georefenced,occid))
# connect.commit()
# except:
# connect.rollback()
# +-----------------+--------------+------+-----+---------+----------------+
# | Field | Type | Null | Key | Default | Extra |
# +-----------------+--------------+------+-----+---------+----------------+
# | occid | int(10) | NO | PRI | NULL | auto_increment |
# | family | varchar(255) | YES | | NULL | |
# | scientificName | varchar(255) | YES | | NULL | |
# | genus | varchar(255) | YES | | NULL | |
# | specificEpithet | varchar(255) | YES | | NULL | |
# | coleventsLat | int(10) | YES | | NULL | |
# | georeferenced | int(10) | YES | | NULL | |
# +-----------------+--------------+------+-----+---------+----------------+
|
6,746 | e810cde7f77d36c6a43f8c277b66d038b143aae6 | """
Base cache mechanism
"""
import time
import string
import codecs
import pickle
from functools import wraps
from abc import ABCMeta, abstractmethod
from asyncio import iscoroutinefunction
class BaseCache(metaclass=ABCMeta):
    """Abstract base for function-result caches backed by a key/value store.

    Instances are used as decorators: they memoize the wrapped function's
    results in ``kvstore`` (any object exposing exists/get/set), keyed by
    ``makekey(func, args, kwargs)``. Cached values are pickled, base64
    encoded and stored with a timestamp; entries older than ``lifetime``
    seconds are recomputed. Async functions get an async wrapper.
    """
    @abstractmethod
    def __init__(self, kvstore, makekey, lifetime, fail_silent):
        # kvstore: backing store with exists(key)/get(key)/set(key, value).
        self._kvstore = kvstore
        # makekey: callable (func, args, kwargs) -> str cache key.
        self._makekey = makekey
        # lifetime: max entry age in seconds, or None for no expiry.
        self._lifetime = lifetime
        # fail_silent: if True, a broken cache entry falls through to a
        # recompute instead of raising.
        self._fail_silent = fail_silent
    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            """decorator."""
            key = self._makekey(func, args, kwargs)
            if self._kvstore.exists(key):
                value_str = self._kvstore.get(key)
                try:
                    # Stored format: base64(pickle({'time': ..., 'data': ...})).
                    value = pickle.loads(codecs.decode(value_str.encode(), "base64"))
                    if self._lifetime is None or time.time() - value['time'] < self._lifetime:
                        result = value['data']
                        return result
                except: # pylint: disable=W0702
                    if not self._fail_silent:
                        raise
            # Cache miss, expired entry, or (silently) corrupt entry.
            result = func(*args, **kwargs)
            value = {'time': time.time(), 'data': result}
            value_str = codecs.encode(pickle.dumps(value), "base64").decode()
            self._kvstore.set(key, value_str)
            return result
        @wraps(func)
        async def async_wrapper(*args, **kwargs):
            """async decorator."""
            # Same flow as wrapper() above, but awaits the wrapped coroutine.
            key = self._makekey(func, args, kwargs)
            if self._kvstore.exists(key):
                value_str = self._kvstore.get(key)
                try:
                    value = pickle.loads(codecs.decode(value_str.encode(), "base64"))
                    if self._lifetime is None or time.time() - value['time'] < self._lifetime:
                        result = value['data']
                        return result
                except: # pylint: disable=W0702
                    if not self._fail_silent:
                        raise
            result = await func(*args, **kwargs)
            value = {'time': time.time(), 'data': result}
            value_str = codecs.encode(pickle.dumps(value), "base64").decode()
            self._kvstore.set(key, value_str)
            return result
        # Pick the wrapper that matches the wrapped function's flavor.
        if iscoroutinefunction(func):
            return async_wrapper
        return wrapper
    @staticmethod
    def makekey(function, *args, **kwargs) -> str:
        """Create a unique, store-safe key for one function invocation.

        The function name plus its arguments are stringified, stripped of all
        punctuation and whitespace, then pickled and base64 encoded.

        :param function: the wrapped function
        :param args: positional args of the function
        :param kwargs: keyword arguments of the function
        :return: string base64 key
        """
        arguments = str((function.__name__, args, kwargs)).strip()
        arguments = arguments.translate(
            str.maketrans('', '', string.punctuation+string.whitespace)
        )
        key = codecs.encode(pickle.dumps(arguments, protocol=0), "base64").decode().strip()
        return key
|
6,747 | 0656c3e1d8f84cfb33c4531e41efb4a349d08aac | from pathlib import Path
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session
# SQLALCHEMY_DATABASE_URL = "postgresql://user:password@postgresserver/db"
# Store sql_app.db next to this module. Path(__file__) resolves the module's
# own location on disk; the original Path(__name__) built a path from the
# module *name* string, so the database silently landed in the current
# working directory instead.
SQLALCHEMY_DATABASE_URL = f"sqlite:///{Path(__file__).parent.absolute()}/sql_app.db"
engine = create_engine(
    SQLALCHEMY_DATABASE_URL,
    connect_args={"check_same_thread": False}  # Needed only for SQLite
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# # FastAPI "Dependency" (used with Depends)
# def get_db():
#     db = SessionLocal()
#     try:
#         yield db
#     finally:
#         db.close()
Base = declarative_base()
# Import every model module so Base.metadata knows all tables before create_all.
from flashcards_core.database.algorithms.model import Algorithm
from flashcards_core.database.algorithm_params.model import AlgorithmParam
from flashcards_core.database.cards.model import Card
from flashcards_core.database.decks.model import Deck
from flashcards_core.database.faces.model import Face
from flashcards_core.database.facts.model import Fact
from flashcards_core.database.reviews.model import Review
from flashcards_core.database.tags.model import Tag
from flashcards_core.database.many_to_many.model import FaceFact, DeckTag, CardTag, FaceTag, FactTag
# Create all the tables imported above
Base.metadata.create_all(bind=engine)
6,748 | d56fa4ea999d8af887e5f68296bfb20ad535e6ad | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base PXE Interface Methods
"""
from ironic_lib import metrics_utils
from oslo_log import log as logging
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import pxe_utils as pxe_utils
from ironic.drivers.modules import deploy_utils
LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
REQUIRED_PROPERTIES = {
'deploy_kernel': _("UUID (from Glance) of the deployment kernel. "
"Required."),
'deploy_ramdisk': _("UUID (from Glance) of the ramdisk that is "
"mounted at boot time. Required."),
}
OPTIONAL_PROPERTIES = {
'force_persistent_boot_device': _("True to enable persistent behavior "
"when the boot device is set during "
"deploy and cleaning operations. "
"Defaults to False. Optional."),
}
RESCUE_PROPERTIES = {
'rescue_kernel': _('UUID (from Glance) of the rescue kernel. This value '
'is required for rescue mode.'),
'rescue_ramdisk': _('UUID (from Glance) of the rescue ramdisk with agent '
'that is used at node rescue time. This value is '
'required for rescue mode.'),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
COMMON_PROPERTIES.update(RESCUE_PROPERTIES)
class PXEBaseMixin(object):

    def get_properties(self):
        """Return the properties of the interface.

        :returns: dictionary of <property name>:<property description> entries.
        """
        return COMMON_PROPERTIES

    @METRICS.timer('PXEBaseMixin.clean_up_ramdisk')
    def clean_up_ramdisk(self, task):
        """Cleans up the boot of ironic ramdisk.

        This method cleans up the PXE environment that was setup for booting
        the deploy or rescue ramdisk. It unlinks the deploy/rescue
        kernel/ramdisk in the node's directory in tftproot and removes it's PXE
        config.

        The deploy-vs-rescue mode is derived from the node itself via
        deploy_utils.rescue_or_deploy_mode(), not passed in by the caller.

        :param task: a task from TaskManager.
        :returns: None
        """
        node = task.node
        mode = deploy_utils.rescue_or_deploy_mode(node)
        try:
            images_info = pxe_utils.get_image_info(node, mode=mode)
        except exception.MissingParameterValue as e:
            # Best effort: missing driver info means there is nothing to
            # clean up for this node, so just log and skip.
            LOG.warning('Could not get %(mode)s image info '
                        'to clean up images for node %(node)s: %(err)s',
                        {'mode': mode, 'node': node.uuid, 'err': e})
        else:
            pxe_utils.clean_up_pxe_env(task, images_info)

    @METRICS.timer('PXEBaseMixin.validate_rescue')
    def validate_rescue(self, task):
        """Validate that the node has required properties for rescue.

        :param task: a TaskManager instance with the node being checked
        :raises: MissingParameterValue if node is missing one or more required
            parameters
        """
        pxe_utils.parse_driver_info(task.node, mode='rescue')
|
6,749 | ae45a4967a8ee63c27124d345ad4dc0c01033c0e | from mikeio.spatial import GeometryPoint2D, GeometryPoint3D
# https://www.ogc.org/standard/sfa/
def test_point2d_wkt():
    # WKT for a 2-D point is "POINT (x y)", with no spurious decimals.
    point = GeometryPoint2D(10, 20)
    assert point.wkt == "POINT (10 20)"
    point = GeometryPoint2D(x=-5642.5, y=120.1)
    assert point.wkt == "POINT (-5642.5 120.1)"
def test_point3d_wkt():
    # 3-D points use the "POINT Z" WKT tag.
    point = GeometryPoint3D(10, 20, 30)
    assert point.wkt == "POINT Z (10 20 30)"
def test_point2d_to_shapely():
    # Converting to shapely preserves both coordinates and the WKT text.
    point = GeometryPoint2D(10, 20)
    shapely_point = point.to_shapely()
    assert shapely_point.x == 10
    assert shapely_point.y == 20
    assert shapely_point.wkt == point.wkt
def test_point3d_to_shapely():
    # The z coordinate must survive the conversion as well.
    point = GeometryPoint3D(10, 20, -1)
    shapely_point = point.to_shapely()
    assert shapely_point.x == 10
    assert shapely_point.y == 20
    assert shapely_point.z == -1
    assert shapely_point.wkt == point.wkt
|
6,750 | 0584ff5cb252fba0fe1fc350a5fb023ab5cbb02b | from django.db import models
class Category(models.Model):
    """A book category (e.g. a genre); names are unique."""
    name = models.CharField(max_length=50, unique=True)
    # Set once, automatically, when the row is first created.
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = 'Categoria'
class Books(models.Model):
    """A book belonging to exactly one Category."""
    name = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    # NOTE(review): related_name='category' makes the reverse accessor
    # category_instance.category.all(); 'books' would read better, but
    # renaming it would break any existing reverse lookups.
    category = models.ForeignKey(
        Category, on_delete=models.CASCADE, related_name='category')

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = 'Livro'
class Student(models.Model):
    """A registered student; newest registrations are listed first."""
    name = models.CharField(max_length=70)
    # CPF — presumably the Brazilian personal registry number, stored with
    # formatting (max 14 chars, e.g. 000.000.000-00) — TODO confirm format.
    cpf = models.CharField(max_length=14)
    birth_date = models.DateField()
    city = models.CharField(max_length=50)
    registration_date = models.DateTimeField(auto_now_add=True)
    email = models.EmailField(max_length=50)
    tel = models.CharField(max_length=15)
    # NOTE(review): a single FK means each student is linked to exactly one
    # book; confirm a many-to-many was not intended.
    book= models.ForeignKey(
        Books, on_delete=models.CASCADE, related_name='book')

    class Meta:
        verbose_name = 'Estudante'
        ordering = ['-id']

    def __str__(self):
        return self.name
|
6,751 | 493552469943e9f9f0e57bf92b874c8b67943de5 | import os
from sources.lol.status import LOLServerStatusCollector
from util.abstract.feed import Feed
from util.abstract.handler import Handler
from util.functions.load_json import load_json
class LoLServerStatusHandler(Handler):
    """Builds per-server RSS/Atom status feeds for League of Legends."""

    def load_servers(self):
        """Load the server descriptors from data/lol/status.json."""
        servers_filepath = os.path.join(os.path.dirname(__file__), '../../data/lol/status.json')
        return load_json(servers_filepath)

    def get_filepath(self, server):
        """Return the output path for one server's feed,
        e.g. /lol/<region>/status.<locale>.xml."""
        return '/lol/{region}/status.{locale}.xml'.format(region=server['region'], locale=server['locale'])

    def process_server(self, server):
        """Collect status items for one server and wrap them in a Feed."""
        collector = LOLServerStatusCollector(server)
        items = collector.collect()
        alternateLink = collector.construct_alternate_link()
        feed = Feed()
        feed.setTitle(server['title'])
        feed.setAlternateLink(alternateLink)
        feed.setLanguage(server['locale'])
        feed.setItems(items)
        return feed
def handle(event=None, context=None):
    """Handler for AWS Lambda - LoL Server Status.

    ``event`` and ``context`` are accepted only to satisfy Lambda's calling
    convention and are unused. Defaults are ``None`` instead of the original
    mutable ``{}`` literals, which are evaluated once and shared across
    invocations.
    """
    LoLServerStatusHandler().run()
    return 'ok'
|
6,752 | ceca1be15aded0a842c5f2c6183e4f54aba4fd24 | v = 426
# print 'Yeah!' if dividable by 4 but print 'End of program' after regardless
if (v%4) == 0:
print ("Yeah!")
else:
print ("End of the program")
|
6,753 | af9430caff843242381d7c99d76ff3c964915700 | import os
from flask import Flask,render_template,request,redirect,url_for
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session,sessionmaker
app = Flask(__name__)
engine = create_engine("postgres://lkghylsqhggivp:d827f6dc5637928e95e060761de590b7d9514e9463c5241ed3d652d777a4a3a9@ec2-52-200-16-99.compute-1.amazonaws.com:5432/d6d65s4otfm5cr")
db = scoped_session(sessionmaker(bind=engine))
@app.route("/")
def index():
return render_template("a.html")
@app.route("/insert",methods=['POST'])
def insert():
firstname=request.form.get('firstname')
lastname=request.form.get('lastname')
dob=request.form.get('dob')
gender=request.form.get('gender')
aadharno=request.form.get('aadharno')
address=request.form.get('address')
db.execute("insert into aadhar (firstname,lastname,dob,gender,aadharno,address) values (:firstname,:lastname,:dob,:gender,:aadharno,:address)",{"firstname":firstname ,"lastname":lastname,"dob":dob,"gender":gender,"aadharno":aadharno,"address" : address})
db.commit()
return redirect(url_for('index'))
|
6,754 | 4c66ab6110e81bb88fc6916a1695e0f23e6e0e9d | from timeit import default_timer as timer
import numpy as np
# NOTE(review): bets1/bets2 appear unused — the simulation loop below builds
# its own bets ladder from cur_bal; confirm before deleting them.
bets1 = [ # lowest config possible
    0.00000001,
    0.00000004,
    0.0000001,
    0.0000005,
    0.00000150,
    0.00000500,
    0.00001000
]
bets2 = [ # 2 is 10x 1
    0.0000001,
    0.0000004,
    0.000001,
    0.000005,
    0.0000150,
    0.0000500,
    0.0001000
]
# options
max_seeds = 100          # number of independent simulated games
max_rolls = 100000 # 100k is around 8-24 hours of fastplay
seed_wins = 0            # games that reached the profit target
num_rolls = []           # rolls it took each winning game
start_position = np.random.randint(1, 100000000)  # first RNG seed
# Simulate one martingale-style dice game per seed: bet sizes escalate with
# the current loss streak, the game stops at +10000% profit, bankruptcy, or
# max_rolls.
for seed in range(start_position, start_position+max_seeds):
    # current game round stats
    cur_wins = 0
    max_wins = 0
    cur_losses = 0
    max_losses = 0
    win_streak = []
    loss_streak = []
    # seed data and timer
    np.random.seed(seed)
    start_time = timer()
    start_bal = cur_bal = 0.001 # 10$ reasonable start
    # actual Play
    for index in range(max_rolls):
        # make bets: ladder of stakes indexed by current loss streak,
        # each a fixed fraction of the current balance (8 dp precision)
        bets = [ # this appears to be working, a function of cur_bal
            0.00000001,
            float('{:.8f}'.format(cur_bal * 0.001)),
            float('{:.8f}'.format(cur_bal * 0.002)),
            float('{:.8f}'.format(cur_bal * 0.005)),
            float('{:.8f}'.format(cur_bal * 0.01)),
            float('{:.8f}'.format(cur_bal * 0.05)),
            float('{:.8f}'.format(cur_bal * 0.12)),
            float('{:.8f}'.format(cur_bal * 0.3)),
        ]
        # if Winning... Stop (profit target of +10000%, or out of rolls)
        if (cur_bal / start_bal - 1)*100 > 10000 or index==max_rolls-1:
            print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'.format(
                seed, index, cur_bal, (cur_bal/start_bal-1)*100))
            print('Max_L: {}'.format(max_losses))
            print('Max_W: {}'.format(max_wins))
            #print('Won The Day!')
            seed_wins += 1
            num_rolls.append(index)
            break
        # get bet: stake grows with the loss streak, capped at the ladder top
        if cur_losses < len(bets):
            bet = bets[cur_losses]
        else:
            bet = bets[0]
        if bet < bets[0]: # dont bet less than 8 decimal places
            bet = bets[0]
        # if Losing ... Stop
        if cur_bal <= 0:
            break
        if bet >= cur_bal:
            #print('Seed: {}, Num_Rolls {}, Balance: {:.8f} | Profit: {:.2f}%'.format(
            #    seed, index, cur_bal, (cur_bal/start_bal-1)*100))
            #print('Game Over man!')
            break
        ## >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> MAKE PLAY
        roll = np.random.randint(1, 10000)
        win = True if roll < 3900 else False ## 3900/10000 appears to be a good handicap
        ## <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        # fix balance: wins pay 2x the stake, losses forfeit it
        if win:
            loss_streak.append(cur_losses)
            cur_bal += bet * 2
            cur_losses = 0
            cur_wins += 1
        else:
            win_streak.append(cur_wins)
            cur_bal -= bet
            cur_losses += 1
            cur_wins = 0
        # fix maxes
        if cur_losses > max_losses:
            max_losses = cur_losses
        if cur_wins > max_wins:
            max_wins = cur_wins
    # /actual play
    # seed stuff
    seed_time = timer() - start_time
    print('Seed_time: {:.2f}'.format(seed_time), end='\r') # you will see this a lot if losing
# Finished All Seeds
print('Won {}/{} Seeds'.format(seed_wins,max_seeds))
if seed_wins: # if won anything.
    # NOTE(review): the stop condition in the loop is profit > 10000%, but
    # this message says 1000% — one of the two is off by a factor of 10.
    print('Avg # of rolls to 1000%: {}'.format(int(np.array(num_rolls).mean())))
|
6,755 | 9087a7bf42070fdb8639c616fdf7f09ad3903656 | from .chair_model import run_chair_simulation, init_omega_t, \
JumpingModel, H_to_L
from .utils import load_hcp_peaks, Condition, average_peak_counts
|
6,756 | 24ad62342fb9e7759be8561eaf0292736c7dcb6d | import sys
def tackle_mandragora(health):
    """Return the maximum experience obtainable (HackerRank 'Mandragora Forest').

    If e pets are eaten, the score is (e + 1) * (total health of the pets
    fought); it is always optimal to eat the weakest pets, so candidates are
    evaluated over the sorted order using suffix sums, O(n log n) overall.

    Unlike the original, the caller's list is NOT sorted in place, and the
    helper functions are inlined so this stands alone.
    """
    ordered = sorted(health)  # sorted copy: do not mutate the caller's list
    # suffix[i] == sum(ordered[i:])
    suffix = [0] * (len(ordered) + 1)
    for i in range(len(ordered) - 1, -1, -1):
        suffix[i] = suffix[i + 1] + ordered[i]
    return max((eaten + 1) * suffix[eaten] for eaten in range(len(ordered) + 1))
def defeating_cost(i, defeating):
    """Experience earned after eating i pets: (i + 1) * suffix-sum at i."""
    multiplier = i + 1
    return multiplier * defeating[i]
def defeating_cost_precompute(health):
    """Return suffix sums of health: out[i] == sum(health[i:]), out[-1] == 0."""
    suffix = [0] * (len(health) + 1)
    for idx in reversed(range(len(health))):
        suffix[idx] = suffix[idx + 1] + health[idx]
    return suffix
# Driver: T test cases on stdin, each giving N followed by N health values.
T = int(sys.stdin.readline())
for i in range(T):
    N = int(sys.stdin.readline())
    health = []
    line = sys.stdin.readline().split()
    for health_val in line:
        health.append(int(health_val))
    print (tackle_mandragora(health))
|
6,757 | 2fd490ca54f5d038997cec59a3e07c3f2c2d2538 | from django.urls import path
from . import views
urlpatterns = [
    # Landing page.
    path('', views.home, name ='park-home'),
    # Login form.
    path('login/', views.login, name ='park-login'),
]
6,758 | 359f4fa75379cc2dd80d372144ced08b8d15e0a4 | def rank_and_file(l):
dict = {}
final_list = []
for each in l:
for num in each:
dict[num] = dict[num] + 1 if num in dict else 1
for key in dict:
if dict[key] % 2 != 0:
final_list.append(key)
final_list = sorted(final_list)
return " ".join(map(str, final_list))
# Driver: Code Jam input. First line = case count; each case gives N, then
# 2N-1 rows of integers. Answers accumulate into one string written at the end.
f = open('B-large.in.txt', 'r')
f2 = open('outputLarge.txt', 'w')
final = ''
for i in range(1, int(f.readline().strip())+1):
    l = []
    for j in range(2*int(f.readline()) - 1):
        # NOTE(review): on Python 3 this appends a one-shot map iterator;
        # rank_and_file consumes each row exactly once, so it still works.
        l.append(map(int, f.readline().strip().split()))
    final += 'Case #{}: {}\n'.format(i, rank_and_file(l))
f2.write(final)
6,759 | 40f57ccb1e36d307b11e367a2fb2f6c97051c65b | # @Time : 2019/6/2 8:42
# @Author : Xu Huipeng
# @Blog : https://brycexxx.github.io/
class Solution:
    """Four takes on LeetCode 9 — is an integer a decimal palindrome?"""

    def isPalindrome(self, x: int) -> bool:
        """String approach: two-pointer compare of the decimal text."""
        text = str(x)
        lo, hi = 0, len(text) - 1
        while lo < hi:
            if text[lo] != text[hi]:
                return False
            lo += 1
            hi -= 1
        return True

    def isPalindrome1(self, x: int) -> bool:
        """Digit-list approach: extract digits, then two-pointer compare."""
        if x < 0:
            return False
        digits = []
        while x >= 1:
            x, digit = divmod(x, 10)
            digits.append(digit)
        lo, hi = 0, len(digits) - 1
        while lo < hi:
            if digits[lo] != digits[hi]:
                return False
            lo += 1
            hi -= 1
        return True

    def isPalindrome2(self, x: int) -> bool:
        """Compare leading and trailing digits, shrinking from both ends."""
        if x < 0:
            return False
        divisor = 1
        while x // divisor >= 10:
            divisor *= 10
        while x > 0:
            if x // divisor != x % 10:
                return False
            # Strip the matched leading and trailing digits.
            x = (x % divisor) // 10
            divisor //= 100
        return True

    def isPalindrome3(self, x: int) -> bool:
        """Reverse only the lower half of the digits and compare halves."""
        if x < 0 or (x % 10 == 0 and x != 0):
            return False
        reversed_half = 0
        while reversed_half < x:
            reversed_half = reversed_half * 10 + x % 10
            x //= 10
        # Odd digit counts leave one extra (middle) digit in reversed_half.
        return reversed_half == x or reversed_half // 10 == x
if __name__ == '__main__':
    # Quick manual check of the half-reversal variant.
    s = Solution()
    print(s.isPalindrome3(121))
|
6,760 | 789f098fe9186d2fbda5417e9938930c44761b83 | # Unsolved:Didn't try coz of this warning:
# If you use Python, then submit solutions on PyPy. Try to write an efficient solution.
from sys import stdin
from collections import defaultdict
# For each test case: count array elements that equal the sum of some
# contiguous subarray of length >= 2 ("special" numbers), with multiplicity.
t = int(input())
for _ in range(t):
    n = int(input())
    arr = list(map(int, stdin.readline().strip().split()))
    d = defaultdict(int) # frequency of elements in array
    maxnum = 0
    for num in arr:
        d[num] += 1
        if num>maxnum: maxnum = num
    special = set()   # subarray sums already credited, to avoid double counting
    ans = 0
    for i in range(n-1):
        ssf = arr[i]  # running subarray sum starting at i
        for j in range(i+1, n):
            ssf += arr[j]
            # NOTE(review): this early break assumes all elements are
            # non-negative (sums only grow) — confirm the constraints.
            if ssf>maxnum:break # TLE without this condition
            if d[ssf] and ssf not in special:
                special.add(ssf)
                ans += d[ssf]
    print(ans)
|
6,761 | 2180146da7ea745f5917ee66fd8c467437b5af4c | # Time :O(N) space: O(1)
def swap(arr, start, end):
    """Reverse arr[start..end] (inclusive) in place."""
    lo, hi = start, end
    while lo < hi:
        arr[lo], arr[hi] = arr[hi], arr[lo]
        lo += 1
        hi -= 1
def rotation(arr, k, n):
    """Left-rotate arr (length n) by k positions in place using the
    three-reversal trick, printing the array after each stage."""
    k %= n
    for lo, hi in ((0, k - 1), (k, n - 1), (0, n - 1)):
        swap(arr, lo, hi)
        print(arr)
if __name__ == '__main__':
    # Demo: rotate a 7-element array left by 4.
    arr = [1, 2, 3, 4, 5, 6, 7]
    n = len(arr)
    k = 4
    rotation(arr, k, n)
|
6,762 | cfdfc490396546b7af732417b506100357cd9a1f | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
import RPi.GPIO as gpio # import the RPi.GPIO library under the name gpio
import time
gpio.setmode(gpio.BOARD) # use physical BOARD pin numbering
pin = 40
gpio.setup(pin, gpio.OUT) # configure the pin as an output
gpio.output(pin, gpio.HIGH) # drive the pin high
time.sleep(5) # hold high for 5 seconds (original comment wrongly said 0.5 s)
gpio.output(pin, gpio.LOW) # drive the pin low
time.sleep(1) # hold low for 1 second
gpio.cleanup() # release the GPIO pins
6,763 | 4af72cab6444922ca66641a08d45bcfe5a689844 |
from models import Cell,Board
import random
from pdb import set_trace as bp
status={'end':-1}
game=None
class Game_Service(object):
    """Minesweeper game logic: builds the mine map and resolves clicks.

    Operates on the module-level ``game`` Board instance. About 30% of the
    cells are mined.
    """
    def __init__(self, row_num, col_num):
        self._row_num = row_num
        self._col_num = col_num
        mine_percent = 0.3
        self._mine_num = int(mine_percent * float(self._row_num * self._col_num))
        self.shifts = [-1, 0, 1]

    def _neighbors(self, index):
        """Return the flat indices of the up-to-8 cells adjacent to index.

        Uses floor division (//), which is correct on both Python 2 and 3;
        the original '/' yields a float on Python 3 and silently breaks the
        in-range checks below.
        """
        temp_r = index // self._col_num
        temp_c = index % self._col_num
        return [(temp_r + dr) * self._col_num + (temp_c + dc)
                for dr in self.shifts for dc in self.shifts
                if [temp_r + dr, temp_c + dc] != [temp_r, temp_c]
                and temp_r + dr in range(0, self._row_num)
                and temp_c + dc in range(0, self._col_num)]

    def generate_map(self):
        """Generate the mine map and return the board dimensions."""
        global game
        game = Board(self._row_num, self._col_num)
        s = set([])
        # NOTE(review): '<=' places mine_num + 1 mines; preserved as-is
        # since clients may rely on the current density.
        while len(s) <= self._mine_num:
            i = random.randint(0, self._row_num * self._col_num - 1)
            if i not in s:
                self._set_mine(i)
                s.add(i)
        return {'row_num': self._row_num,
                'col_num': self._col_num}

    def _set_mine(self, index):
        """Mark cell[index] as a mine and bump each neighbor's mine count."""
        game.get_cell(index).set_mine()  # set current index as mine
        game.add_mine(index)  # add index to mine_index
        for n in self._neighbors(index):
            game.get_cell(n).add_neighbor()

    def choose_mine(self, index):
        """Resolve a click on cell[index].

        Returns a dict mapping changed cell indices (as strings) to their
        display value, plus a 'type' key: 'continue', 'win' or 'lose'.
        """
        cell = game.get_cell(index)
        update_stack = {'type': 'continue'}
        if cell.isMine():
            self._flipAll(update_stack)  # clicked on a mine
        else:
            self._flip(update_stack, index)  # clicked on a safe cell
        return update_stack

    def _flip(self, update_stack, index):
        """Flip the chosen cell; flood-fill outward from zero-neighbor cells."""
        cell = game.get_cell(index)
        if cell.ifFlipped() == False:
            cell.flip()
            game.decrease_remain()
            if cell.isMine() == False and cell.get_neighbor() > 0:
                update_stack[str(index)] = cell.get_neighbor()
                return
            elif cell.isMine() == False and cell.get_neighbor() == 0:
                update_stack[str(index)] = cell.get_neighbor()
                # A zero cell has no adjacent mines, so open all neighbors.
                for n in self._neighbors(index):
                    self._flip(update_stack, n)

    def _flipAll(self, update_stack):
        """Reveal every mine and decide the game outcome."""
        mines_index = game.get_mines()
        for i in mines_index:
            update_stack[str(i)] = status['end']
        update_stack['row_num'] = self._row_num
        update_stack['col_num'] = self._col_num
        update_stack['_mine_num'] = len(mines_index)
        # Win iff only the mines remain unflipped.
        if len(mines_index) == game.get_remain():
            update_stack['type'] = 'win'
        else:
            update_stack['type'] = 'lose'
6,764 | 4e202cf7d7da865498ef5f65efdf5851c62082ff | def decimal_to_binary(num):
if num == 0: return '0'
binary = ''
while num != 0:
binary = str(num % 2) + binary
num = num // 2
return binary
def modulo(numerator, exp, denominator):
    """Return (numerator ** exp) % denominator for exp >= 0.

    Fixes the original square-and-multiply, which tested the MOST
    significant bit (binary[0]) where the LEAST significant bit was
    required — e.g. modulo(5, 2, 17) returned 125 % 17 instead of
    25 % 17. Delegates to the built-in three-argument pow(), which
    performs modular exponentiation with intermediate values kept
    reduced mod denominator.
    """
    return pow(numerator, exp, denominator)
# print(modulo(5, 149, 17))
# print(decimal_to_binary(0))
# print(decimal_to_binary(1))
# print(decimal_to_binary(2))
# print(decimal_to_binary(8))
# print(decimal_to_binary(10))
# print(decimal_to_binary(11)) |
6,765 | 382597628b999f2984dba09405d9ff3dd2f35872 | #! /usr/bin/env python
import RPIO
import sys
RPIO.setwarnings(False)
# GPIO number is taken from the first command-line argument.
gpio = int(sys.argv[1])
RPIO.setup(gpio, RPIO.OUT)
# NOTE(review): the pin is configured as OUT but then read back — confirm
# whether RPIO.IN was intended for a read script.
input_value = RPIO.input(gpio)
print input_value
6,766 | c5f0b1dde320d0042a1bf4de31c308e18b53cbeb | version https://git-lfs.github.com/spec/v1
oid sha256:0c22c74b2d9d62e2162d2b121742b7f94d5b1407ca5e2c6a2733bfd7f02e3baa
size 5016
|
6,767 | 886024a528112520948f1fb976aa7cb187a1da46 | import json
parsed = {}
# Hard-coded local dump path; the commented alternatives were other dumps
# inspected with the same script.
with open('/Users/danluu/dev/dump/terra/filtered_events.json','r') as f:
# with open('/Users/danluu/dev/dump/terra/game-data/2017-05.json','r') as f:
# with open('/Users/danluu/dev/dump/terra/ratings.json','r') as f:
    parsed = json.load(f)
# print(json.dumps(parsed, indent=2))
print(json.dumps(parsed["4pLeague_S1_D1L1_G4"]["events"]["faction"], indent=2))
|
6,768 | 5c20eefe8111d44a36e69b873a71377ee7bfa23d | import os, datetime
import urllib
from flask import (Flask, flash, json, jsonify, redirect, render_template,
request, session, url_for)
import util.database as db
# When the module lives somewhere other than the working directory, point
# Flask at template/static folders relative to this file.
template_path=os.path.dirname(__file__)+"/templates"
file=""
if template_path!="/templates":
    app = Flask("__main__",template_folder=os.path.dirname(__file__)+"/templates",static_folder=os.path.dirname(__file__)+"/static")
    file = open(os.path.dirname(__file__)+'/data/keys.json')
else:
    app = Flask("__main__")
    file = open('./data/keys.json')
# Random per-process secret: sessions do not survive a restart.
app.secret_key = os.urandom(32)
# NOTE(review): 'file' shadows the (Py2) builtin and the handle is never
# closed; a with-block would be cleaner.
content = file.read()
keys = json.loads(content)
# has a 5000 calls/day limit
PIXABAY_KEY = keys['Pixabay']
PIXABAY_STUB = "https://pixabay.com/api/?key=" + PIXABAY_KEY + "&q=" #separate words with "+"
@app.route('/')
def home():
    """Landing page: shows goal progress for a logged-in user.

    NOTE(review): the body of `if set_goal != []:` duplicates the /goals
    view almost line for line (including the nested months_between helper)
    and leaves debug print() calls in place — candidates for extraction.
    """
    if "username" in session:
        id_num=db.search_user_list(session["username"], is_usrname=True)[0][2]
        finavail=db.search_finance_list(id_num)
        goalavail=db.search_goal_list(id_num)
        if finavail:
            session["finances"]=session["username"]
        if goalavail:
            session["goals"]=session["username"]
        set_goal = db.search_goal_list(id_num)
        print(set_goal)
        if set_goal != []:
            user_id = db.search_user_list(session['username'], is_usrname=True)[0][2]
            g = db.search_goal_list(user_id)
            b = db.search_finance_list(user_id)
            t = db.search_time_list(user_id)
            date_now = datetime.date.today()
            price = g
            perc = g
            delta_months = 0
            # Each query returns a list of rows; unwrap the first row's fields.
            if g != []:
                g = g[0][0]
            if price != []:
                price = price[0][1]
            if perc != []:
                perc = perc[0][2]
            ##function to get difference in months between 2 dates
            def months_between(date1,date2):
                if date1>date2:
                    date1,date2=date2,date1
                m1=date1.year*12+date1.month
                m2=date2.year*12+date2.month
                months=m2-m1
                if date1.day>date2.day:
                    months-=1
                elif date1.day==date2.day:
                    seconds1=date1.hour*3600+date1.minute+date1.second
                    seconds2=date2.hour*3600+date2.minute+date2.second
                    if seconds1>seconds2:
                        months-=1
                return months
            if t != []:
                t = t[0][0]
                delta_months = months_between(datetime.datetime.strptime(t,'%Y-%m-%d'), datetime.datetime.strptime(str(date_now),'%Y-%m-%d'))
                print(delta_months)
            img = db.search_image_list(user_id)
            if img != []:
                img = img[0][0]
            if b != []:
                bal = b[0][0]
                inc = b[0][1]
            print(b)
            print(g)
            print(price)
            print(perc)
            print(img)
            if g or price:
                if b:
                    print("Used the first one")
                    # Fraction of the goal funded so far, capped at 100%.
                    perc_complete = (delta_months * (perc / 100.0) * inc)/price
                    print(perc_complete)
                    if perc_complete > 1:
                        perc_complete = 1
                    return render_template('home.html',fin=finavail,goal=goalavail, set_goal= set_goal, goal_name =g, goal_price=price,perc_inc = perc, image=img, bal=bal, income=inc, months= delta_months, perc_comp = perc_complete * 100 )
            return render_template('home.html',fin=finavail,goal=goalavail)
        return render_template('home.html',fin=finavail,goal=goalavail)
    return render_template('home.html')
@app.route('/register')
def register():
    """Render the registration form."""
    return render_template('register.html')
@app.route('/login')
def login():
    """Render the login form."""
    return render_template('login.html')
@app.route('/auth', methods=['POST'])
def auth():
    """Handle both the registration and the login form posts.

    The 'submit' field distinguishes the two flows. On success the username
    is stored in the session and the user is redirected home; on failure a
    message is flashed and the user returns to the originating form.

    Changes from the original: the debug print() calls that wrote plaintext
    passwords to the server logs were removed, as was an unreachable print
    after a return.
    """
    user = request.form.get("user")
    paswrd = request.form.get('pass')
    if request.form.get("submit")=="Register":
        paswrd2 = request.form.get("pass2")
        if paswrd != paswrd2:
            flash("Passwords Do Not Match")
            return redirect(url_for('register'))
        if db.register(user, paswrd):
            flash("Registered successfully")
            session['username'] = request.form['user']
        else:
            # db.register() returns falsy when the username already exists.
            flash("Unable to register the user")
            return redirect(url_for('register'))
    else:
        match=db.search_user_list(user, is_usrname=True)
        if len(match)>0:
            # NOTE(review): passwords are stored and compared in plaintext;
            # they should be hashed (e.g. werkzeug.security) instead.
            if match[0][1]==paswrd:
                session["username"]=request.form["user"]
            else:
                flash("wrong Password")
                return redirect(url_for('login'))
        else:
            flash("User not found")
            return redirect(url_for('login'))
    return redirect(url_for('home'))
@app.route('/finances')
def finance():
    """Show the logged-in user's balance, income, expenses and ratings.

    NOTE(review): debug print() calls and the unused unpacked variable `i`
    are left in place.
    """
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    user_id = db.search_user_list(session['username'])[0][2]
    items = db.search_finance_list(user_id)
    daily = db.search_expense_list(user_id, is_id=True)
    monthly = db.search_monthly_list(user_id, is_id=True)
    ratings = db.search_rating_list(user_id, is_id=True)
    print(ratings)
    print(f"Unlike month, this is daily: {daily}\n")
    # Rows are (name, amount, ...) tuples; keep only name -> amount.
    w = dict([ (x[0], x[1]) for x in daily ])
    s = dict([ (x[0], x[1]) for x in monthly ])
    r = dict([ (x[0], x[1]) for x in ratings ])
    print(f"THIS is monthly: {monthly}")
    print(f"THIS is s: {s}")
    print(f"These are the ratings: {r}")
    total = 0
    m_total = 0
    for x in w.values():
        total += float(x)
    for x in s.values():
        m_total += float(x)
    if items != []:
        bal,income,i = items[0]
        diction = {"Balance":bal, "Income":income}
        return render_template('findata.html',
            diction=diction,
            daily=w,
            months = s,
            total=total,
            mtotal = m_total,completed=True, ratings=r)
    return render_template('findata.html')
@app.route('/fincalc', methods=['POST'])
def calc():
    """Persist the posted finance form: balance, income, daily/monthly
    expenses and their importance ratings.

    Ratings are stored both via db.add_rating and appended to
    static/ratings.csv, replacing any previous line for this user.
    """
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    # print(request.form)
    session["finances"]=session["username"]
    # [1:] strips the leading currency symbol from the posted amounts.
    bal = request.form['balance'][1:]
    monthly = request.form['monthly-inputs']
    income = request.form['income'][1:]
    # print(request.form)
    s = request.form
    d_rates = request.form['daily-importance']
    m_rates = request.form['monthly-importance']
    print(d_rates)
    user_id = db.search_user_list(session['username'])[0][2]
    daily_dict = json.loads(d_rates)
    monthly_dict = json.loads(m_rates)
    print(daily_dict)
    print(monthly_dict)
    dai_im = dict([x for x in daily_dict.values()]) # {expenseName: rating, expenseName2: rating, ...}
    mon_im = dict([x for x in monthly_dict.values()])
    file=os.path.dirname(__file__)+f'/static/ratings.csv'
    stringg = "{"
    try:
        with open(file) as f: # if readable, file already exists
            print("File found, not creating...")
            f.close()
    except Exception as e:
        print(e)
        with open(file, 'a+') as f: # creates the file
            print("File not found, creating...")
            f.write(f"ratings,id\n")
            f.close()
    for item in mon_im:
        db.add_rating(item, mon_im[item], user_id)
        stringg += "'" + item + "'" + " : " + "'" + str(mon_im[item]) + "'" + " "
    for item in dai_im:
        db.add_rating(item, dai_im[item], user_id)
        stringg += "'" + item + "'" + " : " + "'" + str(dai_im[item]) + "'" + " "
    stringg += "}," + str(user_id) + "\n"
    # Rewrite the CSV without this user's old line, then append the new one.
    # NOTE(review): line.strip("\n").split(",")[1] raises IndexError on any
    # line without a comma; f.close() inside a with-block is redundant.
    with open(file, "r") as f:
        lines = f.readlines()
    with open(file, "w") as f:
        for line in lines:
            if str(user_id) != line.strip("\n").split(",")[1]:
                f.write(line)
        f.write(stringg)
        f.close()
    daily = request.form['all-inputs']
    print(f"This is daily: {monthly}")
    daily = json.loads(daily) # dictionary
    monthly = json.loads(monthly)
    print(f"This is daily now {monthly}")
    w = dict([x for x in daily.values()]) # {expense1: $$$, expense2: $$$, ...}
    m = dict([x for x in monthly.values()])
    print(f"\nThis is calculated m:{m}\n")
    db.add_finances(bal, m, income, w, user_id)
    flash("Finances updated")
    return redirect(url_for('home'))
@app.route('/goals')
def goals():
    """Render the goals page with the user's goal, finances and saving progress."""
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    # user_id is the third column of the user row returned by the db layer.
    user_id = db.search_user_list(session['username'])[0][2]
    g = db.search_goal_list(user_id)      # goal rows -- presumably (name, price, percent); verify against db layer
    b = db.search_finance_list(user_id)   # finance rows -- first two columns used as (balance, income)
    t = db.search_time_list(user_id)      # time rows -- first column is a 'YYYY-MM-DD' string
    date_now = datetime.date.today()
    # price/perc start as aliases of the goal row list and are unpacked below.
    price = g
    perc = g
    delta_months = 0
    if g != []:
        g = g[0][0]          # goal name
    if price != []:
        price = price[0][1]  # goal price
    if perc != []:
        perc = perc[0][2]    # percent of income saved per month
    ##function to get difference in months between 2 dates
    def months_between(date1,date2):
        # Order-insensitive: swap so date1 <= date2.
        if date1>date2:
            date1,date2=date2,date1
        m1=date1.year*12+date1.month
        m2=date2.year*12+date2.month
        months=m2-m1
        # Subtract one when a full calendar month has not yet elapsed.
        if date1.day>date2.day:
            months-=1
        elif date1.day==date2.day:
            # NOTE(review): minutes are not multiplied by 60 here, so these
            # "seconds" values mix units -- harmless for whole-day inputs.
            seconds1=date1.hour*3600+date1.minute+date1.second
            seconds2=date2.hour*3600+date2.minute+date2.second
            if seconds1>seconds2:
                months-=1
        return months
    if t != []:
        t = t[0][0]
        # Both sides are parsed into datetime objects so months_between can
        # read hour/minute/second attributes.
        delta_months = months_between(datetime.datetime.strptime(t,'%Y-%m-%d'), datetime.datetime.strptime(str(date_now),'%Y-%m-%d'))
        print(delta_months)
    img = db.search_image_list(user_id)
    if img != []:
        img = img[0][0]
    # bal/inc are only defined when finance rows exist; all uses below are
    # guarded by `if b:`.
    if b != []:
        bal = b[0][0]
        inc = b[0][1]
    print(b)
    print(g)
    print(price)
    print(perc)
    print(img)
    if g or price:
        if b:
            print("Used the first one")
            # Fraction of the goal price covered so far by monthly savings.
            perc_complete = (delta_months * (perc / 100.0) * inc)/price
            print(perc_complete)
            if perc_complete > 1:
                perc_complete = 1  # cap progress at 100%
            return render_template('goals.html', goal=g, goal_price=price,perc_inc = perc, image=img, bal=bal, income=inc, months= delta_months, perc_comp = perc_complete * 100 )
        else:
            print("Used the second")
            return render_template('goals.html', goal=g, goal_price=price,perc_inc = perc, image=img)
    else:
        if b:
            return render_template('goals.html', bal=bal, income=inc)
        else:
            return render_template('goals.html')
@app.route('/gcalc', methods=['POST'])
def gcalc():
    """Store the submitted goal and cache a matching Pixabay image for it."""
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    goal_name = request.form['goal']
    goal_price = request.form['goal_price'][1:]  # strip the leading currency symbol
    percentage = request.form['slide']           # percent of income to put toward the goal
    print("This is percentage:")
    print(percentage)
    print("gcalc")
    print(goal_name)
    print(goal_price)
    user_id = db.search_user_list(session['username'])[0][2]
    db.add_goals(goal_name, goal_price, percentage, user_id)
    a = db.search_image_list(user_id)
    print(a)
    # optimization to save on api calls
    # Only hit the Pixabay API when there is no cached image for this goal name.
    if a == [] or a[0][2] != goal_name:
        try:
            l = urllib.request.urlopen(PIXABAY_STUB + goal_name.replace(' ', '+') + "&image_type=photo")
            p = json.loads(l.read())
            img = p['hits'][0]['webformatURL']
        except:
            # NOTE(review): bare except also hides KeyError/IndexError from an
            # empty 'hits' result, not only connection failures.
            return render_template('error.html', err="Cannot connect to API", fix="Try refreshing or contacting the site owner")
    else:
        img = a[0][1]  # reuse the cached image URL
    db.add_images(img, goal_name, user_id)
    flash(f"Goal for {goal_name} at ${goal_price} has been added!")
    return redirect(url_for('home'))
@app.route('/sankey')
def sankey():
    """Render the Sankey chart page, passing the viewer's user id to the template."""
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    uid = db.search_user_list(session['username'])[0][2]
    return render_template('sankey.html', idnum=uid)
@app.route('/pie')
def pie():
    """Render the pie chart page, passing the viewer's user id to the template."""
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    uid = db.search_user_list(session['username'])[0][2]
    return render_template('pie.html', idnum=uid)
@app.route('/area')
def area():
    """Render the area chart of low-importance expenses that slow the goal."""
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    user_id = db.search_user_list(session['username'])[0][2]
    goal=db.search_goal_list(user_id)
    if goal == []:
        # The chart is meaningless without a goal; send the user to create one.
        return redirect(url_for('goals'))
    daily=db.search_expense_list(user_id)
    monthly=db.search_monthly_list(user_id)
    # name -> amount maps for daily and monthly expenses.
    dadict={}
    modict={}
    print(goal)
    ratings={}
    for names in daily:
        dadict[names[0]]=names[1]
    for names in monthly:
        modict[names[0]]=names[1]
    print(dadict,modict)
    percent=0
    for names in db.search_rating_list(user_id):
        print(names)
        # Yearly cost of the expense as a fraction of the goal price
        # (daily expenses approximated as 30 days per month).
        if names[0] in modict:
            percent=(modict[names[0]]*12)/goal[0][1]
        if names[0] in dadict:
            percent=(dadict[names[0]]*30*12)/goal[0][1]
        # Keep only low-importance (<= 6) expenses costing at least 5% of the goal.
        if names[1]<=6 and percent >=0.05:
            ratings[names[0]]=(names[1],percent)
    print(ratings)
    return render_template('area.html',idnum=user_id,ratings=ratings)
@app.route('/logout')
def logout():
    """Log the current user out and return to the home page."""
    session.pop('username', None)  # no-op when nobody is logged in
    return redirect(url_for('home'))
@app.route('/account')
def account():
    """Show the accounts page with every user row serialized as JSON."""
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    # Query and serialize once; the original issued the identical db query and
    # json.dumps twice (the second pair only to feed a debug print).
    user_list = json.dumps(db.search_user_list(ret_all=True))
    print(user_list)
    return render_template('accounts.html', user_list=user_list)
@app.route('/update', methods=["POST"])
def update():
    """Update the logged-in user's credentials and optionally reset statistics."""
    # Guard against unauthenticated access, matching every other route in this
    # app; previously this handler raised KeyError on session['username'].
    if 'username' not in session:
        flash("You must be logged in to access this page")
        return redirect(url_for('login'))
    print('this is the updates')
    update_dict = request.form['all-options']
    update_dict = json.loads(update_dict)
    print(request.form)
    user_ids = db.search_user_list(session['username'])
    user = user_ids[0][-1]
    print(user)
    # Fall back to the stored username/password when a field was left blank.
    db.update_user_list(update_dict['username'] or user_ids[0][0], update_dict['password'] or user_ids[0][1], user)
    db.reset_statistics(user, update_dict['reset'])
    session.pop('username')
    session['username'] = update_dict['username'] or user_ids[0][0] # change username in session
    flash("Account information updated successfully")
    return redirect(url_for('home'))
@app.route('/del')
def delete():
    """Delete the logged-in user's account and end the session."""
    if 'username' not in session:
        flash("Woops. You can't be here")
        return redirect(url_for('login'))
    user = db.search_user_list(session['username'])[0][-1]
    print(user)
    # rem=True tells the db layer to remove the user instead of updating.
    db.update_user_list(None, None, user, rem=True)
    flash("User successfully removed")
    session.pop('username')
    return redirect(url_for('home'))
if __name__ == "__main__":
    # Development entry point only -- debug mode must not be enabled in production.
    app.debug = True
    app.run()
|
6,769 | eeb588a162fa222c0f70eb832a0026d0d8adbe9b | import sys
import os.path
# Make the sibling 'jsondb' directory importable as a top-level package by
# appending <repo root>/jsondb to sys.path.
root_dir = os.path.dirname(os.path.dirname(__file__))
jsondb_dir = os.path.join(root_dir, 'jsondb')
sys.path.append(jsondb_dir)
|
6,770 | deaa458e51a7a53dd954d772f9e3b1734508cf28 | '''
REFERENCE a table with a FOREIGN KEY
In your database, you want the professors table to reference the universities table. You can do that by specifying a column in professors table that references a column in the universities table.
As just shown in the video, the syntax for that looks like this:
ALTER TABLE a
ADD CONSTRAINT a_fkey FOREIGN KEY (b_id) REFERENCES b (id);
Table a should now refer to table b, via b_id, which points to id. a_fkey is, as usual, a constraint name you can choose on your own.
Pay attention to the naming convention employed here: Usually, a foreign key referencing another primary key with name id is named x_id, where x is the name of the referencing table in the singular form.
Instructions
100 XP
1 Rename the university_shortname column to university_id in professors.
2 Add a foreign key on university_id column in professors that references the id column in universities.
Name this foreign key professors_fkey.
'''
-- Rename the university_shortname column
ALTER TABLE professors
RENAME COLUMN university_shortname TO university_id;
-- Add a foreign key on professors referencing universities
ALTER TABLE professors
ADD CONSTRAINT professors_fkey FOREIGN KEY (university_id) REFERENCES universities (id); |
6,771 | 2d5e7c57f58f189e8d0c7d703c1672ea3586e4ac | """
Simple neural network using pytorch
"""
import torch
import torch.nn as nn
# Prepare the data
# X represents the amount of hours studied and how much time students spent sleeping
# X represents the amount of hours studied and how much time students spent sleeping
X = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float) # 3 X 2 tensor
# y represent grades.
y = torch.tensor(([92], [100], [89]), dtype=torch.float) # 3 X 1 tensor
# xPredicted is a single input for which we want to predict a grade using
# the parameters learned by the neural network.
xPredicted = torch.tensor(([4, 8]), dtype=torch.float) # 1 X 2 tensor
# Scale units
# (Removed a stray `breakpoint()` left over from debugging -- it dropped every
# run into the interactive debugger before any training happened.)
X_max, index1 = torch.max(X, 0)                  # per-column maxima of X
xPredicted_max, index2 = torch.max(xPredicted, 0)
X = torch.div(X, X_max)                          # scale each feature into [0, 1]
xPredicted = torch.div(xPredicted, xPredicted_max)
y = y / 100 # max test score is 100
print("X_max:", X_max)
print("xPredicted_max:", xPredicted_max)
print("X:", X)
print("y:", y)
print("xPredicted:", xPredicted)
class Neural_Network(nn.Module):
    """A two-layer sigmoid network trained with hand-written backprop.

    Weights are plain tensors (not nn.Parameter); gradients are computed
    manually in backward() and applied as in-place updates.
    """

    def __init__(self, input_size=2, output_size=1, hidden_size=3):
        super(Neural_Network, self).__init__()
        # Layer sizes.
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size
        # Randomly initialised weight matrices for the two layers.
        self.W1 = torch.randn(self.input_size, self.hidden_size)
        self.W2 = torch.randn(self.hidden_size, self.output_size)

    def forward(self, X):
        """Forward pass: X -> sigmoid hidden layer -> sigmoid output."""
        self.z = torch.matmul(X, self.W1)
        self.z2 = self.sigmoid(self.z)
        self.z3 = torch.matmul(self.z2, self.W2)
        return self.sigmoid(self.z3)

    def backward(self, X, y, o):
        """Backprop the squared-error gradient and update W1/W2 in place."""
        self.o_error = y - o
        self.o_delta = self.o_error * self.sigmoid_prime(o)
        self.z2_error = torch.matmul(self.o_delta, torch.t(self.W2))
        self.z2_delta = self.z2_error * self.sigmoid_prime(self.z2)
        self.W1 += torch.matmul(torch.t(X), self.z2_delta)
        self.W2 += torch.matmul(torch.t(self.z2), self.o_delta)

    def sigmoid(self, s):
        """Elementwise logistic function."""
        return 1 / (1 + torch.exp(-s))

    def sigmoid_prime(self, s):
        """Derivative of the logistic function, given sigmoid *output* s."""
        return s * (1 - s)

    def train(self, X, y):
        # One training step: forward pass followed by a weight update.
        # NOTE(review): shadows nn.Module.train(mode); callers in this file
        # only use it as a training step, so the name is preserved.
        o = self.forward(X)
        self.backward(X, y, o)

    def save_weights(self, model):
        # Persist the whole model object; reload later with torch.load("NN").
        torch.save(model, "NN")

    def predict(self):
        """Print the prediction for the module-level xPredicted input."""
        # @TODO: should be passed in as argument
        print ("Predicted data based on trained weights: ")
        print ("Input (scaled): \n" + str(xPredicted))
        print ("Output: \n" + str(self.forward(xPredicted)))
NN = Neural_Network()
epoch = 1000
# Full-batch training for a fixed number of epochs.
for i in range(epoch): # trains the NN epoch times
    #print ("#" + str(i) + " Loss: " + str(torch.mean((y - NN(X))**2).detach().item())) # mean sum squared loss
    NN.train(X, y)
# Persist the trained model to the file "NN", then report the prediction.
NN.save_weights(NN)
NN.predict()
6,772 | 2c2b075f9ea9e8d6559e44ad09d3e7767c48205e | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import numpy as np
def weight_init(layers):
    """Initialise BatchNorm1d layers to identity and Linear layers uniformly.

    Linear weights are drawn from U(-1/sqrt(fan_in), 1/sqrt(fan_in)) with zero
    bias; any other layer type (activation, dropout, ...) is left untouched.
    """
    for module in layers:
        if isinstance(module, nn.BatchNorm1d):
            # Identity affine transform: scale 1, shift 0.
            module.weight.data.fill_(1)
            module.bias.data.zero_()
        elif isinstance(module, nn.Linear):
            bound = 1.0 / np.sqrt(module.in_features)
            module.weight.data.uniform_(-bound, bound)
            module.bias.data.fill_(0)
# nn.init.kaiming_normal_(layer.weight.data, nonlinearity='relu')
# 传统的预测点击率模型
class LR(nn.Module):
    """Logistic-regression-style CTR scorer (a single linear layer).

    Note: the model keeps its own bias parameter on top of nn.Linear's
    built-in bias, matching the original formulation.
    """

    def __init__(self,
                 feature_nums,
                 output_dim = 1):
        super(LR, self).__init__()
        self.linear = nn.Linear(feature_nums, output_dim)
        self.bias = nn.Parameter(torch.zeros((output_dim,)))

    def forward(self, x):
        """
        :param x: Int tensor of size (batch_size, feature_nums, latent_nums)
        :return: pctrs
        """
        scores = self.linear(x)
        summed = torch.sum(scores, dim=1)
        return (self.bias + summed).unsqueeze(1)
class RNN(nn.Module):
    """Stacked-LSTM regressor with a per-timestep linear readout."""

    def __init__(self,
                 feature_nums,
                 hidden_dims,
                 bi_lstm,
                 out_dims=1):
        super(RNN, self).__init__()
        self.feature_nums = feature_nums  # input feature dimension
        self.hidden_dims = hidden_dims    # LSTM hidden size
        # Number of stacked LSTM layers (original attribute name kept, typo included).
        self.bi_lism = bi_lstm
        self.lstm = nn.LSTM(self.feature_nums, self.hidden_dims, self.bi_lism)
        self.out = nn.Linear(self.hidden_dims, out_dims)

    def forward(self, x):
        """Apply the LSTM, then the linear head to every timestep."""
        hidden_seq, _ = self.lstm(x)
        seq_len, batch, hidden = hidden_seq.shape
        flat = self.out(hidden_seq.view(-1, hidden))
        return flat.view(seq_len, batch, -1)
class MLP(nn.Module):
    """Multi-layer perceptron with ReLU activations and dropout.

    :param feature_nums: input feature dimension
    :param neuron_nums: list of hidden-layer widths
    :param dropout_rate: dropout probability applied after each hidden layer
    :param output_dim: output dimension (default 1)
    """

    def __init__(self,
                 feature_nums,
                 neuron_nums,
                 dropout_rate,
                 output_dim=1):
        super(MLP, self).__init__()
        self.feature_nums = feature_nums
        self.neuron_nums = neuron_nums
        self.dropout_rate = dropout_rate
        deep_input_dims = self.feature_nums
        layers = list()
        neuron_nums = self.neuron_nums
        for neuron_num in neuron_nums:
            layers.append(nn.Linear(deep_input_dims, neuron_num))
            # layers.append(nn.BatchNorm1d(neuron_num))
            layers.append(nn.ReLU())
            # Bug fix: the dropout probability was hard-coded to 0.2, silently
            # ignoring the dropout_rate constructor argument.
            layers.append(nn.Dropout(p=self.dropout_rate))
            deep_input_dims = neuron_num
        # Hidden layers are initialised here; the output layer below is left
        # with PyTorch's default init, as in the original.
        weight_init(layers)
        layers.append(nn.Linear(deep_input_dims, output_dim))
        self.mlp = nn.Sequential(*layers)

    def forward(self, x):
        """
        :param x: Int tensor of size (batch_size, feature_nums, latent_nums)
        :return: pctrs
        """
        out = self.mlp(x)
        return out
6,773 | 1f21fdc9a198b31bb0d5bd6dd8f46a1b3b28ec94 | import kwic
mystr = "hello world\nmy test\napples oranges"
# Checks against older kwic variants, kept commented for reference:
#asseirt(kwic0.kwic(mystr) == [])
#assert(kwic1.kwic(mystr) == [mystr])
#assert(len(kwic3.kwic(mystr))==2)
# kwic.kwic should produce one entry per input line (three lines here).
assert len(kwic.kwic(mystr)) == 3
|
6,774 | 8fe9d21bb65b795a6633ab390f7f5d24a90146d5 | x = '我是一个字符串'
y = "我也是一个字符串"
z = """我还是一个字符串"""
# A str is delimited by single quotes (' ') or double quotes (" ").
# Use a backslash (\) to escape special characters.
s = 'Yes,he doesn\'t'
# If you do not want backslashes to act as escapes,
# prefix the string with r to make it a raw string.
print('C:\some\name')
print('C:\\some\\name')
print(r'C:\some\name')
# A backslash also works as a line-continuation character.
s = "abcd\
efg"
print(s)
# Triple quotes ("""...""" or '''...''') let a string span multiple lines.
s = """
Hello I am fine!
Thinks.
"""
print(s)
6,775 | bc0bfb0ff8eaf21b15b06eea2ea333381c70bc75 | __author__='rhyschris'
""" Defines the set of actions.
This functions exactly the same as
Actions.cs in the Unity game.
"""
from enum import Enum
class Actions(Enum):
    """Action codes mirroring Actions.cs in the Unity game.

    Values are bit-packed: posture-style actions occupy the low bits,
    movement values are shifted left by 2 and block/attack values by 4,
    presumably so the groups occupy disjoint bit ranges -- confirm against
    Actions.cs.
    """
    doNothing = 0
    crouch = 1
    jump = 3
    # Movement (<< 2).
    walkTowards = 0x1 << 2
    runTowards = 0x2 << 2
    moveAway = 0x3 << 2
    # Blocks and attacks (<< 4).
    blockUp = 0x1 << 4
    blockDown = 0x2 << 4
    attack1 = 0x3 << 4
    attack2 = 0x4 << 4
    attack3 = 0x5 << 4
    attack4 = 0x6 << 4
if __name__ == '__main__':
    # Parenthesized print works under both Python 2 and 3; the original used
    # Python 2 print statements, which are a SyntaxError on Python 3 (where
    # the `enum` import above is standard library).
    print("Contents of actions:")
    for act in Actions:
        print(repr(act))
|
6,776 | bff9fb50f1901094c9ab3d61566509835c774f21 | import time
import os
import random
def generate_sequence(difficulty):
    """Briefly show `difficulty` distinct numbers from 1-100, then clear the screen.

    Returns the generated list so the caller can check the player's answer.
    """
    print("Try to remember the numbers! : ")
    random_list = random.sample(range(1, 101), difficulty)
    time.sleep(2)
    print(random_list)
    time.sleep(0.7)
    os.system('cls')  # Windows-only clear; 'clear' would be needed on POSIX
    time.sleep(3)
    return random_list
def get_list_from_user(difficulty):
    """Prompt the player for `difficulty` integers and return them in input order.

    NOTE(review): int(input()) raises ValueError on non-numeric input; there is
    no retry handling here.
    """
    print("WHAT WAS THE NUMBERS?? (Write each num at the same order and press Enter) : ")
    user_list = []
    for i in range(0, difficulty):
        user_num = int(input('num: '))
        user_list.append(user_num)
    print("Your chosen numbers are : " + str(user_list))
    time.sleep(3)
    return user_list
def is_list_equal(a, b):
    """Announce whether the two answer lists match and return the result."""
    matched = a == b
    if matched:
        print("CORRECT answer! :) ")
        time.sleep(2)
        print("See you next time !")
    else:
        print("This is a WRONG answer !")
        time.sleep(2)
        print("See you next time ! :)")
    time.sleep(3)
    return matched
def play_memory_game(user_input):
    """Run one round of the memory game; return True when the player wins."""
    print("****** Welcome to the Memory Game! ******" + "\n")
    shown = generate_sequence(user_input)
    answer = get_list_from_user(user_input)
    return is_list_equal(shown, answer)
6,777 | 2ed0ae48e8fec2c92effcbb3e495a1a9f4636c27 | import flask
import flask_sqlalchemy
app = flask.Flask(__name__)
# Configuration (secret key, database URI, ...) lives in settings.py.
app.config.from_pyfile('settings.py')
db = flask_sqlalchemy.SQLAlchemy(app)
6,778 | 8f57e120a1a84eb0b9918128580c152aabc6a724 | from django.db import models
class UserData(models.Model):
    """Credentials and registration metadata for a user."""
    username = models.CharField(max_length=24)
    email = models.EmailField(max_length=32, blank=True, null=True)
    password = models.CharField(max_length=32)
    created_data = models.DateTimeField()
    email_is_confirm = models.CharField(max_length=20, blank=True, null=True)
    def __str__(self):
        # Bug fix: this referenced self.login, but the field is named
        # 'username', so rendering the object raised AttributeError.
        return "Login: {}, Email: {}, Data: {}.".format(self.username, self.email, self.created_data)
    class Meta:
        verbose_name = "Данные пользователя"
        verbose_name_plural = "Данные пользователей"
class UserProfile(models.Model):
    """Display-name profile linked one-to-one to a UserData row."""
    user_data = models.OneToOneField(UserData, on_delete=models.CASCADE)
    first_name = models.CharField(max_length=24)
    last_name = models.CharField(max_length=24)
    def __str__(self):
        # Bug fix: UserData has no 'login' field; use 'username'.
        return "Login: {}, Email: {}.".format(self.user_data.username, self.user_data.email)
    class Meta:
        verbose_name = "Профиль пользователя"
        verbose_name_plural = "Профили пользователей"
6,779 | 033973ddc81a5fdf0e40009c4f321215fe3f4217 | class Solution(object):
def checkSubarraySum(self, nums, k):
if not nums or len(nums) == 1:
return False
sum_array = [0]*(len(nums)+1)
for i, num in enumerate(nums):
sum_array[i+1] = sum_array[i]+num
if k == 0:
if sum_array[-1] == 0:
return True
else:
return False
for i in range(1, len(sum_array)):
for j in range(i-1):
if not (sum_array[i]-sum_array[j])%k:
return True
return False |
6,780 | f531af47431055866db72f6a7181580da461853d | #!/usr/bin/python
from setuptools import setup, find_packages
import os
# Optional dependency groups exposed via setuptools extras.
EXTRAS_REQUIRES = dict(
    test=[
        'pytest>=2.2.4',
        'mock>=0.8.0',
        'tempdirs>=0.0.8',
    ],
    dev=[
        'ipython>=0.13',
    ],
    )
# Tests always depend on all other requirements, except dev
# .items() replaces the Python-2-only dict.iteritems() so this setup script
# also runs under Python 3.
for k, v in EXTRAS_REQUIRES.items():
    if k == 'test' or k == 'dev':
        continue
    EXTRAS_REQUIRES['test'] += v
# Pypi package documentation
root = os.path.dirname(__file__)
path = os.path.join(root, 'README.rst')
# The long_description shown on PyPI is taken verbatim from the README.
with open(path) as fp:
    long_description = fp.read()
setup(
    name='linkins',
    version='0.0.7.4',
    description=(
        'Links a directory structure and optionally executes '
        'user-defined scripts at each level of the directory '
        'hierarchy'
    ),
    long_description=long_description,
    author='Andres Buritica',
    author_email='andres@thelinuxkid.com',
    maintainer='Andres Buritica',
    maintainer_email='andres@thelinuxkid.com',
    url='https://github.com/thelinuxkid/linkins',
    license='MIT',
    packages = find_packages(),
    # NOTE(review): test_suite uses nose while the 'test' extra pins pytest --
    # confirm which runner is intended.
    test_suite='nose.collector',
    install_requires=[
        'setuptools',
    ],
    extras_require=EXTRAS_REQUIRES,
    entry_points={
        'console_scripts': [
            'linkins = linkins.cli:main',
        ],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7'
    ],
)
|
6,781 | ec200ee66e3c4a93bbd8e75f0e8b715f54b5479d | #
# In development by Jihye Sofia Seo https://www.linkedin.com/in/jihyeseo
# forked from the code of Al Sweigart
# http://inventwithpython.com/pygame/chapter10.html
# whose books are very helpful for learning Python and PyGame. Many thanks!
# Main change is that his version uses flood fill algorithm, which could not run for large boards.
# This file modified the algorithm.
#
# Flood-It is an NP hard problem http://arxiv.org/abs/1001.4420 for 3 colors or more.
# The goal of this project is to find an efficient algorithm for autoplay.
#
# Any comments are welcome at jihyeseo@post.harvard.edu
# upload: May 7 2016 Berlin Germany
#
import random, sys, webbrowser, copy, pygame
from pygame.locals import *
#sys.setrecursionlimit(1000000)
#FPS = 30
WINDOWWIDTH = 1920
WINDOWHEIGHT = 1000
boxSize = 20           # pixel size of one board cell
PALETTEGAPSIZE = 5
PALETTESIZE = 30
boardWidth = 93
boardHeight = 49
# Creates a board data structure with random colors for each box.
board = []
# Per-cell flags: conqueredAt marks cells belonging to the flood region;
# neverQueue marks interior cells that can never be on the frontier again.
conqueredAt = [[False for y in range(boardHeight)] for x in range(boardWidth)]
neverQueue = [[False for y in range(boardHeight)] for x in range(boardWidth)]
conqueredAt[0][0] = True  # the flood always starts at the top-left cell
class Queue:
    """Minimal FIFO queue backed by a Python list.

    enqueue inserts at the front and dequeue pops from the back, so items
    leave in the order they arrived.
    """
    def __init__(self):
        self.items = []
    def isEmpty(self):
        return not self.items
    def enqueue(self, item):
        self.items.insert(0, item)
    def dequeue(self):
        return self.items.pop()
    def size(self):
        return len(self.items)
def buildQueue(): # add only boundaries
    """Collect conquered cells that still border unconquered ones.

    Cells whose in-bounds neighbours are all conquered are marked in
    neverQueue so later rebuilds skip them; the remaining conquered cells
    form the flood frontier and are enqueued.
    """
    floodQueue = Queue()
    for x in range(boardWidth):
        for y in range(boardHeight):
            if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):
                # noFrontier stays True only when every in-bounds neighbour
                # is already conquered.
                noFrontier = True
                if (x > 0) :
                    noFrontier = noFrontier & (conqueredAt[x-1][y])
                if (x < boardWidth - 1):
                    noFrontier = noFrontier & (conqueredAt[x+1][y])
                if (y > 0):
                    noFrontier = noFrontier & (conqueredAt[x][y-1])
                if (y < boardHeight - 1):
                    noFrontier = noFrontier & (conqueredAt[x][y+1])
                if noFrontier :
                    neverQueue[x][y] = True
                else:
                    floodQueue.enqueue([x, y])
    return floodQueue
# R G B
# Named RGB colors (legacy palette; the game's drawing uses COLORSCHEMES below).
WHITE = (255, 255, 255)
DARKGRAY = ( 70, 70, 70)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
PURPLE = (255, 0, 255)
# The first color in each scheme is the background color, the next six are the palette colors.
COLORSCHEMES = ((150, 200, 255),
                (97, 215, 164) , #lightGr
                (0, 125, 50) ,#darkGr
                (23, 149, 195) , # light ocean
                (81, 85 , 141), # lightPur
                (147, 3, 167) , # purple
                (241, 109, 149), # jindalle
                (255, 180, 115), # tangerine
                (166, 147, 0), # tangerine?
                (183, 182, 208), # gray
                (68, 0, 0) # drak grey
                )
bgColor = COLORSCHEMES[0]
# Everything after the background is a selectable palette color (ten of them,
# despite the comment above mentioning six).
paletteColors = COLORSCHEMES[1:]
def main():
    """Game loop: draw the board, map number keys to palettes, and flood-fill.

    NOTE(review): paletteClicked is overwritten with a random palette every
    frame (autoplay experiment), so keyboard input currently has no lasting
    effect.
    """
    global FPSCLOCK, DISPLAYSURF
    pygame.init()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    pygame.display.set_caption('Flood it')
    generateRandomBoard(boardWidth, boardHeight)
    lastPaletteClicked = None
    while True: # main game loop
        paletteClicked = None
        # Draw the screen.
        DISPLAYSURF.fill(bgColor)
        drawBoard()
        drawPalettes()
        pygame.display.update()
        # Keys 1-9 select palettes 0-8; key 0 selects palette 9.
        for event in pygame.event.get(KEYUP): # get all the KEYUP events
            if event.key == K_ESCAPE:
                pygame.quit() # terminate if the KEYUP event was for the Esc key
                sys.exit()
            elif event.key == K_0:
                paletteClicked = 9
            elif event.key == K_1:
                paletteClicked = 0
            elif event.key == K_2:
                paletteClicked = 1
            elif event.key == K_3:
                paletteClicked = 2
            elif event.key == K_4:
                paletteClicked = 3
            elif event.key == K_5:
                paletteClicked = 4
            elif event.key == K_6:
                paletteClicked = 5
            elif event.key == K_7:
                paletteClicked = 6
            elif event.key == K_8:
                paletteClicked = 7
            elif event.key == K_9:
                paletteClicked = 8
            # pygame.event.post(event) # put the other KEYUP event objects back
        # Autoplay: pick a random palette each tick (overrides keyboard input).
        paletteClicked = random.randint(0,9)
        pygame.time.wait(50)
        if paletteClicked != None and paletteClicked != lastPaletteClicked:
            # a palette button was clicked that is different from the
            # last palette button clicked (this check prevents the player
            # from accidentally clicking the same palette twice)
            lastPaletteClicked = paletteClicked
            #if board[0][0] != paletteClicked :
            floodFill(board[0][0], paletteClicked, buildQueue())
            drawBoard()
            pygame.display.update()
        # FPSCLOCK.tick(FPS)
        # pygame.display.update()
        #FPSCLOCK.tick(FPS)
def generateRandomBoard(width, height):
    """Fill the module-level board with random palette indices, column by column."""
    for col in range(width):
        board.append([random.randint(0, len(paletteColors) - 1)
                      for row in range(height)])
def drawBoard():
    """Draw every board cell as a filled square in its palette color."""
    for x in range(boardWidth):
        for y in range(boardHeight):
            left, top = leftTopPixelCoordOfBox(x, y)
            pygame.draw.rect(DISPLAYSURF, (paletteColors[board[x][y]]), (left, top, boxSize, boxSize))
    # NOTE(review): blitting the surface onto itself at (0, 0) looks like a
    # no-op -- confirm whether it can be removed.
    DISPLAYSURF.blit(DISPLAYSURF, (0, 0))
def drawPalettes():
    """Draw the palette buttons with their 1-0 key labels down the left edge."""
    # Draws the six color palettes at the left of the screen.
    numColors = len(paletteColors)
    textSize = 30
    font = pygame.font.Font(None, textSize)
    for i in range(numColors):
        top = 10 + (i * PALETTESIZE) + (i * PALETTEGAPSIZE)
        left = 10
        pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top, PALETTESIZE, PALETTESIZE))
        # Label shows the keyboard key: 1-9, then 0 for the tenth color.
        textImg = font.render( str((i+1) % 10), 1, bgColor)
        DISPLAYSURF.blit( textImg, (left+10 +0*(PALETTESIZE/2-textSize/2),top+7 +0*(PALETTESIZE/2-textSize/2)))
def floodFill(teamColor, newColor, queue):
    """BFS from the frontier queue, absorbing connected teamColor cells.

    First expands the conquered region across neighbours matching teamColor,
    then repaints every conquered cell in newColor.
    NOTE(review): a cell can be enqueued twice before being marked conquered;
    harmless but slightly wasteful.
    """
    while(queue.isEmpty() == False):
        checkHere = queue.dequeue()
        (x,y) = (checkHere[0],checkHere[1])
        board[x][y] = newColor
        conqueredAt[x][y] = True
        if x > 0 :
            (X,Y) = (x-1,y)
            if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
                queue.enqueue([X, Y]) # on box to the left
        if x < boardWidth - 1:
            (X,Y) = (x+1,y)
            if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
                queue.enqueue([X, Y]) # on box to the right
        if y > 0:
            (X,Y) = (x,y-1)
            if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
                queue.enqueue([X, Y]) # on box to up
        if y < boardHeight - 1:
            (X,Y) = (x,y+1)
            if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):
                queue.enqueue([X, Y]) # on box to down
    # Repaint the whole conquered region in the newly chosen color.
    for x in range(boardWidth):
        for y in range(boardHeight):
            if conqueredAt[x][y] == True :
                board[x][y] = newColor
def leftTopPixelCoordOfBox(boxx, boxy):
    """Map a (column, row) board cell to the screen pixels of its top-left corner."""
    margin_x = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)
    margin_y = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)
    return (margin_x + boxx * boxSize, margin_y + boxy * boxSize)
# Run the game only when executed directly (not on import).
if __name__ == '__main__':
    main()
|
6,782 | 424a0e8a7a80e24aec4bdb9b8c84fd9a5e6090c6 | import logging
import time
import random
import pickle
import os
from sys import maxsize
import torch
from tensorboardX import SummaryWriter
from baselines.common.schedules import LinearSchedule
from abp.utils import clear_summary_path
from abp.models.feature_q_model import feature_q_model
from abp.adaptives.common.prioritized_memory.memory_gqf import ReplayBuffer_decom
import numpy as np
logger = logging.getLogger('root')
use_cuda = torch.cuda.is_available()
# Tensor type aliases that transparently target the GPU when CUDA is available.
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
IntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
class SADQ_GQF(object):
"""Adaptive which uses the SADQ algorithm"""
    def __init__(self, name, state_length, network_config, reinforce_config, feature_len, combine_decomposed_func, is_sigmoid = False, memory_resotre = True):
        """Build the eval/target feature-Q networks, replay memory and schedules.

        NOTE(review): combine_decomposed_func and is_sigmoid are accepted but
        never stored or used in the visible code; feature_len and features are
        assigned twice below.
        """
        super(SADQ_GQF, self).__init__()
        self.name = name
        #self.choices = choices
        self.network_config = network_config
        self.reinforce_config = reinforce_config
        self.memory = ReplayBuffer_decom(self.reinforce_config.memory_size)
        self.learning = True
        self.explanation = False
        self.state_length = state_length
        self.features = 0
        self.feature_len = feature_len
        # Global
        self.steps = 0
        self.reward_history = []
        self.episode_time_history = []
        self.best_reward_mean = -maxsize
        self.episode = 0
        self.feature_len = feature_len
        self.features = None
        self.reset()
        self.memory_resotre = memory_resotre
        reinforce_summary_path = self.reinforce_config.summaries_path + "/" + self.name
        # Either start fresh (clear old TensorBoard logs) or resume from disk.
        if not self.network_config.restore_network:
            clear_summary_path(reinforce_summary_path)
        else:
            self.restore_state()
        self.summary = SummaryWriter(log_dir=reinforce_summary_path)
        # Online network and its periodically-synced target copy.
        self.eval_model = feature_q_model(name, state_length, self.feature_len, self.network_config.output_shape, network_config)
        self.target_model = feature_q_model(name, state_length, self.feature_len, self.network_config.output_shape, network_config)
        # self.target_model.eval_mode()
        # Linear annealing schedules for prioritized-replay beta and epsilon.
        self.beta_schedule = LinearSchedule(self.reinforce_config.beta_timesteps,
                                            initial_p=self.reinforce_config.beta_initial,
                                            final_p=self.reinforce_config.beta_final)
        self.epsilon_schedule = LinearSchedule(self.reinforce_config.epsilon_timesteps,
                                               initial_p=self.reinforce_config.starting_epsilon,
                                               final_p=self.reinforce_config.final_epsilon)
# def __del__(self):
# self.save()
# self.summary.close()
    def should_explore(self):
        """Return True when an epsilon-greedy random action should be taken.

        Also logs the current (annealed) epsilon to TensorBoard.
        """
        self.epsilon = self.epsilon_schedule.value(self.steps)
        self.summary.add_scalar(tag='%s/Epsilon' % self.name,
                                scalar_value=self.epsilon,
                                global_step=self.steps)
        return random.random() < self.epsilon
    def predict(self, state, isGreedy = False, is_random = False):
        """Pick an action index (and its feature vector) for `state`.

        `state` holds one candidate row per action; the eval network scores
        every row and the argmax is chosen unless an epsilon-greedy
        exploration step fires.  As a side effect, the previous transition is
        pushed into replay memory and the target network / training update is
        run on its schedule.  NOTE(review): is_random is never used.
        """
        if self.learning:
            self.steps += 1
        # add to experience
        if self.previous_state is not None and self.learning and self.current_reward is not None:
            # Deduplicate candidate rows before storing the next-state set;
            # terminal flag 0 marks a non-terminal transition.
            state_crr = np.unique(state, axis=0)
            self.memory.add(self.previous_state,
                            None,
                            self.current_reward,
                            state_crr.reshape(-1, self.state_length), 0,
                            self.features)
        if self.learning and self.should_explore() and not isGreedy:
            # Exploration: uniform random action, no feature vector available.
            q_values = None
            fv = None
            choice = random.choice(list(range(len(state))))
            action = choice
        else:
            with torch.no_grad():
                features_vector, q_values = self.eval_model.predict_batch(Tensor(state))
            q_values = FloatTensor(q_values).view(-1)
            _, choice = q_values.max(0)
            action = choice
            fv = features_vector[choice]
        # Periodically copy (or soft-update, when replace_frequency == 1) the
        # online network into the target network.
        if self.learning and self.steps % self.reinforce_config.replace_frequency == 0:
            logger.debug("Replacing target model for %s" % self.name)
            if self.reinforce_config.replace_frequency != 1:
                self.target_model.replace(self.eval_model)
            else:
                self.target_model.replace_soft(self.eval_model)
        if (self.learning and
                self.steps > self.reinforce_config.update_start and
                self.steps % self.reinforce_config.update_steps == 0):
            # Time only the gradient update itself.
            self.update_time -= time.time()
            self.update()
            self.update_time += time.time()
        self.current_reward = 0
        self.previous_state = state[action]
        #self.previous_action = action
        return choice, fv
    def disable_learning(self, is_save = False):
        """Stop training (e.g. for evaluation); optionally force a checkpoint first."""
        logger.info("Disabled Learning for %s agent" % self.name)
        if is_save:
            # self.save()
            self.save(force = True)
        self.learning = False
        self.episode = 0
    def enable_learning(self):
        """Re-enable training mode and clear per-episode bookkeeping."""
        logger.info("enabled Learning for %s agent" % self.name)
        self.learning = True
        self.reset()
    def end_episode(self, state):
        """Log episode statistics, store the terminal transition, and reset."""
        if not self.learning:
            return
        episode_time = time.time() - self.episode_time
        self.reward_history.append(self.total_reward)
        self.episode_time_history.append(episode_time)
        total_time = sum(self.episode_time_history)
        avg_time = total_time / len(self.episode_time_history)
        logger.info("End of Episode %d, "
                    "Total reward %.2f, "
                    "Epsilon %.2f" % (self.episode + 1,
                                      self.total_reward,
                                      self.epsilon))
        logger.debug("Episode Time: %.2fs (%.2fs), "
                     "Prediction Time: %.2f, "
                     "Update Time %.2f" % (episode_time,
                                           avg_time,
                                           self.prediction_time,
                                           self.update_time))
        self.episode += 1
        self.summary.add_scalar(tag='%s/Episode Reward' % self.name,
                                scalar_value=self.total_reward,
                                global_step=self.episode)
        # Terminal transition: flag 1 marks the end of the episode.
        self.memory.add(self.previous_state,
                        None,
                        self.current_reward,
                        state.reshape(-1, self.state_length), 1,
                        self.features)
        self.save()
        self.reset()
def reset(self):
self.episode_time = time.time()
self.current_reward = 0
self.total_reward = 0
self.previous_state = None
self.previous_action = None
self.prediction_time = 0
self.update_time = 0
self.features = None
    def restore_state(self):
        """Reload step/episode counters and replay memory from disk, when present."""
        restore_path = self.network_config.network_path + "/adaptive.info"
        if self.network_config.network_path and os.path.exists(restore_path) and self.memory_resotre:
            logger.info("Restoring state from %s" % self.network_config.network_path)
            # NOTE(review): pickle.load on a local checkpoint -- trusted input assumed.
            with open(restore_path, "rb") as file:
                info = pickle.load(file)
                self.steps = info["steps"]
                # self.best_reward_mean = info["best_reward_mean"]
                self.episode = info["episode"]
            self.memory.load(self.network_config.network_path)
            print("lenght of memeory: ", len(self.memory))
    def save(self, force=False, appendix=""):
        """Checkpoint the networks, counters and replay memory.

        Considered every `save_steps` episodes or when `force` is True.
        NOTE(review): as written, the inner branch persists only when
        force=True; the periodic non-forced path merely logs -- confirm
        this is intentional.
        """
        info = {
            "steps": self.steps,
            "best_reward_mean": self.best_reward_mean,
            "episode": self.episode
        }
        if (len(self.reward_history) >= self.network_config.save_steps and
                self.episode % self.network_config.save_steps == 0) or force:
            total_reward = sum(self.reward_history[-self.network_config.save_steps:])
            current_reward_mean = total_reward / self.network_config.save_steps
            if force: #or current_reward_mean >= self.best_reward_mean:
                print("*************saved*****************", current_reward_mean, self.best_reward_mean)
                if not force:
                    self.best_reward_mean = current_reward_mean
                    logger.info("Saving network. Found new best reward (%.2f)" % total_reward)
                self.eval_model.save_network(appendix = appendix)
                self.target_model.save_network(appendix = appendix)
                # self.eval_model.save_network()
                # self.target_model.save_network()
                with open(self.network_config.network_path + "/adaptive.info", "wb") as file:
                    pickle.dump(info, file, protocol=pickle.HIGHEST_PROTOCOL)
                self.memory.save(self.network_config.network_path)
                print("lenght of memeory: ", len(self.memory))
            else:
                logger.info("The best reward is still %.2f. Not saving" % self.best_reward_mean)
def reward(self, r):
self.total_reward += r
self.current_reward += r
def passFeatures(self, features):
self.features = features.copy()
return
    def summary_test(self, reward, epoch):
        """Log an evaluation reward to TensorBoard (x-axis scaled as epoch * 40)."""
        self.summary.add_scalar(tag='%s/eval reward' % self.name,
                                scalar_value=reward, global_step=epoch * 40)
    def summary_GVFs_loss(self, loss, epoch):
        """Log the GVF head's training loss to TensorBoard (x-axis: epoch * 40)."""
        self.summary.add_scalar(tag='%s/GVFs loss' % self.name,
                                scalar_value=loss, global_step=epoch * 40)
    def update(self):
        """Run one DQN + GVF training step on a sampled minibatch.

        Samples a batch from replay memory (prioritized or uniform), builds the
        Q-learning target from the target network, builds the feature (GVF)
        target, and fits the evaluation model. No-op while the memory holds no
        more than ``batch_size`` transitions.

        NOTE(review): the prioritized branch unpacks 7 values and never binds
        ``features_vector``, which is used unconditionally below — confirm
        whether ``use_prior_memory`` is ever enabled with this code path.
        """
        if len(self.memory._storage) <= self.reinforce_config.batch_size:
            return
        # self.eval_model.train_mode()
        # importance-sampling exponent for prioritized replay, annealed by step
        beta = self.beta_schedule.value(self.steps)
        self.summary.add_scalar(tag='%s/Beta' % self.name,
                                scalar_value=beta, global_step=self.steps)
        if self.reinforce_config.use_prior_memory:
            batch = self.memory.sample(self.reinforce_config.batch_size, beta)
            (states, actions, reward, next_states,
             is_terminal, weights, batch_idxes) = batch
            self.summary.add_histogram(tag='%s/Batch Indices' % self.name,
                                       values=Tensor(batch_idxes),
                                       global_step=self.steps)
        else:
            batch = self.memory.sample(self.reinforce_config.batch_size)
            (states, actions, reward, next_states, is_terminal, features_vector) = batch
        states = FloatTensor(states)
        # print(states.size())
        # next_states = FloatTensor(next_states)
        # 1.0 where the transition ended the episode, else 0.0
        terminal = FloatTensor([1 if t else 0 for t in is_terminal])
        reward = FloatTensor(reward)
        features_vector = FloatTensor(features_vector)
        batch_index = torch.arange(self.reinforce_config.batch_size,
                                   dtype=torch.long)
        # Current Q Values
        feature_values, q_values = self.eval_model.predict_batch(states)
        q_values = q_values.flatten()
        q_max = []
        f_max = []
        # per-sample max over the target network's next-state predictions
        for i, ns in enumerate(next_states):
            feature_n, q_n = self.target_model.predict_batch(FloatTensor(ns).view(-1, self.state_length))
            q_value_max, idx = q_n.max(0)
            features_max = feature_n[idx]
            q_max.append(q_value_max)
            if self.network_config.version in ["v10", "v11"]:
                # print(features_max)
                # print(ns[idx, 63:67])
                # print(states[i, 63:67])
                # print(features_max.size(), FloatTensor(ns).view(-1, self.state_length).size(), states.size())
                # rescale feature groups by the ratio of state columns 63-66;
                # presumably these are unit-count normalisers — TODO confirm
                features_max[:, :3] = (features_max[:, :3] * ns[idx, 65]) / states[i, 65]
                features_max[:, 3:6] = (features_max[:, 3:6] * ns[idx, 66]) / states[i, 66]
                features_max[:, 6:9] = (features_max[:, 6:9] * ns[idx, 63]) / states[i, 63]
                features_max[:, 9:12] = (features_max[:, 9:12] * ns[idx, 64]) / states[i, 64]
                # division by zero above yields inf; zero those entries out
                features_max[features_max == float('inf')] = 0
                # print(features_max)
                # input()
            f_max.append(features_max.view(-1))
        # if torch.sum(terminal == torch.sum(features_vector, dim = 1)) != len(terminal):
        #     print(terminal)
        #     print(features_vector)
        #     input()
        q_max = torch.stack(q_max, dim = 1).view(-1)
        f_max = torch.stack(f_max)
        # mask out bootstrap terms for terminal transitions
        q_max = (1 - terminal) * q_max
        f_max = (1 - terminal.view(-1, 1)) * f_max
        q_target = reward + self.reinforce_config.discount_factor * q_max
        f_target = features_vector + self.reinforce_config.discount_factor * f_max
        # if torch.sum(reward).item() > 0:
        #     print(reward)
        #     print(feature_values)
        #     print(q_target)
        #     print(q_values)
        #     input()
        # update model
        # NaN guard: x != x is true only for NaN entries
        if (torch.sum(feature_values != feature_values).item() + torch.sum(f_target != f_target)).item() > 0:
            # print("1")
            # print(features_vector)
            # print("2")
            # print(feature_values)
            # print("3")
            # print(f_target)
            # print("4")
            # print(f_max)
            # print("5")
            # print(states.tolist())
            # input()
            f_target[f_target != f_target] = 0
        self.eval_model.fit(q_values, q_target, feature_values, f_target)
        # Update priorities
        if self.reinforce_config.use_prior_memory:
            td_errors = q_values - q_target
            new_priorities = torch.abs(td_errors) + 1e-6  # prioritized_replay_eps
            self.memory.update_priorities(batch_idxes, new_priorities.data)
    def load_model(self, model):
        # Replace the evaluation network with the weights of `model`.
        self.eval_model.replace(model)
    def load_weight(self, weight_dict):
        # NOTE(review): this definition is shadowed by the two-argument
        # `load_weight` below (Python keeps only the last definition of a name
        # in a class body), so this version is unreachable.
        self.eval_model.load_weight(weight_dict)
    def load_model(self, model):
        # NOTE(review): duplicate of `load_model` above; this identical
        # definition is the one that actually takes effect.
        self.eval_model.replace(model)
    def load_weight(self, new_feature_weights, new_q_weights):
        # Load state dicts into the feature and Q sub-networks of eval_model.
        # NOTE(review): `feautre_model` looks like a typo for `feature_model`,
        # but it must match the attribute name on the model class — confirm
        # there before renaming here.
        self.eval_model.feautre_model.load_state_dict(new_feature_weights)
        self.eval_model.q_model.load_state_dict(new_q_weights)
6,783 | f44a8837056eb77fbf0ff37b9c57891cc3a3d6b2 | import logging
from datetime import datetime
from preprocessing import death_preprocessing
from preprocessing_three_month import death_preprocessing_three_month
from death_rule_first_55 import death_rule_first_55
from death_rule_second import death_rule_second_new
from death_escalation import death_escalation
if __name__ == '__main__':
    # Entry point: run the monthly mortality-analysis pipeline, logging to
    # logfile.log (appended across runs).
    logging.basicConfig(filename='logfile.log', filemode='a', format='%(asctime)s - %(levelname)s - %(message)s',
                        level=logging.INFO)
    logging.info('Start of the mortality analysis algorithm')
    start_time_ALL = datetime.now()
    print('Start of the mortality analysis algorithm')
    try:
        print('The month is over. Start forming tasks ...')
        # death_preprocessing(save_to_sql=True, save_to_excel=False)
        death_preprocessing_three_month(save_to_sql=True, save_to_excel=False)
        death_rule_first_55(save_to_sql=True, save_to_excel=True)
        death_rule_second_new(save_to_sql=True, save_to_excel=True)
        death_escalation(save_to_sql=True, save_to_excel=False)
        print(f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}')
        logging.info(f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}')
    except Exception:
        # fix: the bound-but-unused `as e` variable has been removed; the full
        # traceback is captured by logging.exception below
        print('The execution of the mortality analysis algorithm was not completed due to an error')
        logging.exception('Exception occurred')
        logging.info('The execution of the mortality analysis algorithm was not completed due to an error')
|
6,784 | cc58e3944ee2bfb55cc2867395782a94c196e635 | ########################################################################################################################
# DEVELOPER README: #
# This is the main script, where the GUI is initialised from. All of the main layout objects live in their own scripts #
# under ./gui_scripts (i.e. the tab content). The settings and preferences script sets up all of the directory paths #
# and contains dictionaries defining the top menu, push buttons and the tables held in the main tabs. The layout #
# script contains functions for performing simple layout tasks, such as adding a combobox, and contains init. #
# functions for all of the main layout functions. #
# #
# In the future, the functions associated with buttons and frames etc. should be moved into the relevant script, but #
# this is a bit more complicated. For now, they are separated out into sections within this script. The only GUI stuff #
# going on in here is calling the initialisation functions. To change the layout of a tab, edit it in it's own script, #
# and add any new functions in this script, in the relevant section. (If there is one yet) #
# #
# There's still a lot of cleaning up to be done in the future... #
########################################################################################################################
# solve gtk startup error
#import gtk
#gtk.set_interactive(False)
import base64
import getpass
import glob
import math
import multiprocessing
import pickle
import subprocess
import sys, os
import webbrowser
from datetime import datetime
from PyQt4 import QtGui, QtCore, QtWebKit
sys.path.append(os.path.join(os.getenv('XChemExplorer_DIR'), 'lib'))
sys.path.append(os.path.join(os.getenv('XChemExplorer_DIR'), 'web'))
sys.path.append(os.path.join(os.getenv('XChemExplorer_DIR'), 'gui_scripts'))
from settings_preferences import *
from layout import *
from stylesheet import set_stylesheet
from XChemUtils import parse
import XChemThread
import XChemDB
import XChemPANDDA
import XChemToolTips
import XChemMain
import XChemPlots
import XChemLog
import XChemProcess
import XChemDeposit
import XChemWeb
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
class XChemExplorer(QtGui.QApplication):
    def __init__(self, args):
        """Build the XCE Qt application: construct the GUI, apply the
        stylesheet, then enter the Qt event loop (blocks until exit)."""
        # init a QApplication object to hold XCE
        QtGui.QApplication.__init__(self, args)
        # start GUI
        self.start_GUI()
        # set stylesheet - how the gui looks
        set_stylesheet(self)
        self.exec_()
    def start_GUI(self):
        """Construct the main XCE window: settings, preferences, tables,
        layouts and widgets; warn about the launch directory; back up the
        soakDB data source if one already exists on disk."""
        # check http://doc.qt.io/qt-4.8/stylesheet-customizing.html#the-box-model
        # This needs moving somewhere more appropriate...
        self.headlineLabelfont = QtGui.QFont("Arial", 20, QtGui.QFont.Bold)
        setup().settings(self)
        setup().preferences(self)
        setup().tables(self)
        self.layout_funcs = LayoutFuncs()
        # GUI setup
        self.window = QtGui.QWidget()
        self.window.setWindowTitle("XChemExplorer")
        self.screen = QtGui.QDesktopWidget().screenGeometry()
        LayoutObjects(self).workflow(self)
        LayoutObjects(self).main_layout(self)
        LayoutFuncs().add_widgets_layouts(self)
        self.checkLabXChemDir()
        # only back up when the data source file already exists
        if os.path.isfile(os.path.join(self.database_directory, self.data_source_file)):
            self.backup_soakDB()
def backup_soakDB(self):
XChemMain.backup_soakDB(os.path.join(self.database_directory, self.data_source_file),self.xce_logfile)
def checkLabXChemDir(self):
dirCheck = QtGui.QMessageBox()
dirCheckLayout = dirCheck.layout()
vbox = QtGui.QVBoxLayout()
try:
warning = (
'Are you sure you want to launch XCE here:\n\n'
+self.labxchem_directory_current+'\n\n'
'If this is not where you should be running XCE, please close!\n'
)
except AttributeError:
return
vbox.addWidget(QtGui.QLabel(warning))
dirCheckLayout.addLayout(vbox, 0, 0)
dirCheck.exec_();
# function to update datasource
def datasource_menu_reload_samples(self):
self.update_log.insert(
'reading samples from data source: ' + os.path.join(self.database_directory, self.data_source_file))
self.update_status_bar(
'reading samples from data source: ' + os.path.join(self.database_directory, self.data_source_file))
self.update_header_and_data_from_datasource()
self.update_all_tables()
self.overview_datasource_table.resizeColumnsToContents()
# function to create new datasource
def create_new_data_source(self):
file_name = str(QtGui.QFileDialog.getSaveFileName(self.window, 'Save file', self.database_directory))
# make sure that the file always has .sqlite extension
if file_name.rfind('.') != -1:
file_name = file_name[:file_name.rfind('.')] + '.sqlite'
else:
file_name = file_name + '.sqlite'
self.db = XChemDB.data_source(file_name)
print('==> XCE: creating new data source')
self.db.create_empty_data_source_file()
self.db.create_missing_columns()
self.database_directory = file_name[:file_name.rfind('/')]
self.data_source_file = file_name[file_name.rfind('/') + 1:]
self.data_source_file_label.setText(os.path.join(self.database_directory, self.data_source_file))
self.settings['database_directory'] = self.database_directory
self.settings['data_source'] = self.data_source_file
self.data_source_set = True
self.datasource_menu_reload_samples()
####################################################################################################################
# #
# DATASETS TAB #
# #
####################################################################################################################
    def continously_check_for_new_data_collection(self, state):
        """Start or stop the 120 s polling timer for new data collections.

        ``state`` is the Qt check-state of the 'check continuously' checkbox.
        NOTE(review): the timeout signal is re-connected on every call, so
        toggling the checkbox repeatedly stacks duplicate connections and the
        check would fire multiple times per timeout — consider connecting once
        at startup instead.
        """
        self.timer_to_check_for_new_data_collection.timeout.connect(
            lambda: self.check_for_new_autoprocessing_or_rescore(False))
        if state == QtCore.Qt.Checked:
            print('==> XCE: checking automatically every 120s for new data collection')
            self.timer_to_check_for_new_data_collection.start(120000)
        else:
            print('==> XCE: stopped checking for new data collections')
            self.timer_to_check_for_new_data_collection.stop()
def target_selection_combobox_activated(self, text):
self.target = str(text)
def select_diffraction_data_directory(self):
self.diffraction_data_directory = str(QtGui.QFileDialog.getExistingDirectory(self.window, "Select Directory"))
self.diffraction_data_dir_label.setText(self.diffraction_data_directory)
self.settings['diffraction_data_directory'] = self.diffraction_data_directory
self.update_log.insert('setting diffraction data directory to ' + self.diffraction_data_directory)
    def search_for_datasets(self):
        """Scan the diffraction-data directory for datasets in a worker thread,
        wiring the worker's signals to the GUI update slots."""
        self.update_log.insert('search diffraction data directory for datasets...')
        print('will search ' + str(self.diffraction_data_directory))
        self.work_thread = XChemMain.find_diffraction_image_directory_fast(self.diffraction_data_directory)
        self.explorer_active = 1
        # old-style signal/slot wiring (PyQt4)
        self.connect(self.work_thread, QtCore.SIGNAL("update_datasets_reprocess_table"),
                     self.update_datasets_reprocess_table)
        self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
        self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
        self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
        self.work_thread.start()
        #self.work_thread = self.update_datasets_reprocess_table(self.diffraction_data_directory)
    def translate_datasetID_to_sampleID(self):
        """Rename sample IDs in the reprocess table from a user-supplied CSV.

        Pops up a dialog to pick a CSV of ``datasetID,newSampleID`` rows; on OK
        (button role 0) applies every mapping to column 1 of the reprocess
        table. Rows without exactly one comma are ignored.
        Note: uses ``xrange`` — this code runs under Python 2.
        """
        translate = QtGui.QMessageBox()
        translateLayout = translate.layout()
        self.translate_datasetID_to_sampleID_file = '-'
        vbox = QtGui.QVBoxLayout()
        button = QtGui.QPushButton('Open CSV')
        button.clicked.connect(self.open_csv_file_translate_datasetID_to_sampleID)
        vbox.addWidget(button)
        self.translate_datasetID_to_sampleID_csv_label = QtGui.QLabel(self.translate_datasetID_to_sampleID_file)
        vbox.addWidget(self.translate_datasetID_to_sampleID_csv_label)
        translateLayout.addLayout(vbox, 0, 0)
        translate.addButton(QtGui.QPushButton('OK'), QtGui.QMessageBox.YesRole)
        translate.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
        reply = translate.exec_();
        # reply == 0 corresponds to the 'OK' button added first
        if reply == 0:
            if os.path.isfile(self.translate_datasetID_to_sampleID_file):
                trans_dict = {}
                # build datasetID -> newSampleID mapping from the CSV
                for line in open(self.translate_datasetID_to_sampleID_file):
                    if len(line.split(',')) == 2:
                        dataset = line.split(',')[0]
                        new_sample_id = line.split(',')[1]
                        trans_dict[dataset] = new_sample_id
                if len(trans_dict) >= 1:
                    allRows = self.datasets_reprocess_table.rowCount()
                    for row in xrange(0, allRows):
                        dataset_id = str(self.datasets_reprocess_table.item(row, 0).text())
                        sample_id = str(self.datasets_reprocess_table.item(row, 1).text())
                        if dataset_id in trans_dict:
                            cell_text = QtGui.QTableWidgetItem()
                            cell_text.setText(trans_dict[dataset_id])
                            cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                            self.datasets_reprocess_table.setItem(row, 1, cell_text)
                            self.update_log.insert(
                                'dataset: {0!s} -> changing sampleID to: {1!s}'.format(dataset_id,
                                                                                       trans_dict[dataset_id]))
def select_sample_for_xia2(self):
indexes = self.datasets_reprocess_table.selectionModel().selectedRows()
for index in sorted(indexes):
xtal = str(self.datasets_reprocess_table.item(index.row(), 1).text())
print(xtal, self.diffraction_data_table_dict[xtal][0])
self.update_log.insert('{0!s} marked for reprocessing'.format(index.row()))
self.diffraction_data_table_dict[xtal][0].setChecked(True)
def select_reprocess_reference_mtz(self):
self.update_log.insert('trying to set new reference mtz file for reprocessing with xia2')
file_name = str(QtGui.QFileDialog.getOpenFileName(self.window, 'Select file', self.database_directory))
if os.path.isfile(file_name):
if file_name.endswith('.mtz'):
self.diffraction_data_reference_mtz = file_name
self.update_log.insert(
'new reference file for data processing with xia2: ' + self.diffraction_data_reference_mtz)
self.reprocess_reference_mtz_file_label.setText(self.diffraction_data_reference_mtz)
else:
self.update_log.insert('this does not seem to be a mtz file: ' + file_name)
    def check_for_new_autoprocessing_or_rescore(self, rescore_only):
        """Read autoprocessing results from disc in a worker thread.

        With ``rescore_only`` True the user is first warned that all manual
        selections will be overwritten (Yes = button role 0). A valid target
        must be selected before the worker is started.
        """
        self.update_log.insert('checking for new data collection')
        start_thread = False
        if rescore_only:
            # first pop up a warning message as this will overwrite all user selections
            msgBox = QtGui.QMessageBox()
            msgBox.setText("*** WARNING ***\nThis will overwrite all your manual selections!\nDo you want to continue?")
            msgBox.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
            msgBox.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.RejectRole)
            reply = msgBox.exec_();
            if reply == 0:
                start_thread = True
            else:
                start_thread = False
        else:
            start_thread = True
        if start_thread:
            # refuse to run until a real target has been chosen
            if self.target == '=== SELECT TARGET ===':
                msgBox = QtGui.QMessageBox()
                warning = ('*** WARNING ***\n'
                           'Please select a target or\n'
                           'select "=== project directory ===" if you want to read reprocessed results\n'
                           'In case target list is empty, make sure that you have selected the actual\n'
                           'data collection visit (e.g. /dls/i04-1/data/2018/lb18145-70)' )
                msgBox.setText(warning)
                start_thread = False
                # msgBox.setText(warning)
                # msgBox.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
                # msgBox.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.RejectRole)
                # reply = msgBox.exec_();
                # if reply == 0:
                #     start_thread = True
                # else:
                #     start_thread = False
                # else:
                #     start_thread = True
        if start_thread:
            self.work_thread = XChemThread.read_autoprocessing_results_from_disc(self.visit_list,
                                                                                 self.target,
                                                                                 self.reference_file_list,
                                                                                 self.database_directory,
                                                                                 self.data_collection_dict,
                                                                                 self.preferences,
                                                                                 self.datasets_summary_file,
                                                                                 self.initial_model_directory,
                                                                                 rescore_only,
                                                                                 self.acceptable_low_resolution_limit_for_data,
                                                                                 os.path.join(self.database_directory,
                                                                                              self.data_source_file),
                                                                                 self.xce_logfile)
            self.explorer_active = 1
            # old-style PyQt4 signal wiring from worker to GUI slots
            self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
            self.connect(self.work_thread, QtCore.SIGNAL("create_widgets_for_autoprocessing_results_only"),
                         self.create_widgets_for_autoprocessing_results_only)
            self.work_thread.start()
#################################################################################################################
#
#
#
# => for new module from hell
# > start
def update_gdaLog_parsing_instructions_and_score(self, gdaLogInstructions):
self.gdaLogInstructions = gdaLogInstructions
self.select_best_autoprocessing_result()
    def read_pinIDs_from_gda_logs(self):
        """Parse pin IDs from the gda logfiles in a worker thread; on success
        the worker emits update_gdaLog_parsing_instructions_and_score."""
        self.update_log.insert('reading pinIDs from gda logfiles...')
        visit, beamline = XChemMain.getVisitAndBeamline(self.beamline_directory)
        self.work_thread = XChemThread.read_pinIDs_from_gda_logs(beamline,
                                                                 visit,
                                                                 os.path.join(
                                                                     self.database_directory,
                                                                     self.data_source_file),
                                                                 self.gdaLogInstructions,
                                                                 self.xce_logfile)
        self.explorer_active = 1
        # old-style PyQt4 signal wiring from worker to GUI slots
        self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
        self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
        self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
        self.connect(self.work_thread, QtCore.SIGNAL("update_gdaLog_parsing_instructions_and_score"),
                     self.update_gdaLog_parsing_instructions_and_score)
        self.work_thread.start()
    def check_for_new_autoprocessing_results(self):
        """Read/write autoprocessing results in a worker thread.

        The processed-results directory is the project directory when the
        pseudo-target '=== project directory ===' is selected, otherwise
        <beamline_directory>/processed/<target>. Refuses to run until a real
        target is selected.
        """
        self.update_log.insert('checking for new data collection')
        if self.target == '=== SELECT TARGET ===':
            self.update_log.error('NO TARGET SELECTED, PLEASE SELECT A TARGET AND TRY AGAIN!')
            start_thread = False
        elif self.target == '=== project directory ===':
            processedDir = self.initial_model_directory
            start_thread = True
        # elif self.read_agamemnon.isChecked():
        #    tmp = '/'.join(self.beamline_directory.split('/')[:6])
        #    processedDir = tmp[:tmp.rfind('-')]
        ##    processedDir = os.path.join(self.beamline_directory[:self.beamline_directory.rfind('-') + 1] + '*/processed/agamemnon/'+self.target)
        ##    processedDir = os.path.join(self.beamline_directory[:self.beamline_directory.rfind('-') + 1] + '*/processed/*/'+self.target)
        #    start_thread = True
        else:
            processedDir = os.path.join(self.beamline_directory, 'processed', self.target)
            start_thread = True
        if start_thread:
            # processedDir=os.path.join(self.beamline_directory,'processed',self.target)
            self.work_thread = XChemThread.read_write_autoprocessing_results_from_to_disc(processedDir,
                                                                                          os.path.join(
                                                                                              self.database_directory,
                                                                                              self.data_source_file),
                                                                                          self.initial_model_directory,
                                                                                          self.xce_logfile,
                                                                                          self.target,
                                                                                          self.read_agamemnon.isChecked())
            self.explorer_active = 1
            # old-style PyQt4 signal wiring from worker to GUI slots
            self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
            self.connect(self.work_thread, QtCore.SIGNAL("read_pinIDs_from_gda_logs"),
                         self.read_pinIDs_from_gda_logs)
            self.work_thread.start()
    def select_best_autoprocessing_result(self):
        """Choose the best autoprocessing outcome per sample in a worker thread.

        When ``self.rescore`` is set, first warns that manual selections will
        be overwritten (Yes = button role 0). Samples with manual changes are
        otherwise left untouched by the worker.
        """
        if self.rescore:
            # first pop up a warning message as this will overwrite all user selections
            msgBox = QtGui.QMessageBox()
            msgBox.setText("*** WARNING ***\nThis will overwrite all your manual selections!\nDo you want to continue?")
            msgBox.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
            msgBox.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.RejectRole)
            reply = msgBox.exec_();
            if reply != 0:
                start_thread = False
            else:
                start_thread = True
        else:
            start_thread = True
        if start_thread:
            self.update_log.insert('selecting best autoprocessing result')
            self.update_log.insert('samples where user made manual changes will be ignored!')
            if self.target == '=== project directory ===':
                processedDir = self.initial_model_directory
            else:
                processedDir = os.path.join(self.beamline_directory, 'processed', self.target)
            visit,beamline = XChemMain.getVisitAndBeamline(processedDir)
            # agamemnon runs span several visits; collect the visit names from
            # the sibling visit directories
            if self.read_agamemnon.isChecked():
                visit = []
                for v in glob.glob(
                        os.path.join(self.beamline_directory[:self.beamline_directory.rfind('-') + 1] + '*')):
                    visit.append(v[v.rfind('/') + 1:])
            self.work_thread = XChemThread.choose_autoprocessing_outcome(os.path.join(self.database_directory,
                                                                                      self.data_source_file),
                                                                         visit,
                                                                         self.reference_file_list,
                                                                         self.preferences,
                                                                         self.initial_model_directory,
                                                                         self.rescore,
                                                                         self.xce_logfile,
                                                                         self.read_agamemnon.isChecked())
            self.explorer_active = 1
            # old-style PyQt4 signal wiring from worker to GUI slots
            self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
            self.connect(self.work_thread, QtCore.SIGNAL("populate_datasets_summary_table_NEW"),
                         self.populate_datasets_summary_table_NEW)
            self.work_thread.start()
# < end
###################################################################################################################
####################################################################################################################
# #
# MAPS TAB #
# #
####################################################################################################################
def set_new_reference_if_applicable(self):
print('hallo')
reference_root = str(self.reference_file_selection_combobox.currentText())
pg_ref = ''
ucVol_ref = 0.0
for reference in self.reference_file_list:
print(reference[0], reference_root)
if reference[0] == reference_root:
pg_ref = reference[5]
ucVol_ref = reference[4]
break
if ucVol_ref == 0.0:
self.update_log.insert('cannot set reference file since unit cell volume of reference pdb is 0!')
return
for xtal in self.initial_model_dimple_dict:
reference_file_selection_combobox = self.initial_model_dimple_dict[xtal][1]
self.populate_reference_combobox(reference_file_selection_combobox)
db_dict = self.xtal_db_dict[xtal]
pg_xtal = db_dict['DataProcessingPointGroup']
ucVol_xtal = db_dict['DataProcessingUnitCellVolume']
try:
difference = math.fabs(1 - (float(ucVol_xtal) / float(ucVol_ref))) * 100
except ValueError:
self.update_log.insert(xtal + ' -> cannot calculate unit cell volume difference')
continue
if pg_xtal == pg_ref and difference < self.allowed_unitcell_difference_percent:
print(xtal, pg_xtal, ucVol_xtal)
index = reference_file_selection_combobox.findText(reference_root, QtCore.Qt.MatchFixedString)
reference_file_selection_combobox.setCurrentIndex(index)
self.update_log.insert(xtal + ' -> setting ' + reference_root + ' as input PDB file for DIMPLE')
def refresh_reference_file_list(self):
self.reference_file_list = self.get_reference_file_list(' ')
self.populate_reference_combobox(self.reference_file_selection_combobox)
def on_context_menu_initial_model(self, point):
# show context menu
self.popMenu_for_maps_table.exec_(self.sender().mapToGlobal(point))
####################################################################################################################
# #
# PANDDA TAB #
# #
####################################################################################################################
    def select_pandda_input_template(self):
        """Derive the PanDDA input directory / file-name templates from one
        example PDB or MTZ file picked by the user.

        Picking a .pdb looks for the sibling .mtz (and vice versa); the
        missing partner is left as an empty string. The data-dir entry is set
        to the grandparent of the chosen file.
        """
        mtzin = ''
        filepath_temp = QtGui.QFileDialog.getOpenFileNameAndFilter(self.window, 'Select Example PDB or MTZ File',
                                                                   self.initial_model_directory, '*.pdb;;*.mtz')
        filepath = str(tuple(filepath_temp)[0])
        pdbin = filepath.split('/')[-1]
        if filepath.endswith('.pdb'):
            pdbin = filepath.split('/')[-1]
            mtzin_temp = pdbin.replace('.pdb', '.mtz')
            # accept the mtz only if it actually exists next to the pdb
            if os.path.isfile(filepath.replace(pdbin, mtzin_temp)):
                mtzin = mtzin_temp
            else:
                mtzin = ''
        if filepath.endswith('.mtz'):
            mtzin = filepath.split('/')[-1]
            pdbin_temp = pdbin.replace('.mtz', '.pdb')
            if os.path.isfile(filepath.replace(mtzin, pdbin_temp)):
                pdbin = pdbin_temp
            else:
                pdbin = ''
        try:
            # grandparent directory of the selected file becomes the data dir
            self.pandda_input_data_dir_entry.setText(
                '/'+os.path.join(*filepath.split('/')[0:len(filepath.split('/'))-2]))
        except TypeError:
            self.update_log.error('directory selection invalid')
        # if len(filepath.split('/')) - len(self.initial_model_directory.split('/')) == 2:
        #     self.pandda_input_data_dir_entry.setText(os.path.join(self.initial_model_directory, '*'))
        # elif len(filepath.split('/')) - len(self.initial_model_directory.split('/')) > 2:
        #     subdir = os.path.join(
        #         *filepath.split('/')[len(self.initial_model_directory.split('/')) + 1:len(filepath.split('/')) - 1])
        #     self.pandda_input_data_dir_entry.setText(os.path.join(self.initial_model_directory, '*', subdir))
        # else:
        #     pass
        self.pandda_pdb_style_entry.setText(pdbin)
        self.pandda_mtz_style_entry.setText(mtzin)
def change_pandda_spg_label(self):
combo_text = str(self.pandda_reference_file_selection_combobox.currentText())
for file in self.reference_file_list:
if file[0] == combo_text:
self.pandda_reference_file_spg_label.setText(file[1])
break
def on_context_menu_pandda(self, point):
# show context menu
self.popMenu_for_pandda_table.exec_(self.sender().mapToGlobal(point))
####################################################################################################################
# #
# DEPO TAB #
# #
####################################################################################################################
def export_to_html(self):
XChemWeb.export_to_html(self.html_export_directory,
self.initial_model_directory,
os.path.join(self.database_directory, self.data_source_file),
self.xce_logfile).prepare('0')
def export_to_html_CompChem(self):
XChemWeb.export_to_html(self.html_export_directory,
self.initial_model_directory,
os.path.join(self.database_directory, self.data_source_file),
self.xce_logfile).prepare('4')
def export_to_html_deposition_ready(self):
XChemWeb.export_to_html(self.html_export_directory,
self.initial_model_directory,
os.path.join(self.database_directory, self.data_source_file),
self.xce_logfile).prepare('5')
# self.update_log.insert('exporting contents of SQLite database into ' + self.html_export_directory)
# os.system(
# 'ccp4-python ' + os.getenv('XChemExplorer_DIR') + '/web/process_sqlite.py -t Summary -s ' + os.path.join(
# self.database_directory, self.data_source_file) + ' -d ' + self.html_export_directory)
# XChemWeb.create_ICM_input_file(self.html_export_directory,
# os.path.join(self.database_directory, self.data_source_file))
# self.update_log.insert('open ICMpro:')
# self.update_log.insert('/dls/science/groups/i04-1/software/icm-3.8-5/icm64 -g')
# self.update_log.insert('open file browser and navigate to ' + self.html_export_directory)
# self.update_log.insert('drag and drop dsEvent_sqlite.icm into the main window')
# self.update_log.insert('the script will appear in the Workspace Panel')
# self.update_log.insert('right click on the script and select RUN')
# self.update_log.insert('be patient, this may take a while, depending on the number of events')
# self.status_bar.showMessage('please check terminal window for further information')
# def select_ground_state_pdb(self):
# p = QtGui.QFileDialog.getOpenFileNameAndFilter(self.window, 'Select File', os.getcwd(),'*.pdb')
# pdb = str(tuple(p)[0])
# self.ground_state_pdb_button_label.setText(pdb)
def select_ground_state_mtz(self):
m = QtGui.QFileDialog.getOpenFileNameAndFilter(self.window, 'Select File', os.getcwd(),'*.mtz')
mtz = str(tuple(m)[0])
self.ground_state_mtz_button_label.setText(mtz)
def add_ground_state_db(self):
pdb, mtz = self.auto_select_ground_state_reference_PDB()
if pdb != None:
db_dict = {'DimplePANDDApath': self.panddas_directory,
'PDB_file': pdb,
'MTZ_file': mtz}
self.db.create_or_remove_missing_records_in_depositTable(self.xce_logfile, 'ground_state', 'ground_state',
db_dict)
else:
self.update_log.error('could not find a suitable reference file; see messages above!')
def auto_select_ground_state_reference_PDB(self):
pdb = None
mtz = None
xtalList = []
for dirs in glob.glob(os.path.join(self.panddas_directory,'processed_datasets','*')):
xtal = dirs[dirs.rfind('/')+1:]
if os.path.isfile(os.path.join(dirs,xtal+'-pandda-input.pdb')):
pdbHeader = parse().PDBheader(os.path.join(dirs,xtal+'-pandda-input.pdb'))
try:
xtalList.append( [xtal, float(pdbHeader['Rfree']) , float(pdbHeader['ResolutionHigh']) ] )
except ValueError:
self.update_log.error('%s: cannot read Rfree or Resolution from PDB header; skipping...')
pass
self.update_log.insert('found %s PDB files in %s' %(str(len(xtalList)),os.path.join(self.panddas_directory,'processed_datasets')))
if len(xtalList) >= 10:
self.update_log.insert('sorting PDBs by Rfree and selecting the 10 with lowest value')
rfree = sorted(xtalList, key=lambda x: x[1])[:10]
self.update_log.insert('top 10 PDB files with lowest Rfree:')
for item in rfree:
self.update_log.insert('%s: Rfree = %s | Resolution = %s' %(item[0],str(round(item[1],3)),str(round(item[2],2))))
self.update_log.insert('selecting PDB with highest resolution')
reso = sorted(rfree, key=lambda x: x[2])[:1]
self.update_log.insert('selected the following PDB file: %s: Rfree = %s | Resolution = %s' %(reso[0][0],str(round(reso[0][1],3)),str(round(reso[0][2],2))))
pdb = os.path.join(self.panddas_directory,'processed_datasets',reso[0][0],reso[0][0]+'-pandda-input.pdb')
mtz = os.path.join(self.panddas_directory,'processed_datasets',reso[0][0],reso[0][0]+'-pandda-input.mtz')
else:
self.update_log.error('found less than 10 valid PDB files in %s' %os.path.join(self.panddas_directory,'processed_datasets'))
return pdb, mtz
def prepare_ground_state_mmcif(self):
self.update_log.insert('preparing mmcif file for apo structure deposition')
self.prepare_models_for_deposition_ligand_bound('ground_state')
def open_icm(self):
self.update_log.insert('starting ICM...')
self.work_thread = XChemThread.start_ICM(self.html_export_directory)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def prepare_files_for_zenodo_upload(self):
self.update_log.insert('preparing files for ZENODO upload...')
os.system('ccp4-python ' + os.getenv(
'XChemExplorer_DIR') + '/helpers/prepare_for_zenodo_upload.py ' + self.html_export_directory)
def update_html_for_zenodo_upload(self):
try:
uploadID = int(self.zenodo_upload_id_entry.text())
self.update_log.insert('updating html files for ZENODO upload,...')
self.update_log.insert('ZENODO upload = ' + str(uploadID))
os.system('ccp4-python ' + os.getenv(
'XChemExplorer_DIR') + '/helpers/prepare_for_zenodo_upload.py {0!s} {1!s}'.format(
self.html_export_directory, uploadID))
except ValueError:
self.update_log.insert('zenodo upload ID must be an integer!')
####################################################################################################################
# #
# SETTINGS TAB #
# #
####################################################################################################################
    def settings_button_clicked(self):
        """Dispatch handler for every button on the Settings tab.

        The triggering button is identified by its label text (self.sender());
        each branch updates the matching directory/file attribute, the
        associated GUI label and the persistent self.settings dict.
        """
        # Project (initial model) directory; mirrored into the PanDDA input field.
        if self.sender().text() == 'Select Project Directory':
            self.initial_model_directory = str(QtGui.QFileDialog.getExistingDirectory(self.window, "Select Directory"))
            self.initial_model_directory_label.setText(self.initial_model_directory)
            self.pandda_input_data_dir_entry.setText(self.initial_model_directory)
            self.settings['initial_model_directory'] = self.initial_model_directory
        # Reference structure directory; reference files are reloaded only on change.
        if self.sender().text() == 'Select Reference Structure Directory':
            reference_directory_temp = str(QtGui.QFileDialog.getExistingDirectory(self.window, "Select Directory"))
            if reference_directory_temp != self.reference_directory:
                self.reference_directory = reference_directory_temp
                self.update_reference_files(' ')
            self.reference_directory_label.setText(self.reference_directory)
            self.settings['reference_directory'] = self.reference_directory
        # SQLite data source file; (re)open the DB and refresh the sample table.
        if self.sender().text() == 'Select Data Source File':
            filepath_temp = QtGui.QFileDialog.getOpenFileNameAndFilter(self.window, 'Select File',
                                                                       self.database_directory, '*.sqlite')
            filepath = str(tuple(filepath_temp)[0])
            # split the absolute path into directory + file name
            self.data_source_file = filepath.split('/')[-1]
            self.database_directory = filepath[:filepath.rfind('/')]
            self.settings['database_directory'] = self.database_directory
            self.settings['data_source'] = os.path.join(self.database_directory, self.data_source_file)
            # only mark the data source usable when it is writable
            write_enabled = self.check_write_permissions_of_data_source()
            if not write_enabled:
                self.data_source_set = False
            else:
                self.data_source_set = True
                self.data_source_file_label.setText(os.path.join(self.database_directory, self.data_source_file))
                self.db = XChemDB.data_source(os.path.join(self.database_directory, self.data_source_file))
                self.db.create_missing_columns()
                self.datasource_menu_reload_samples()
        # Beamline data collection directory; refresh targets/visits only on change.
        if self.sender().text() == 'Select Data Collection Directory':
            dir_name = str(QtGui.QFileDialog.getExistingDirectory(self.window, "Select Directory"))
            if dir_name != self.beamline_directory:
                self.beamline_directory = dir_name
                self.target_list, self.visit_list = XChemMain.get_target_and_visit_list(self.beamline_directory,self.read_agamemnon.isChecked())
                self.populate_target_selection_combobox(self.target_selection_combobox)
            self.beamline_directory_label.setText(self.beamline_directory)
            self.settings['beamline_directory'] = self.beamline_directory
        # Pick an existing collection-summary pickle; the file dialog opens in
        # the directory of the previous summary file when one is known.
        if self.sender().text() == 'Select Existing\nCollection Summary File':
            if self.datasets_summary_file != '':
                filepath_temp = QtGui.QFileDialog.getOpenFileNameAndFilter(self.window, 'Select File',
                                                                           self.datasets_summary_file[
                                                                           :self.datasets_summary_file.rfind(
                                                                               '/')], '*.pkl')
            else:
                filepath_temp = QtGui.QFileDialog.getOpenFileNameAndFilter(self.window, 'Select File', os.getcwd(),
                                                                           '*.pkl')
            filepath = str(tuple(filepath_temp)[0])
            self.datasets_summary_file = filepath
            self.datasets_summary_file_label.setText(self.datasets_summary_file)
            self.settings['datasets_summary'] = self.datasets_summary_file
        # Create a new collection-summary file; extension is forced to .pkl.
        if self.sender().text() == 'Assign New\nCollection Summary File':
            if self.datasets_summary_file != '':
                file_name = str(QtGui.QFileDialog.getSaveFileName(self.window, 'New file',
                                                                  self.datasets_summary_file[
                                                                  :self.datasets_summary_file.rfind('/')]))
            else:
                file_name = str(QtGui.QFileDialog.getSaveFileName(self.window, 'New file', self.current_directory))
            # make sure that the file always has .pkl extension
            if str(file_name).rfind('.') != -1:
                file_name = file_name[:file_name.rfind('.')] + '.pkl'
            else:
                file_name = file_name + '.pkl'
            self.datasets_summary_file = file_name
            self.datasets_summary_file_label.setText(self.datasets_summary_file)
            self.settings['datasets_summary'] = self.datasets_summary_file
        # CCP4 scratch directory.
        if self.sender().text() == 'Select CCP4_SCR Directory':
            self.ccp4_scratch_directory = str(QtGui.QFileDialog.getExistingDirectory(self.window, "Select Directory"))
            self.ccp4_scratch_directory_label.setText(self.ccp4_scratch_directory)
            self.settings['ccp4_scratch'] = self.ccp4_scratch_directory
        # PanDDA directory; propagated to several dependent widgets and the
        # embedded PanDDA html view.
        if self.sender().text() == 'Select PanDDA Directory':
            self.panddas_directory = str(QtGui.QFileDialog.getExistingDirectory(self.window, "Select Directory"))
            self.panddas_directory_label.setText(self.panddas_directory)
            self.pandda_output_data_dir_entry.setText(self.panddas_directory)
            self.ground_state_pandda_directory_label.setText(self.panddas_directory)
            print('PANDDA', self.panddas_directory)
            self.settings['panddas_directory'] = self.panddas_directory
            self.layout_funcs.pandda_html(self)
        # HTML export directory.
        if self.sender().text() == 'Select HTML Export Directory':
            self.html_export_directory = str(QtGui.QFileDialog.getExistingDirectory(self.window, "Select Directory"))
            self.html_export_directory_label.setText(self.html_export_directory)
            self.settings['html_export_directory'] = self.html_export_directory
        # Group deposition directory.
        if self.sender().text() == 'Select Group deposition Directory':
            self.group_deposit_directory = str(QtGui.QFileDialog.getExistingDirectory(self.window, "Select Directory"))
            self.group_deposition_directory_label.setText(self.group_deposit_directory)
            self.settings['group_deposit_directory'] = self.group_deposit_directory
        #self.datasource_menu_reload_samples()
######################################### sort stuff below here ####################################################
def select_sample_for_dimple(self):
indexes = self.maps_table.selectionModel().selectedRows()
for index in sorted(indexes):
xtal = str(self.maps_table.item(index.row(), 0).text())
self.update_log.insert('{0!s} is marked for DIMPLE'.format(index.row()))
self.initial_model_dimple_dict[xtal][0].setChecked(True)
def update_summary_plot(self):
if self.data_source_set:
XChemPlots.summary_plot(os.path.join(self.database_directory, self.data_source_file),
self.overview_axes).update_overview()
self.overview_canvas.draw()
    def show_preferences(self):
        """Display the modal Preferences dialog.

        The dialog is built inside a QMessageBox layout; most settings are
        applied immediately through the connected change_* / *_changed slots,
        so there is no separate OK/Apply step.
        """
        preferences = QtGui.QMessageBox()
        preferencesLayout = preferences.layout()
        vbox = QtGui.QVBoxLayout()
        # --- filename root used for output files ---
        settings_hbox_filename_root = QtGui.QHBoxLayout()
        filename_root_label = QtGui.QLabel('filename root:')
        settings_hbox_filename_root.addWidget(filename_root_label)
        filename_root_input = QtGui.QLineEdit()
        filename_root_input.setFixedWidth(400)
        filename_root_input.setText(str(self.filename_root))
        filename_root_input.textChanged[str].connect(self.change_filename_root)
        settings_hbox_filename_root.addWidget(filename_root_input)
        vbox.addLayout(settings_hbox_filename_root)
        # --- max. allowed unit cell difference (%) between reference and target ---
        settings_hbox_adjust_allowed_unit_cell_difference = QtGui.QHBoxLayout()
        adjust_allowed_unit_cell_difference_label = QtGui.QLabel(
            'Max. Allowed Unit Cell Difference between Reference and Target (%):')
        settings_hbox_adjust_allowed_unit_cell_difference.addWidget(adjust_allowed_unit_cell_difference_label)
        adjust_allowed_unit_cell_difference = QtGui.QLineEdit()
        adjust_allowed_unit_cell_difference.setFixedWidth(200)
        adjust_allowed_unit_cell_difference.setText(str(self.allowed_unitcell_difference_percent))
        adjust_allowed_unit_cell_difference.textChanged[str].connect(self.change_allowed_unitcell_difference_percent)
        settings_hbox_adjust_allowed_unit_cell_difference.addWidget(adjust_allowed_unit_cell_difference)
        vbox.addLayout(settings_hbox_adjust_allowed_unit_cell_difference)
        # --- acceptable low resolution limit for datasets (Angstrom) ---
        settings_hbox_acceptable_low_resolution_limit = QtGui.QHBoxLayout()
        adjust_acceptable_low_resolution_limit_label = QtGui.QLabel(
            'Acceptable low resolution limit for datasets (in Angstrom):')
        settings_hbox_acceptable_low_resolution_limit.addWidget(adjust_acceptable_low_resolution_limit_label)
        adjust_acceptable_low_resolution_limit = QtGui.QLineEdit()
        adjust_acceptable_low_resolution_limit.setFixedWidth(200)
        adjust_acceptable_low_resolution_limit.setText(str(self.acceptable_low_resolution_limit_for_data))
        adjust_acceptable_low_resolution_limit.textChanged[str].connect(self.change_acceptable_low_resolution_limit)
        settings_hbox_acceptable_low_resolution_limit.addWidget(adjust_acceptable_low_resolution_limit)
        vbox.addLayout(settings_hbox_acceptable_low_resolution_limit)
        # --- amount of processed data to copy into the initial_model directory ---
        vbox_data = QtGui.QVBoxLayout()
        vbox_data.addWidget(
            QtGui.QLabel('Select amount of processed data you wish to copy to initial_model directory:'))
        self.preferences_data_to_copy_combobox = QtGui.QComboBox()
        for item in self.preferences_data_to_copy:
            self.preferences_data_to_copy_combobox.addItem(item[0])
        self.preferences_data_to_copy_combobox.currentIndexChanged.connect(
            self.preferences_data_to_copy_combobox_changed)
        vbox_data.addWidget(self.preferences_data_to_copy_combobox)
        vbox.addLayout(vbox_data)
        # --- dataset selection mechanism; preselect the current preference ---
        vbox_select = QtGui.QVBoxLayout()
        vbox_select.addWidget(QtGui.QLabel('Dataset Selection Mechanism:'))
        self.preferences_selection_mechanism_combobox = QtGui.QComboBox()
        for item in self.preferences_selection_mechanism:
            self.preferences_selection_mechanism_combobox.addItem(item)
        self.preferences_selection_mechanism_combobox.currentIndexChanged.connect(
            self.preferences_selection_mechanism_combobox_changed)
        index = self.preferences_selection_mechanism_combobox.findText(self.preferences['dataset_selection_mechanism'], QtCore.Qt.MatchFixedString)
        self.preferences_selection_mechanism_combobox.setCurrentIndex(index)
        vbox_select.addWidget(self.preferences_selection_mechanism_combobox)
        vbox.addLayout(vbox_select)
        # (disabled initial-refinement-pipeline selector kept for reference)
        # vbox_inital_refinement = QtGui.QVBoxLayout()
        # vbox_inital_refinement.addWidget(QtGui.QLabel('Initial Refinement Pipeline:'))
        # self.preferences_initial_refinement_combobox = QtGui.QComboBox()
        # for item in self.preferences_initial_refinement_pipeline:
        #     self.preferences_initial_refinement_combobox.addItem(item)
        # self.preferences_initial_refinement_combobox.currentIndexChanged.connect(
        #     self.preferences_initial_refinement_combobox_changed)
        # index = self.preferences_initial_refinement_combobox.findText(self.preferences['initial_refinement_pipeline'], QtCore.Qt.MatchFixedString)
        # self.preferences_initial_refinement_combobox.setCurrentIndex(index)
        # vbox_inital_refinement.addWidget(self.preferences_initial_refinement_combobox)
        # vbox.addLayout(vbox_inital_refinement)
        # --- restraints generation program: only offer installed programs ---
        vbox_restraints = QtGui.QVBoxLayout()
        vbox_restraints.addWidget(QtGui.QLabel('Restraints generation program:'))
        self.preferences_restraints_generation_combobox = QtGui.QComboBox()
        program_list = []
        if self.external_software['acedrg']:
            program_list.append('acedrg')
            # acedrg becomes the default whenever it is available
            self.restraints_program = 'acedrg'
        if self.external_software['phenix.elbow']: program_list.append('phenix.elbow')
        if self.external_software['grade']: program_list.append('grade')
        for item in program_list:
            self.preferences_restraints_generation_combobox.addItem(item)
        self.preferences_restraints_generation_combobox.currentIndexChanged.connect(
            self.preferences_restraints_generation_combobox_changed)
        index = self.preferences_restraints_generation_combobox.findText(self.restraints_program,
                                                                         QtCore.Qt.MatchFixedString)
        self.preferences_restraints_generation_combobox.setCurrentIndex(index)
        vbox_restraints.addWidget(self.preferences_restraints_generation_combobox)
        vbox.addLayout(vbox_restraints)
        # --- XCE logfile location with a "Change" button ---
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(QtGui.QLabel('XCE logfile:'))
        self.xce_logfile_label = QtGui.QLabel(self.xce_logfile)
        hbox.addWidget(self.xce_logfile_label)
        button = QtGui.QPushButton("Change")
        button.clicked.connect(self.set_xce_logfile)
        hbox.addWidget(button)
        vbox.addLayout(hbox)
        # --- max. number of concurrent cluster jobs ---
        settings_hbox_max_queue_jobs = QtGui.QHBoxLayout()
        adjust_max_queue_jobs_label = QtGui.QLabel('Max. number of jobs running at once on DLS cluster:')
        settings_hbox_max_queue_jobs.addWidget(adjust_max_queue_jobs_label)
        adjust_max_queue_jobs = QtGui.QLineEdit()
        adjust_max_queue_jobs.setFixedWidth(200)
        adjust_max_queue_jobs.setText(str(self.max_queue_jobs))
        adjust_max_queue_jobs.textChanged[str].connect(self.change_max_queue_jobs)
        settings_hbox_max_queue_jobs.addWidget(adjust_max_queue_jobs)
        vbox.addLayout(settings_hbox_max_queue_jobs)
        # --- remote qsub submission; NOTE(review): the DIMPLE TWIN block below is
        # interleaved between creating this row and populating it — intentional
        # ordering is unclear, but the layout result is unaffected.
        settings_hbox_remote_qsub = QtGui.QHBoxLayout()
        remote_qsub_label = QtGui.QLabel('remote qsub:')
        settings_hbox_remote_qsub.addWidget(remote_qsub_label)
        self.remote_qsub_checkbox = QtGui.QCheckBox('use')
        self.remote_qsub_checkbox.toggled.connect(self.run_qsub_remotely)
        # --- DIMPLE TWIN mode toggle ---
        settings_hbox_dimple_twin_mode = QtGui.QHBoxLayout()
        self.dimple_twin_mode_label_checkbox = QtGui.QCheckBox('run DIMPLE in TWIN mode')
        if self.preferences['dimple_twin_mode']:
            self.dimple_twin_mode_label_checkbox.setChecked(True)
        self.dimple_twin_mode_label_checkbox.toggled.connect(self.dimple_change_twin_mode)
        settings_hbox_dimple_twin_mode.addWidget(self.dimple_twin_mode_label_checkbox)
        vbox.addLayout(settings_hbox_dimple_twin_mode)
        # reflect the current remote-submission state in the checkbox
        if self.using_remote_qsub_submission:
            self.remote_qsub_checkbox.setChecked(True)
        settings_hbox_remote_qsub.addWidget(self.remote_qsub_checkbox)
        self.remote_qsub_command = QtGui.QLineEdit()
        self.remote_qsub_command.setFixedWidth(550)
        self.remote_qsub_command.setText(self.remote_qsub_submission)
        settings_hbox_remote_qsub.addWidget(self.remote_qsub_command)
        vbox.addLayout(settings_hbox_remote_qsub)
        # --- additional CIF file for non-standard ligands ---
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(QtGui.QLabel('Additional CIF file for non-standard ligand:'))
        self.second_cif_file_label = QtGui.QLabel(self.second_cif_file)
        hbox.addWidget(self.second_cif_file_label)
        button = QtGui.QPushButton("Select")
        button.clicked.connect(self.set_second_cif_file)
        hbox.addWidget(button)
        vbox.addLayout(hbox)
        # settings_hbox_max_queue_jobs.addWidget(adjust_max_queue_jobs_label)
        # adjust_max_queue_jobs = QtGui.QLineEdit()
        # adjust_max_queue_jobs.setFixedWidth(200)
        # adjust_max_queue_jobs.setText(str(self.max_queue_jobs))
        # adjust_max_queue_jobs.textChanged[str].connect(self.change_max_queue_jobs)
        # settings_hbox_max_queue_jobs.addWidget(adjust_max_queue_jobs)
        # vbox.addLayout(settings_hbox_max_queue_jobs)
        #
        # apply_button = QtGui.QPushButton('Apply')
        # apply_button.clicked.connect(self.run_qsub_remotely)
        # settings_hbox_remote_qsub.addWidget(apply_button)
        preferencesLayout.addLayout(vbox, 0, 0)
        preferences.exec_();
# def set_second_cif_file(self):
# mb = QtGui.QMessageBox()
# mbLayout = mb.layout()
# vbox = QtGui.QVBoxLayout()
# vbox.addWidget(QtGui.QLabel('CIF file to be merged into ligand CIF files:'))
# self.second_cif_file_label = QtGui.QLabel(self.second_cif_file)
# vbox.addWidget(self.second_cif_file_label)
# button = QtGui.QPushButton("Select")
# button.clicked.connect(self.set_second_cif_file)
# vbox.addWidget(button)
# mbLayout.addLayout(vbox, 0, 0)
# mb.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
# mb.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.RejectRole)
# reply = mb.exec_();
def dimple_change_twin_mode(self):
if self.preferences['dimple_twin_mode']:
self.update_log.insert('changing preferences: turning off DIMPLE in TWIN mode')
self.preferences['dimple_twin_mode'] = False
else:
self.update_log.insert('changing preferences: changing DIMPLE to TWIN mode')
self.preferences['dimple_twin_mode'] = True
def run_qsub_remotely(self):
self.remote_qsub_submission = str(self.remote_qsub_command.text())
print(str(self.remote_qsub_submission))
if self.remote_qsub_checkbox.isChecked():
self.update_log.insert('submitting jobs to remote machine with: %s' % self.remote_qsub_submission)
self.external_software['qsub_remote'] = self.remote_qsub_submission
self.using_remote_qsub_submission = True
self.settings['remote_qsub'] = self.remote_qsub_submission
else:
self.update_log.insert('switching off remote job submission')
self.external_software['qsub_remote'] = ''
self.settings['remote_qsub'] = ''
self.using_remote_qsub_submission = False
def enter_pdb_codes(self):
pdbID_entry = QtGui.QMessageBox()
pdbID_entryLayout = pdbID_entry.layout()
vbox = QtGui.QVBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Text from PDB email'), 0, 0)
self.pdb_code_entry = QtGui.QTextEdit()
self.pdb_code_entry.setText('')
self.pdb_code_entry.setFixedWidth(500)
grid.addWidget(self.pdb_code_entry, 1, 0, 20, 1)
frame.setLayout(grid)
vbox.addWidget(frame)
hbox = QtGui.QHBoxLayout()
button = QtGui.QPushButton('Update Database')
button.clicked.connect(self.update_database_with_pdb_codes)
hbox.addWidget(button)
vbox.addLayout(hbox)
pdbID_entryLayout.addLayout(vbox, 0, 0)
pdbID_entry.exec_();
def add_label_information(self):
label_entry = QtGui.QMessageBox()
label_entryLayout = label_entry.layout()
try:
labelInfo = self.db.get_label_info_from_db()
except AttributeError:
self.update_log.warning('please specify DB file first')
return None
vbox = QtGui.QVBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('label'), 0, 0)
grid.addWidget(QtGui.QLabel('description'), 0, 1)
self.remote_qsub_command = QtGui.QLineEdit()
self.remote_qsub_command.setFixedWidth(550)
self.remote_qsub_command.setText(self.remote_qsub_submission)
self.labelList = []
for i in range(5):
labelEdit = QtGui.QLineEdit()
descriptionEdit = QtGui.QLineEdit()
grid.addWidget(labelEdit, i + 1, 0)
grid.addWidget(descriptionEdit, i + 1, 1)
try:
labelEdit.setText(labelInfo[i][0])
descriptionEdit.setText(labelInfo[i][1])
except IndexError:
labelEdit.setText('')
descriptionEdit.setText('')
labelEdit.setFixedWidth(100)
descriptionEdit.setFixedWidth(500)
self.labelList.append([labelEdit,descriptionEdit])
frame.setLayout(grid)
vbox.addWidget(frame)
hbox = QtGui.QHBoxLayout()
button = QtGui.QPushButton('Update Database')
button.clicked.connect(self.update_database_with_labelInfo)
hbox.addWidget(button)
vbox.addLayout(hbox)
label_entryLayout.addLayout(vbox, 0, 0)
label_entry.exec_();
def create_missing_apo_records_in_depositTable(self):
self.db.create_missing_apo_records_for_all_structures_in_depositTable(self.initial_model_directory,
self.xce_logfile)
# def update_file_information_of_apo_records(self):
# XChemDeposit.update_file_locations_of_apo_structuresin_DB(
# os.path.join(self.database_directory, self.data_source_file), self.initial_model_directory,
# self.xce_logfile)
    def prepare_models_for_deposition_ligand_bound(self,structureType):
        """Prepare mmcif files for PDB group deposition in a worker thread.

        structureType: 'ground_state' prepares the apo/ground-state model
        (PDB and MTZ paths are looked up in the deposit table and must exist
        on disk); any other value prepares the ligand-bound models with an
        empty ground_state list. The worker thread is only started when all
        required files/database entries were found.
        """
        start_thread = True
        self.update_log.insert('preparing mmcif files for PDB group deposition...')
        ignore_event_map = False
        if structureType == 'ground_state':
            try:
                self.update_log.insert('ground-state deposition')
                data_template_dict = self.db.get_deposit_dict_for_sample('ground_state')
                pdb = data_template_dict['PDB_file']
                self.update_log.insert('looking for ground-state PDB: ' + pdb)
                if not os.path.isfile(pdb):
                    self.update_log.error('ground-state PDB does not exist; stopping...')
                    start_thread = False
                mtz = data_template_dict['MTZ_file']
                self.update_log.insert('looking for ground-state MTZ: ' + mtz)
                if not os.path.isfile(mtz):
                    self.update_log.error('ground-state MTZ does not exist; stopping...')
                    start_thread = False
                ground_state = [ pdb,
                                 mtz,
                                 self.panddas_directory ]
            except KeyError:
                # get_deposit_dict_for_sample returned no ground-state entry
                self.update_log.error('seems like there is no entry for ground-state in database')
                start_thread = False
        else:
            ground_state = []
        # NOTE(review): 'bounnd' looks like a typo, but the attribute name must
        # match the checkbox defined elsewhere in this class - do not "fix" here.
        if self.deposition_bounnd_state_preparation_ignore_event_map.isChecked():
            ignore_event_map = True
        # structureType = "ligand_bound"
        if start_thread:
            if ground_state != []:
                self.update_log.insert('apo PDB: ' + ground_state[0])
                self.update_log.insert('apo MTZ: ' + ground_state[1])
                self.update_log.insert('pandda directory: ' + ground_state[2])
            overwrite_existing_mmcif = True
            self.work_thread = XChemDeposit.prepare_mmcif_files_for_deposition(
                os.path.join(self.database_directory, self.data_source_file),
                self.xce_logfile,
                overwrite_existing_mmcif,
                self.initial_model_directory,
                ground_state,
                ignore_event_map)
            self.explorer_active = 1
            self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
            self.work_thread.start()
def prepare_models_for_deposition_apo(self):
structureType = "apo"
overwrite_existing_mmcif = True
self.work_thread = XChemDeposit.prepare_mmcif_files_for_deposition(
os.path.join(self.database_directory, self.data_source_file),
self.xce_logfile,
overwrite_existing_mmcif,
self.initial_model_directory,
structureType)
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def prepare_for_group_deposition_upload_ligand_bound(self):
self.work_thread = XChemDeposit.prepare_for_group_deposition_upload(
os.path.join(self.database_directory, self.data_source_file),
self.xce_logfile,
self.group_deposit_directory,self.initial_model_directory,'ligand_bound')
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def prepare_for_group_deposition_upload_ground_state(self):
self.work_thread = XChemDeposit.prepare_for_group_deposition_upload(
os.path.join(self.database_directory, self.data_source_file),
self.xce_logfile,
self.group_deposit_directory,self.initial_model_directory,'ground_state')
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def check_smiles_in_db_and_pdb(self):
self.work_thread = XChemDeposit.compare_smiles_in_db_with_ligand_in_pdb(self.initial_model_directory,
os.path.join(self.database_directory,
self.data_source_file),
self.xce_logfile)
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.connect(self.work_thread, QtCore.SIGNAL("show_error_dict"), self.show_error_dict)
self.work_thread.start()
def deposition_data(self):
depositData = QtGui.QMessageBox()
depositDataLayout = depositData.layout()
vbox = QtGui.QVBoxLayout()
deposit_tab_widget = QtGui.QTabWidget()
deposit_tab_list = ['Contact',
'General',
'Authors',
'Citation',
'Molecule',
'Misc',
'Methods',
'Software',
'Funding' ]
deposit_tab_dict = {}
for page in deposit_tab_list:
tab = QtGui.QWidget()
vb = QtGui.QVBoxLayout(tab)
deposit_tab_widget.addTab(tab, page)
deposit_tab_dict[page] = [tab, vb]
## PI and scientist info
vb = QtGui.QVBoxLayout()
hbox = QtGui.QHBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Principal Investigator'), 0, 0)
grid.addWidget(QtGui.QLabel('Salutation'), 1, 0)
self.contact_author_PI_salutation = QtGui.QLineEdit()
self.contact_author_PI_salutation.setText('Dr.')
self.contact_author_PI_salutation.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_salutation, 1, 1)
grid.addWidget(QtGui.QLabel('First name'), 2, 0)
self.contact_author_PI_first_name = QtGui.QLineEdit()
self.contact_author_PI_first_name.setText('')
self.contact_author_PI_first_name.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_first_name, 2, 1)
grid.addWidget(QtGui.QLabel('Last name'), 3, 0)
self.contact_author_PI_last_name = QtGui.QLineEdit()
self.contact_author_PI_last_name.setText('')
self.contact_author_PI_last_name.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_last_name, 3, 1)
grid.addWidget(QtGui.QLabel('Middle name'), 4, 0)
self.contact_author_PI_middle_name = QtGui.QLineEdit()
self.contact_author_PI_middle_name.setText('')
self.contact_author_PI_middle_name.setFixedWidth(200)
self.contact_author_PI_middle_name.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.contact_author_PI_middle_name, 4, 1)
grid.addWidget(QtGui.QLabel('PI role'), 5, 0)
self.contact_author_PI_role = QtGui.QComboBox()
# PIroles = ['group leader', 'principal investigator/group leader', 'investigator']
PIroles = ['principal investigator/group leader']
for item in PIroles: self.contact_author_PI_role.addItem(item)
grid.addWidget(self.contact_author_PI_role, 5, 1)
grid.addWidget(QtGui.QLabel('Organization type'), 6, 0)
self.contact_author_PI_organization_type = QtGui.QComboBox()
Organizations = ['academic', 'commercial', 'government']
for item in Organizations: self.contact_author_PI_organization_type.addItem(item)
grid.addWidget(self.contact_author_PI_organization_type, 6, 1)
grid.addWidget(QtGui.QLabel('Organization Name'), 7, 0)
self.contact_author_PI_organization_name = QtGui.QLineEdit()
self.contact_author_PI_organization_name.setText('')
self.contact_author_PI_organization_name.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_organization_name, 7, 1)
grid.addWidget(QtGui.QLabel('Email'), 8, 0)
self.contact_author_PI_email = QtGui.QLineEdit()
self.contact_author_PI_email.setText('')
self.contact_author_PI_email.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_email, 8, 1)
grid.addWidget(QtGui.QLabel('Street'), 9, 0)
self.contact_author_PI_address = QtGui.QLineEdit()
self.contact_author_PI_address.setText('')
self.contact_author_PI_address.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_address, 9, 1)
grid.addWidget(QtGui.QLabel('City'), 10, 0)
self.contact_author_PI_city = QtGui.QLineEdit()
self.contact_author_PI_city.setText('')
self.contact_author_PI_city.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_city, 10, 1)
grid.addWidget(QtGui.QLabel('State'), 11, 0)
self.contact_author_PI_State_or_Province = QtGui.QLineEdit()
self.contact_author_PI_State_or_Province.setText('')
self.contact_author_PI_State_or_Province.setFixedWidth(200)
self.contact_author_PI_State_or_Province.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.contact_author_PI_State_or_Province, 11, 1)
grid.addWidget(QtGui.QLabel('ZIP code'), 12, 0)
self.contact_author_PI_Zip_Code = QtGui.QLineEdit()
self.contact_author_PI_Zip_Code.setText('')
self.contact_author_PI_Zip_Code.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_Zip_Code, 12, 1)
grid.addWidget(QtGui.QLabel('Country'), 13, 0)
self.contact_author_PI_Country = QtGui.QLineEdit()
self.contact_author_PI_Country.setText('')
self.contact_author_PI_Country.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_Country, 13, 1)
grid.addWidget(QtGui.QLabel('Phone'), 14, 0)
self.contact_author_PI_phone_number = QtGui.QLineEdit()
self.contact_author_PI_phone_number.setText('')
self.contact_author_PI_phone_number.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_phone_number, 14, 1)
grid.addWidget(QtGui.QLabel('ORCID'), 15, 0)
self.contact_author_PI_ORCID = QtGui.QLineEdit()
self.contact_author_PI_ORCID.setText('')
self.contact_author_PI_ORCID.setFixedWidth(200)
grid.addWidget(self.contact_author_PI_ORCID, 15, 1)
frame.setLayout(grid)
hbox.addWidget(frame)
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Responsible Scientist'), 0, 0)
grid.addWidget(QtGui.QLabel('Salutation'), 1, 0)
self.contact_author_salutation = QtGui.QLineEdit()
self.contact_author_salutation.setText('Dr.')
self.contact_author_salutation.setFixedWidth(200)
grid.addWidget(self.contact_author_salutation, 1, 1)
grid.addWidget(QtGui.QLabel('First name'), 2, 0)
self.contact_author_first_name = QtGui.QLineEdit()
self.contact_author_first_name.setText('')
self.contact_author_first_name.setFixedWidth(200)
grid.addWidget(self.contact_author_first_name, 2, 1)
grid.addWidget(QtGui.QLabel('Last name'), 3, 0)
self.contact_author_last_name = QtGui.QLineEdit()
self.contact_author_last_name.setText('')
self.contact_author_last_name.setFixedWidth(200)
grid.addWidget(self.contact_author_last_name, 3, 1)
grid.addWidget(QtGui.QLabel('Middle name'), 4, 0)
self.contact_author_middle_name = QtGui.QLineEdit()
self.contact_author_middle_name.setText('')
self.contact_author_middle_name.setFixedWidth(200)
self.contact_author_middle_name.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.contact_author_middle_name, 4, 1)
grid.addWidget(QtGui.QLabel('Role'), 5, 0)
self.contact_author_role = QtGui.QComboBox()
ScientistRoles = ['responsible scientist', 'investigator']
for item in ScientistRoles: self.contact_author_role.addItem(item)
grid.addWidget(self.contact_author_role, 5, 1)
grid.addWidget(QtGui.QLabel('Organization type'), 6, 0)
self.contact_author_organization_type = QtGui.QComboBox()
for item in Organizations: self.contact_author_organization_type.addItem(item)
grid.addWidget(self.contact_author_organization_type, 6, 1)
grid.addWidget(QtGui.QLabel('Organization Name'), 7, 0)
self.contact_author_organization_name = QtGui.QLineEdit()
self.contact_author_organization_name.setText('')
self.contact_author_organization_name.setFixedWidth(200)
grid.addWidget(self.contact_author_organization_name, 7, 1)
grid.addWidget(QtGui.QLabel('Email'), 8, 0)
self.contact_author_email = QtGui.QLineEdit()
self.contact_author_email.setText('')
self.contact_author_email.setFixedWidth(200)
grid.addWidget(self.contact_author_email, 8, 1)
grid.addWidget(QtGui.QLabel('Street'), 9, 0)
self.contact_author_address = QtGui.QLineEdit()
self.contact_author_address.setText('')
self.contact_author_address.setFixedWidth(200)
grid.addWidget(self.contact_author_address, 9, 1)
grid.addWidget(QtGui.QLabel('City'), 10, 0)
self.contact_author_city = QtGui.QLineEdit()
self.contact_author_city.setText('')
self.contact_author_city.setFixedWidth(200)
grid.addWidget(self.contact_author_city, 10, 1)
grid.addWidget(QtGui.QLabel('State'), 11, 0)
self.contact_author_State_or_Province = QtGui.QLineEdit()
self.contact_author_State_or_Province.setText('')
self.contact_author_State_or_Province.setFixedWidth(200)
self.contact_author_State_or_Province.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.contact_author_State_or_Province, 11, 1)
grid.addWidget(QtGui.QLabel('ZIP code'), 12, 0)
self.contact_author_Zip_Code = QtGui.QLineEdit()
self.contact_author_Zip_Code.setText('')
self.contact_author_Zip_Code.setFixedWidth(200)
grid.addWidget(self.contact_author_Zip_Code, 12, 1)
grid.addWidget(QtGui.QLabel('Country'), 13, 0)
self.contact_author_Country = QtGui.QLineEdit()
self.contact_author_Country.setText('')
self.contact_author_Country.setFixedWidth(200)
grid.addWidget(self.contact_author_Country, 13, 1)
grid.addWidget(QtGui.QLabel('Phone'), 14, 0)
self.contact_author_phone_number = QtGui.QLineEdit()
self.contact_author_phone_number.setText('')
self.contact_author_phone_number.setFixedWidth(200)
grid.addWidget(self.contact_author_phone_number, 14, 1)
grid.addWidget(QtGui.QLabel('ORCID'), 15, 0)
self.contact_author_ORCID = QtGui.QLineEdit()
self.contact_author_ORCID.setText('')
self.contact_author_ORCID.setFixedWidth(200)
grid.addWidget(self.contact_author_ORCID, 15, 1)
frame.setLayout(grid)
hbox.addWidget(frame)
vb.addLayout(hbox)
vb.addWidget(QtGui.QLabel(XChemToolTips.deposition_interface_note()))
vb.addStretch(1)
deposit_tab_dict['Contact'][1].addLayout(vb)
## release status
vb = QtGui.QVBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Release status'), 0, 0)
grid.addWidget(QtGui.QLabel('Release Status for sequence'), 4, 0)
self.Release_status_for_sequence = QtGui.QComboBox()
codeStatus = ['RELEASE NOW', 'HOLD FOR RELEASE']
for item in codeStatus: self.Release_status_for_sequence.addItem(item)
grid.addWidget(self.Release_status_for_sequence, 4, 1)
grid.addWidget(QtGui.QLabel('Release Status for coordinates/ SF'), 8, 0)
self.Release_status_for_coordinates = QtGui.QComboBox()
coordStatus = ['RELEASE NOW', 'HOLD FOR PUBLICATION', 'HOLD FOR 4 WEEKS', 'HOLD FOR 6 MONTHS',
'HOLD FOR 1 YEAR']
for item in coordStatus: self.Release_status_for_coordinates.addItem(item)
grid.addWidget(self.Release_status_for_coordinates, 8, 1)
frame.setLayout(grid)
vb.addWidget(frame)
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Title & Details'), 0, 0)
note = (
'Note: supported wildcards: $ProteinName,$CompoundName; e.g. "Crystal Structure of human JMJD2D in complex with N2317a"')
grid.addWidget(QtGui.QLabel(note), 1, 0)
grid.addWidget(QtGui.QLabel('Group deposition title'), 2, 0)
self.group_deposition_title = QtGui.QLineEdit()
self.group_deposition_title.setText('PanDDA analysis group deposition')
self.group_deposition_title.setFixedWidth(600)
# self.group_deposition_title.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.group_deposition_title, 2, 1)
grid.addWidget(QtGui.QLabel('Description'), 3, 0)
self.group_description = QtGui.QLineEdit()
self.group_description.setText(
'XDomainX of XOrganismX $ProteinName screened against the XXX Fragment Library by X-ray Crystallography at the XChem facility of Diamond Light Source beamline I04-1')
self.group_description.setFixedWidth(600)
grid.addWidget(self.group_description, 3, 1)
grid.addWidget(QtGui.QLabel('Structure Title (ligand bound)'), 4, 0)
self.structure_title = QtGui.QLineEdit()
self.structure_title.setText('Crystal Structure of $ProteinName in complex with $CompoundName')
self.structure_title.setFixedWidth(600)
grid.addWidget(self.structure_title, 4, 1)
note = ('\n\nApo Structure:\nonly use if you want to deposit PanDDA models!')
grid.addWidget(QtGui.QLabel(note), 6, 0)
grid.addWidget(QtGui.QLabel('Structure Title (apo)'), 7, 0)
self.structure_title_apo = QtGui.QLineEdit()
self.structure_title_apo.setText(
'PanDDA analysis group deposition of ground-state model of $ProteinName')
self.structure_title_apo.setFixedWidth(600)
grid.addWidget(self.structure_title_apo, 7, 1)
frame.setLayout(grid)
vb.addWidget(frame)
vb.addStretch(1)
deposit_tab_dict['General'][1].addLayout(vb)
## authors
vb = QtGui.QVBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Deposition authors (e.g. Surname, F.M.)'), 0, 0)
self.structure_author_name_List = []
for column in range(0, 2):
for row in range(1, 15):
structure_author_name = QtGui.QLineEdit()
structure_author_name.setText('')
structure_author_name.setFixedWidth(300)
grid.addWidget(structure_author_name, row, column)
self.structure_author_name_List.append(structure_author_name)
frame.setLayout(grid)
vb.addWidget(frame)
vb.addStretch(1)
deposit_tab_dict['Authors'][1].addLayout(vb)
## primary citation
vb = QtGui.QVBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Primary Citation'), 0, 0)
grid.addWidget(QtGui.QLabel('ID'), 1, 0)
self.primary_citation_id = QtGui.QLineEdit()
self.primary_citation_id.setText('primary')
self.primary_citation_id.setFixedWidth(500)
grid.addWidget(self.primary_citation_id, 1, 1)
grid.addWidget(QtGui.QLabel('Journal'), 2, 0)
self.primary_citation_journal_abbrev = QtGui.QLineEdit()
self.primary_citation_journal_abbrev.setText('To be published')
self.primary_citation_journal_abbrev.setFixedWidth(500)
grid.addWidget(self.primary_citation_journal_abbrev, 2, 1)
grid.addWidget(QtGui.QLabel('Title'), 3, 0)
self.primary_citation_title = QtGui.QLineEdit()
self.primary_citation_title.setText('')
self.primary_citation_title.setFixedWidth(500)
self.primary_citation_title.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.primary_citation_title, 3, 1)
grid.addWidget(QtGui.QLabel('Year'), 4, 0)
self.primary_citation_year = QtGui.QLineEdit()
self.primary_citation_year.setText('')
self.primary_citation_year.setFixedWidth(500)
self.primary_citation_year.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.primary_citation_year, 4, 1)
grid.addWidget(QtGui.QLabel('Volume'), 5, 0)
self.primary_citation_journal_volume = QtGui.QLineEdit()
self.primary_citation_journal_volume.setText('')
self.primary_citation_journal_volume.setFixedWidth(500)
self.primary_citation_journal_volume.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.primary_citation_journal_volume, 5, 1)
grid.addWidget(QtGui.QLabel('Page, first'), 6, 0)
self.primary_citation_page_first = QtGui.QLineEdit()
self.primary_citation_page_first.setText('')
self.primary_citation_page_first.setFixedWidth(500)
self.primary_citation_page_first.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.primary_citation_page_first, 6, 1)
grid.addWidget(QtGui.QLabel('Page, last'), 7, 0)
self.primary_citation_page_last = QtGui.QLineEdit()
self.primary_citation_page_last.setText('')
self.primary_citation_page_last.setFixedWidth(500)
self.primary_citation_page_last.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.primary_citation_page_last, 7, 1)
frame.setLayout(grid)
vb.addWidget(frame)
## citation authors
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
self.set_primary_citation_authors = QtGui.QCheckBox('same as deposition authors')
self.layout_funcs.add_checkbox(self, self.set_primary_citation_authors,
'xce_object.set_primary_citation_as_structure_authors')
grid.addWidget(self.set_primary_citation_authors, 0, 0)
self.primary_citation_author_name_List = []
for column in range(0, 2):
for row in range(1, 15):
primary_citation_author_name = QtGui.QLineEdit()
primary_citation_author_name.setText('')
primary_citation_author_name.setFixedWidth(300)
grid.addWidget(primary_citation_author_name, row, column)
self.primary_citation_author_name_List.append(primary_citation_author_name)
frame.setLayout(grid)
vb.addWidget(frame)
vb.addStretch(1)
deposit_tab_dict['Citation'][1].addLayout(vb)
## molecule info
vb = QtGui.QVBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Entity 1'), 1, 0)
grid.addWidget(QtGui.QLabel('Molecule Name'), 2, 0)
self.molecule_name = QtGui.QLineEdit()
self.molecule_name.setText('')
self.molecule_name.setFixedWidth(300)
# self.molecule_name.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.molecule_name, 2, 1)
grid.addWidget(QtGui.QLabel('(e.g. RNA Hammerhead Ribozyme)'), 2, 2)
grid.addWidget(QtGui.QLabel('Fragment Name'), 3, 0)
self.fragment_name_one = QtGui.QLineEdit()
self.fragment_name_one.setText('')
self.fragment_name_one.setFixedWidth(300)
self.fragment_name_one.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.fragment_name_one, 3, 1)
grid.addWidget(QtGui.QLabel('(e.g. ligand binding domain, hairpin)'), 3, 2)
grid.addWidget(QtGui.QLabel('Specific Mutation'), 4, 0)
self.fragment_name_one_specific_mutation = QtGui.QLineEdit()
self.fragment_name_one_specific_mutation.setText('')
self.fragment_name_one_specific_mutation.setFixedWidth(300)
self.fragment_name_one_specific_mutation.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.fragment_name_one_specific_mutation, 4, 1)
grid.addWidget(QtGui.QLabel('(e.g. C280S)'), 4, 2)
grid.addWidget(QtGui.QLabel('Enzyme Comission Number'), 5, 0)
self.fragment_name_one_enzyme_comission_number = QtGui.QLineEdit()
self.fragment_name_one_enzyme_comission_number.setText('')
self.fragment_name_one_enzyme_comission_number.setFixedWidth(300)
self.fragment_name_one_enzyme_comission_number.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.fragment_name_one_enzyme_comission_number, 5, 1)
grid.addWidget(QtGui.QLabel('(if known: e.g. 2.7.7.7)'), 5, 2)
grid.addWidget(QtGui.QLabel('Genetically Manipulated Source'), 6, 0)
grid.addWidget(QtGui.QLabel('Source organism scientific name'), 7, 0)
self.Source_organism_scientific_name = QtGui.QComboBox()
taxonomy_dict = XChemMain.NCBI_taxonomy_ID()
for item in taxonomy_dict:
self.Source_organism_scientific_name.addItem(taxonomy_dict[item])
grid.addWidget(self.Source_organism_scientific_name, 7, 1)
grid.addWidget(QtGui.QLabel('Source organism gene'), 8, 0)
self.Source_organism_gene = QtGui.QLineEdit()
self.Source_organism_gene.setText('')
self.Source_organism_gene.setFixedWidth(300)
grid.addWidget(self.Source_organism_gene, 8, 1)
grid.addWidget(QtGui.QLabel('(e.g. RPOD, ALKA...)'), 8, 2)
grid.addWidget(QtGui.QLabel('Source organism strain'), 9, 0)
self.Source_organism_strain = QtGui.QLineEdit()
self.Source_organism_strain.setText('')
self.Source_organism_strain.setFixedWidth(300)
self.Source_organism_strain.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.Source_organism_strain, 9, 1)
grid.addWidget(QtGui.QLabel('(e.g. BH10 ISOLATE, K-12...)'), 9, 2)
grid.addWidget(QtGui.QLabel('Expression system scientific name'), 10, 0)
self.Expression_system_scientific_name = QtGui.QComboBox()
for item in taxonomy_dict:
self.Expression_system_scientific_name.addItem(taxonomy_dict[item])
grid.addWidget(self.Expression_system_scientific_name, 10, 1)
grid.addWidget(QtGui.QLabel('Expression system strain'), 11, 0)
self.Expression_system_strain = QtGui.QLineEdit()
self.Expression_system_strain.setText('')
self.Expression_system_strain.setFixedWidth(300)
self.Expression_system_strain.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.Expression_system_strain, 11, 1)
grid.addWidget(QtGui.QLabel('(e.g. BL21(DE3))'), 11, 2)
grid.addWidget(QtGui.QLabel('Expression system vector type'), 12, 0)
self.Expression_system_vector_type = QtGui.QLineEdit()
self.Expression_system_vector_type.setText('')
self.Expression_system_vector_type.setFixedWidth(300)
self.Expression_system_vector_type.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.Expression_system_vector_type, 12, 1)
grid.addWidget(QtGui.QLabel('(e.g. plasmid)'), 12, 2)
grid.addWidget(QtGui.QLabel('Expression_system_plasmid_name'), 13, 0)
self.Expression_system_plasmid_name = QtGui.QLineEdit()
self.Expression_system_plasmid_name.setText('')
self.Expression_system_plasmid_name.setFixedWidth(300)
self.Expression_system_plasmid_name.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.Expression_system_plasmid_name, 13, 1)
grid.addWidget(QtGui.QLabel('(e.g. pET26)'), 13, 2)
grid.addWidget(QtGui.QLabel('Manipulated_source_details'), 14, 0)
self.Manipulated_source_details = QtGui.QLineEdit()
self.Manipulated_source_details.setText('')
self.Manipulated_source_details.setFixedWidth(300)
self.Manipulated_source_details.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.Manipulated_source_details, 14, 1)
grid.addWidget(QtGui.QLabel('(any other relevant information)'), 14, 2)
grid.addWidget(QtGui.QLabel('Chains'), 15, 0)
self.molecule_chain_one = QtGui.QLineEdit()
self.molecule_chain_one.setText('')
self.molecule_chain_one.setFixedWidth(300)
grid.addWidget(self.molecule_chain_one, 15, 1)
grid.addWidget(QtGui.QLabel('(e.g. A or A,B)'), 15, 2)
frame.setLayout(grid)
vb.addWidget(frame)
### entity 2
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Entity 2 (IMPORTANT: only fill in if you are working with a protein-protein complex!)'), 1, 0)
grid.addWidget(QtGui.QLabel('Molecule Name'), 2, 0)
self.molecule_name_two = QtGui.QLineEdit()
self.molecule_name_two.setText('')
self.molecule_name_two.setFixedWidth(300)
# self.molecule_name_two.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.molecule_name_two, 2, 1)
grid.addWidget(QtGui.QLabel('(e.g. RNA Hammerhead Ribozyme)'), 2, 2)
grid.addWidget(QtGui.QLabel('Fragment Name'), 3, 0)
self.fragment_name_two = QtGui.QLineEdit()
self.fragment_name_two.setText('')
self.fragment_name_two.setFixedWidth(300)
self.fragment_name_two.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.fragment_name_two, 3, 1)
grid.addWidget(QtGui.QLabel('(e.g. ligand binding domain, hairpin)'), 3, 2)
grid.addWidget(QtGui.QLabel('Specific Mutation'), 4, 0)
self.fragment_name_two_specific_mutation = QtGui.QLineEdit()
self.fragment_name_two_specific_mutation.setText('')
self.fragment_name_two_specific_mutation.setFixedWidth(300)
self.fragment_name_two_specific_mutation.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.fragment_name_two_specific_mutation, 4, 1)
grid.addWidget(QtGui.QLabel('(e.g. C280S)'), 4, 2)
grid.addWidget(QtGui.QLabel('Enzyme Comission Number'), 5, 0)
self.fragment_name_two_enzyme_comission_number = QtGui.QLineEdit()
self.fragment_name_two_enzyme_comission_number.setText('')
self.fragment_name_two_enzyme_comission_number.setFixedWidth(300)
self.fragment_name_two_enzyme_comission_number.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.fragment_name_two_enzyme_comission_number, 5, 1)
grid.addWidget(QtGui.QLabel('(if known: e.g. 2.7.7.7)'), 5, 2)
grid.addWidget(QtGui.QLabel('Genetically Manipulated Source'), 6, 0)
grid.addWidget(QtGui.QLabel('Source organism scientific name'), 7, 0)
self.Source_organism_scientific_name_two = QtGui.QComboBox()
taxonomy_dict = XChemMain.NCBI_taxonomy_ID()
for item in taxonomy_dict:
self.Source_organism_scientific_name_two.addItem(taxonomy_dict[item])
grid.addWidget(self.Source_organism_scientific_name_two, 7, 1)
grid.addWidget(QtGui.QLabel('Source organism gene'), 8, 0)
self.Source_organism_gene_two = QtGui.QLineEdit()
self.Source_organism_gene_two.setText('')
self.Source_organism_gene_two.setFixedWidth(300)
grid.addWidget(self.Source_organism_gene_two, 8, 1)
grid.addWidget(QtGui.QLabel('(e.g. RPOD, ALKA...)'), 8, 2)
grid.addWidget(QtGui.QLabel('Source organism strain'), 9, 0)
self.Source_organism_strain_two = QtGui.QLineEdit()
self.Source_organism_strain_two.setText('')
self.Source_organism_strain_two.setFixedWidth(300)
self.Source_organism_strain_two.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.Source_organism_strain_two, 9, 1)
grid.addWidget(QtGui.QLabel('(e.g. BH10 ISOLATE, K-12...)'), 9, 2)
grid.addWidget(QtGui.QLabel('Expression system scientific name'), 10, 0)
self.Expression_system_scientific_name_two = QtGui.QComboBox()
for item in taxonomy_dict:
self.Expression_system_scientific_name_two.addItem(taxonomy_dict[item])
grid.addWidget(self.Expression_system_scientific_name_two, 10, 1)
grid.addWidget(QtGui.QLabel('Expression system strain'), 11, 0)
self.Expression_system_strain_two = QtGui.QLineEdit()
self.Expression_system_strain_two.setText('')
self.Expression_system_strain_two.setFixedWidth(300)
self.Expression_system_strain_two.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.Expression_system_strain_two, 11, 1)
grid.addWidget(QtGui.QLabel('(e.g. BL21(DE3))'), 11, 2)
grid.addWidget(QtGui.QLabel('Expression system vector type'), 12, 0)
self.Expression_system_vector_type_two = QtGui.QLineEdit()
self.Expression_system_vector_type_two.setText('')
self.Expression_system_vector_type_two.setFixedWidth(300)
self.Expression_system_vector_type_two.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.Expression_system_vector_type_two, 12, 1)
grid.addWidget(QtGui.QLabel('(e.g. plasmid)'), 12, 2)
grid.addWidget(QtGui.QLabel('Expression_system_plasmid_name'), 13, 0)
self.Expression_system_plasmid_name_two = QtGui.QLineEdit()
self.Expression_system_plasmid_name_two.setText('')
self.Expression_system_plasmid_name_two.setFixedWidth(300)
self.Expression_system_plasmid_name_two.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.Expression_system_plasmid_name_two, 13, 1)
grid.addWidget(QtGui.QLabel('(e.g. pET26)'), 13, 2)
grid.addWidget(QtGui.QLabel('Manipulated_source_details'), 14, 0)
self.Manipulated_source_details_two = QtGui.QLineEdit()
self.Manipulated_source_details_two.setText('')
self.Manipulated_source_details_two.setFixedWidth(300)
self.Manipulated_source_details_two.setStyleSheet("background-color: rgb(192, 192, 192);")
grid.addWidget(self.Manipulated_source_details_two, 14, 1)
grid.addWidget(QtGui.QLabel('(any other relevant information)'), 14, 2)
grid.addWidget(QtGui.QLabel('Chains'), 15, 0)
self.molecule_chain_two = QtGui.QLineEdit()
self.molecule_chain_two.setText('')
self.molecule_chain_two.setFixedWidth(300)
grid.addWidget(self.molecule_chain_two, 15, 1)
grid.addWidget(QtGui.QLabel('(e.g. A or A,B)'), 15, 2)
frame.setLayout(grid)
vb.addWidget(frame)
### entity 2 --- END
vb.addStretch(1)
deposit_tab_dict['Molecule'][1].addLayout(vb)
## misc
vb = QtGui.QVBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Keywords'), 1, 0)
self.structure_keywords = QtGui.QLineEdit()
self.structure_keywords.setText('SGC - Diamond I04-1 fragment screening, PanDDA, XChemExplorer')
self.structure_keywords.setFixedWidth(300)
grid.addWidget(self.structure_keywords, 1, 1)
grid.addWidget(QtGui.QLabel('(e.g. beta barrel, protein-DNA complex)'), 1, 2)
grid.addWidget(QtGui.QLabel('Type'), 2, 0)
self.structure_keywords_type = QtGui.QComboBox()
self.structure_keywords_type.setStyleSheet("background-color: rgb(192, 192, 192);")
for item in XChemMain.pdbx_keywords(): self.structure_keywords_type.addItem(item)
grid.addWidget(self.structure_keywords_type, 2, 1)
# self.structure_keywords = QtGui.QLineEdit()
# self.structure_keywords.setText('SGC - Diamond I04-1 fragment screening, PanDDA, XChemExplorer')
# self.structure_keywords.setFixedWidth(300)
# grid.addWidget(self.structure_keywords, 1, 1)
# grid.addWidget(QtGui.QLabel('(e.g. beta barrel, protein-DNA complex)'), 1, 2)
grid.addWidget(QtGui.QLabel('Biological Assembly'), 3, 0)
self.biological_assembly_chain_number = QtGui.QLineEdit()
self.biological_assembly_chain_number.setText('')
self.biological_assembly_chain_number.setFixedWidth(300)
grid.addWidget(self.biological_assembly_chain_number, 3, 1)
grid.addWidget(QtGui.QLabel('(e.g. 1 for monomer, 2 for dimer ..)'), 3, 2)
grid.addWidget(QtGui.QLabel('Sequence UNIPROT ID'), 4, 0)
self.molecule_one_letter_sequence_uniprot_id = QtGui.QLineEdit()
self.molecule_one_letter_sequence_uniprot_id.setText('')
self.molecule_one_letter_sequence_uniprot_id.setFixedWidth(300)
grid.addWidget(self.molecule_one_letter_sequence_uniprot_id, 4, 1)
grid.addWidget(QtGui.QLabel('(e.g. Q6B0I6)'), 4, 2)
grid.addWidget(QtGui.QLabel('Sequence'), 5, 0)
self.molecule_one_letter_sequence = QtGui.QTextEdit()
self.molecule_one_letter_sequence.setStyleSheet("background-color: rgb(255, 255, 255);")
# self.molecule_one_letter_sequence.setStyleSheet("background-color: rgb(192, 192, 192);")
self.molecule_one_letter_sequence.setText('')
self.molecule_one_letter_sequence.setFixedWidth(300)
grid.addWidget(self.molecule_one_letter_sequence, 5, 1, 8, 2)
# grid.addWidget(QtGui.QLabel('Sequence information for entity 2'), 10, 0)
# grid.addWidget(QtGui.QLabel('(Important: only for protein-protein complex'), 10, 1)
grid.addWidget(QtGui.QLabel('Sequence UNIPROT ID (Entity 2) - optional'), 13, 0)
self.molecule_one_letter_sequence_uniprot_id_two = QtGui.QLineEdit()
self.molecule_one_letter_sequence_uniprot_id_two.setText('')
self.molecule_one_letter_sequence_uniprot_id_two.setStyleSheet("background-color: rgb(192, 192, 192);")
self.molecule_one_letter_sequence_uniprot_id_two.setFixedWidth(300)
grid.addWidget(self.molecule_one_letter_sequence_uniprot_id_two, 13, 1)
grid.addWidget(QtGui.QLabel('(e.g. Q6B0I6)'), 13, 2)
grid.addWidget(QtGui.QLabel('Sequence (Entity 2) - optional'), 14, 0)
self.molecule_one_letter_sequence_two = QtGui.QTextEdit()
self.molecule_one_letter_sequence_two.setText('')
self.molecule_one_letter_sequence_two.setFixedWidth(300)
grid.addWidget(self.molecule_one_letter_sequence_two, 14, 1, 19, 2)
grid.addWidget(QtGui.QLabel('Structural Genomic (optional)'), 21, 0)
grid.addWidget(QtGui.QLabel('Project Name'), 22, 0)
self.SG_project_name = QtGui.QLineEdit()
self.SG_project_name.setText('')
self.SG_project_name.setStyleSheet("background-color: rgb(192, 192, 192);")
self.SG_project_name.setFixedWidth(300)
grid.addWidget(self.SG_project_name, 22, 1)
grid.addWidget(QtGui.QLabel('(e.g. SGC, Structural Genomics Consortium)'), 22, 2)
grid.addWidget(QtGui.QLabel('Full Name'), 23, 0)
self.full_name_of_SG_center = QtGui.QLineEdit()
self.full_name_of_SG_center.setText('')
self.full_name_of_SG_center.setStyleSheet("background-color: rgb(192, 192, 192);")
self.full_name_of_SG_center.setFixedWidth(300)
grid.addWidget(self.full_name_of_SG_center, 23, 1)
grid.addWidget(QtGui.QLabel('(e.g. Structural Genomics Consortium)'), 23, 2)
frame.setLayout(grid)
vb.addWidget(frame)
vb.addStretch(1)
deposit_tab_dict['Misc'][1].addLayout(vb)
## methods
vb = QtGui.QVBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Crystallization'), 1, 0)
grid.addWidget(QtGui.QLabel('Method'), 2, 0)
self.crystallization_method = QtGui.QComboBox()
for item in XChemMain.crystal_growth_methods(): self.crystallization_method.addItem(item)
grid.addWidget(self.crystallization_method, 2, 1)
grid.addWidget(QtGui.QLabel('pH'), 3, 0)
self.crystallization_pH = QtGui.QLineEdit()
self.crystallization_pH.setText('')
self.crystallization_pH.setFixedWidth(300)
grid.addWidget(self.crystallization_pH, 3, 1)
grid.addWidget(QtGui.QLabel('(e.g. 7.5 ...)'), 3, 2)
grid.addWidget(QtGui.QLabel('Temperature'), 4, 0)
self.crystallization_temperature = QtGui.QLineEdit()
self.crystallization_temperature.setText('')
self.crystallization_temperature.setFixedWidth(300)
grid.addWidget(self.crystallization_temperature, 4, 1)
grid.addWidget(QtGui.QLabel('(e.g. 298) (in Kelvin)'), 4, 2)
grid.addWidget(QtGui.QLabel('Condition'), 5, 0)
self.crystallization_details = QtGui.QLineEdit()
self.crystallization_details.setText('')
self.crystallization_details.setFixedWidth(300)
grid.addWidget(self.crystallization_details, 5, 1)
grid.addWidget(QtGui.QLabel('(e.g. PEG 4000, NaCl etc.)'), 5, 2)
grid.addWidget(QtGui.QLabel('Diffraction Experiment'), 6, 0)
note = ('Note: this information will only be used if it is\n'
'not already available in the mainTable!\n'
'Ignore if data were collected at DLS')
grid.addWidget(QtGui.QLabel(note), 7, 0)
grid.addWidget(QtGui.QLabel('Source'), 8, 0)
self.radiation_source = QtGui.QComboBox()
for item in XChemMain.radiationSource(): self.radiation_source.addItem(item)
grid.addWidget(self.radiation_source, 8, 1)
grid.addWidget(QtGui.QLabel('Source Type'), 9, 0)
self.radiation_source_type = QtGui.QComboBox()
for item in XChemMain.wwBeamlines(): self.radiation_source_type.addItem(item)
grid.addWidget(self.radiation_source_type, 9, 1)
grid.addWidget(QtGui.QLabel('Wavelength'), 10, 0)
self.radiation_wavelengths = QtGui.QLineEdit()
self.radiation_wavelengths.setText('')
self.radiation_wavelengths.setFixedWidth(300)
grid.addWidget(self.radiation_wavelengths, 10, 1)
grid.addWidget(QtGui.QLabel('(e.g. 1.502)'), 10, 2)
grid.addWidget(QtGui.QLabel('Detector'), 11, 0)
self.radiation_detector = QtGui.QComboBox()
for item in XChemMain.detector(): self.radiation_detector.addItem(item)
grid.addWidget(self.radiation_detector, 11, 1)
grid.addWidget(QtGui.QLabel('Detector Type'), 12, 0)
self.radiation_detector_type = QtGui.QComboBox()
for item in XChemMain.detectorType(): self.radiation_detector_type.addItem(item)
grid.addWidget(self.radiation_detector_type, 12, 1)
grid.addWidget(QtGui.QLabel('Date'), 13, 0)
self.data_collection_date = QtGui.QLineEdit()
self.data_collection_date.setText('')
self.data_collection_date.setFixedWidth(300)
grid.addWidget(self.data_collection_date, 13, 1)
grid.addWidget(QtGui.QLabel('(e.g. 2004-01-07)'), 13, 2)
grid.addWidget(QtGui.QLabel('Temperature'), 14, 0)
self.data_collection_temperature = QtGui.QLineEdit()
self.data_collection_temperature.setText('')
self.data_collection_temperature.setFixedWidth(300)
grid.addWidget(self.data_collection_temperature, 14, 1)
grid.addWidget(QtGui.QLabel('(e.g. 100) (in Kelvin)'), 14, 2)
grid.addWidget(QtGui.QLabel('Protocol'), 15, 0)
self.data_collection_protocol = QtGui.QLineEdit()
self.data_collection_protocol.setText('SINGLE WAVELENGTH')
self.data_collection_protocol.setFixedWidth(300)
grid.addWidget(self.data_collection_protocol, 15, 1)
grid.addWidget(QtGui.QLabel('(e.g. SINGLE WAVELENGTH, MAD, ...)'), 15, 2)
frame.setLayout(grid)
vb.addWidget(frame)
vb.addStretch(1)
deposit_tab_dict['Methods'][1].addLayout(vb)
## software
vb = QtGui.QVBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('PDB starting model'), 1, 0)
self.pdbx_starting_model = QtGui.QLineEdit()
self.pdbx_starting_model.setText('')
self.pdbx_starting_model.setFixedWidth(300)
grid.addWidget(self.pdbx_starting_model, 1, 1)
grid.addWidget(QtGui.QLabel('(e.g. 7.5 ...)'), 1, 2)
grid.addWidget(QtGui.QLabel('Data reduction'), 2, 0)
self.data_integration_software = QtGui.QComboBox()
for item in XChemMain.data_integration_software(): self.data_integration_software.addItem(item)
grid.addWidget(self.data_integration_software, 2, 1)
grid.addWidget(QtGui.QLabel('Phasing'), 3, 0)
self.phasing_software = QtGui.QComboBox()
for item in XChemMain.phasing_software(): self.phasing_software.addItem(item)
grid.addWidget(self.phasing_software, 3, 1)
frame.setLayout(grid)
vb.addWidget(frame)
vb.addStretch(1)
deposit_tab_dict['Software'][1].addLayout(vb)
## Funding
vb = QtGui.QVBoxLayout()
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Funding Organization'), 1, 0)
self.pdbx_funding_organization_one = QtGui.QLineEdit()
self.pdbx_funding_organization_one.setText('')
self.pdbx_funding_organization_one.setFixedWidth(700)
grid.addWidget(self.pdbx_funding_organization_one, 1, 1)
grid.addWidget(QtGui.QLabel('Grant Number'), 2, 0)
self.pdbx_grant_number_one = QtGui.QLineEdit()
self.pdbx_grant_number_one.setText('')
self.pdbx_grant_number_one.setFixedWidth(700)
grid.addWidget(self.pdbx_grant_number_one, 2, 1)
grid.addWidget(QtGui.QLabel('Country'), 3, 0)
self.pdbx_grant_country_one = QtGui.QComboBox()
for item in XChemMain.pdbx_country(): self.pdbx_grant_country_one.addItem(item)
grid.addWidget(self.pdbx_grant_country_one, 3, 1)
frame.setLayout(grid)
vb.addWidget(frame)
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Funding Organization'), 1, 0)
self.pdbx_funding_organization_two = QtGui.QLineEdit()
self.pdbx_funding_organization_two.setText('')
self.pdbx_funding_organization_two.setFixedWidth(700)
grid.addWidget(self.pdbx_funding_organization_two, 1, 1)
grid.addWidget(QtGui.QLabel('Grant Number'), 2, 0)
self.pdbx_grant_number_two = QtGui.QLineEdit()
self.pdbx_grant_number_two.setText('')
self.pdbx_grant_number_two.setFixedWidth(700)
grid.addWidget(self.pdbx_grant_number_two, 2, 1)
grid.addWidget(QtGui.QLabel('Country'), 3, 0)
self.pdbx_grant_country_two = QtGui.QComboBox()
for item in XChemMain.pdbx_country(): self.pdbx_grant_country_two.addItem(item)
grid.addWidget(self.pdbx_grant_country_two, 3, 1)
frame.setLayout(grid)
vb.addWidget(frame)
frame = QtGui.QFrame()
frame.setFrameShape(QtGui.QFrame.StyledPanel)
grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QLabel('Funding Organization'), 1, 0)
self.pdbx_funding_organization_three = QtGui.QLineEdit()
self.pdbx_funding_organization_three.setText('')
self.pdbx_funding_organization_three.setFixedWidth(700)
grid.addWidget(self.pdbx_funding_organization_three, 1, 1)
grid.addWidget(QtGui.QLabel('Grant Number'), 2, 0)
self.pdbx_grant_number_three = QtGui.QLineEdit()
self.pdbx_grant_number_three.setText('')
self.pdbx_grant_number_three.setFixedWidth(700)
grid.addWidget(self.pdbx_grant_number_three, 2, 1)
grid.addWidget(QtGui.QLabel('Country'), 3, 0)
self.pdbx_grant_country_three = QtGui.QComboBox()
for item in XChemMain.pdbx_country(): self.pdbx_grant_country_three.addItem(item)
grid.addWidget(self.pdbx_grant_country_three, 3, 1)
frame.setLayout(grid)
vb.addWidget(frame)
vb.addStretch(1)
deposit_tab_dict['Funding'][1].addLayout(vb)
vbox.addWidget(deposit_tab_widget)
hbox = QtGui.QHBoxLayout()
button = QtGui.QPushButton('Load\nFile')
button.clicked.connect(self.load_deposit_config_file)
hbox.addWidget(button)
button = QtGui.QPushButton('Save\nFile')
button.clicked.connect(self.save_deposit_config_file)
hbox.addWidget(button)
button = QtGui.QPushButton('Load from\nDatabase')
button.clicked.connect(self.load_deposit_from_database)
button.setEnabled(False)
hbox.addWidget(button)
button = QtGui.QPushButton('Save to\nDatabase')
button.clicked.connect(self.save_deposit_to_database)
hbox.addWidget(button)
vbox.addLayout(hbox)
depositDataLayout.addLayout(vbox, 0, 0)
depositData.exec_()
def save_deposit_config_file(self):
self.update_deposit_dict()
file_name = str(QtGui.QFileDialog.getSaveFileName(self.window, 'Save file', self.current_directory))
# make sure that the file always has .deposit extension
if str(file_name).rfind('.') != -1:
file_name = file_name[:file_name.rfind('.')] + '.deposit'
else:
file_name = file_name + '.deposit'
pickle.dump(self.deposit_dict, open(file_name, 'wb'))
def update_database_with_pdb_codes(self):
self.work_thread = XChemDeposit.import_PDB_IDs(str(self.pdb_code_entry.toPlainText()),
os.path.join(self.database_directory, self.data_source_file),
self.xce_logfile)
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def update_database_with_labelInfo(self):
for n,l in enumerate(self.labelList):
label = str(l[0].text())
description = str(l[1].text())
# print "update labelTable set Label='%s',Description='%s' where ID=%s" %(label,description,str(n+1))
self.db.execute_statement("update labelTable set Label='%s',Description='%s' where ID=%s" %(label,description,str(n+1)))
# print label,description
def load_deposit_config_file(self):
file_name_temp = QtGui.QFileDialog.getOpenFileNameAndFilter(self.window, 'Open file', self.current_directory,
'*.deposit')
file_name = tuple(file_name_temp)[0]
self.deposit_dict = pickle.load(open(file_name, "rb"))
# print self.deposit_dict
for key in self.get_deposit_dict_template():
if key not in self.deposit_dict:
self.update_log.warning('field not in .deposit file: ' + str(key))
self.deposit_dict[key] = ''
self.update_deposit_input()
def load_deposit_from_database(self):
print('hallo')
def save_deposit_to_database(self):
self.update_deposit_dict()
msgBox = QtGui.QMessageBox()
msgBox.setText(
"*** WARNING ***\nAre you sure you want to update the database?\nThis will overwrite previous entries!")
msgBox.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
msgBox.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.RejectRole)
reply = msgBox.exec_();
if reply == 0:
self.work_thread = XChemDeposit.update_depositTable(self.deposit_dict,
os.path.join(self.database_directory,
self.data_source_file),
self.xce_logfile)
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def update_deposit_input(self):
try:
self.contact_author_PI_salutation.setText(self.deposit_dict['contact_author_PI_salutation'])
self.contact_author_PI_first_name.setText(self.deposit_dict['contact_author_PI_first_name'])
self.contact_author_PI_last_name.setText(self.deposit_dict['contact_author_PI_last_name'])
self.contact_author_PI_middle_name.setText(self.deposit_dict['contact_author_PI_middle_name'])
index = self.contact_author_PI_role.findText(self.deposit_dict['contact_author_PI_role'],
QtCore.Qt.MatchFixedString)
self.contact_author_PI_role.setCurrentIndex(index)
index = self.contact_author_PI_organization_type.findText(
self.deposit_dict['contact_author_PI_organization_type'], QtCore.Qt.MatchFixedString)
self.contact_author_PI_organization_type.setCurrentIndex(index)
self.contact_author_PI_organization_name.setText(self.deposit_dict['contact_author_PI_organization_name'])
self.contact_author_PI_email.setText(self.deposit_dict['contact_author_PI_email'])
self.contact_author_PI_address.setText(self.deposit_dict['contact_author_PI_address'])
self.contact_author_PI_city.setText(self.deposit_dict['contact_author_PI_city'])
self.contact_author_PI_State_or_Province.setText(self.deposit_dict['contact_author_PI_State_or_Province'])
self.contact_author_PI_Zip_Code.setText(self.deposit_dict['contact_author_PI_Zip_Code'])
self.contact_author_PI_Country.setText(self.deposit_dict['contact_author_PI_Country'])
self.contact_author_PI_phone_number.setText(self.deposit_dict['contact_author_PI_phone_number'])
self.contact_author_PI_ORCID.setText(self.deposit_dict['contact_author_PI_ORCID'])
self.contact_author_salutation.setText(self.deposit_dict['contact_author_salutation'])
self.contact_author_first_name.setText(self.deposit_dict['contact_author_first_name'])
self.contact_author_last_name.setText(self.deposit_dict['contact_author_last_name'])
self.contact_author_middle_name.setText(self.deposit_dict['contact_author_middle_name'])
index = self.contact_author_role.findText(self.deposit_dict['contact_author_role'],
QtCore.Qt.MatchFixedString)
self.contact_author_role.setCurrentIndex(index)
index = self.contact_author_organization_type.findText(
self.deposit_dict['contact_author_organization_type'], QtCore.Qt.MatchFixedString)
self.contact_author_organization_type.setCurrentIndex(index)
self.contact_author_organization_name.setText(self.deposit_dict['contact_author_organization_name'])
self.contact_author_email.setText(self.deposit_dict['contact_author_email'])
self.contact_author_address.setText(self.deposit_dict['contact_author_address'])
self.contact_author_city.setText(self.deposit_dict['contact_author_city'])
self.contact_author_State_or_Province.setText(self.deposit_dict['contact_author_State_or_Province'])
self.contact_author_Zip_Code.setText(self.deposit_dict['contact_author_Zip_Code'])
self.contact_author_Country.setText(self.deposit_dict['contact_author_Country'])
self.contact_author_phone_number.setText(self.deposit_dict['contact_author_phone_number'])
self.contact_author_ORCID.setText(self.deposit_dict['contact_author_ORCID'])
index = self.Release_status_for_coordinates.findText(self.deposit_dict['Release_status_for_coordinates'],
QtCore.Qt.MatchFixedString)
self.Release_status_for_coordinates.setCurrentIndex(index)
index = self.Release_status_for_sequence.findText(self.deposit_dict['Release_status_for_sequence'],
QtCore.Qt.MatchFixedString)
self.Release_status_for_sequence.setCurrentIndex(index)
self.group_deposition_title.setText(self.deposit_dict['group_deposition_title'])
self.group_description.setText(self.deposit_dict['group_description'])
self.structure_title.setText(self.deposit_dict['structure_title'])
self.structure_title_apo.setText(self.deposit_dict['structure_title_apo'])
for n, name in enumerate(self.deposit_dict['structure_author_name'].split(';')):
self.structure_author_name_List[n].setText(name)
self.primary_citation_id.setText(self.deposit_dict['primary_citation_id'])
self.primary_citation_journal_abbrev.setText(self.deposit_dict['primary_citation_journal_abbrev'])
self.primary_citation_title.setText(self.deposit_dict['primary_citation_title'])
self.primary_citation_year.setText(self.deposit_dict['primary_citation_year'])
self.primary_citation_journal_volume.setText(self.deposit_dict['primary_citation_journal_volume'])
self.primary_citation_page_first.setText(self.deposit_dict['primary_citation_page_first'])
self.primary_citation_page_last.setText(self.deposit_dict['primary_citation_page_last'])
for n, name in enumerate(self.deposit_dict['primary_citation_author_name'].split(';')):
self.primary_citation_author_name_List[n].setText(name)
### entity 1
self.molecule_name.setText(self.deposit_dict['molecule_name'])
self.fragment_name_one_specific_mutation.setText(self.deposit_dict['fragment_name_one_specific_mutation'])
index = self.Source_organism_scientific_name.findText(self.deposit_dict['Source_organism_scientific_name'],
QtCore.Qt.MatchFixedString)
self.Source_organism_scientific_name.setCurrentIndex(index)
self.Source_organism_gene.setText(self.deposit_dict['Source_organism_gene'])
self.Source_organism_strain.setText(self.deposit_dict['Source_organism_strain'])
index = self.Expression_system_scientific_name.findText(
self.deposit_dict['Expression_system_scientific_name'], QtCore.Qt.MatchFixedString)
self.Expression_system_scientific_name.setCurrentIndex(index)
self.Expression_system_strain.setText(self.deposit_dict['Expression_system_strain'])
self.Expression_system_vector_type.setText(self.deposit_dict['Expression_system_vector_type'])
self.Expression_system_plasmid_name.setText(self.deposit_dict['Expression_system_plasmid_name'])
self.Manipulated_source_details.setText(self.deposit_dict['Manipulated_source_details'])
# try:
self.molecule_chain_one.setText(self.deposit_dict['molecule_chain_one'])
### entity 2
self.molecule_name_two.setText(self.deposit_dict['molecule_name_two'])
self.fragment_name_two_specific_mutation.setText(self.deposit_dict['fragment_name_two_specific_mutation'])
index = self.Source_organism_scientific_name_two.findText(self.deposit_dict['Source_organism_scientific_name_two'],
QtCore.Qt.MatchFixedString)
self.Source_organism_scientific_name_two.setCurrentIndex(index)
self.Source_organism_gene_two.setText(self.deposit_dict['Source_organism_gene_two'])
self.Source_organism_strain_two.setText(self.deposit_dict['Source_organism_strain_two'])
index = self.Expression_system_scientific_name_two.findText(
self.deposit_dict['Expression_system_scientific_name_two'], QtCore.Qt.MatchFixedString)
self.Expression_system_scientific_name_two.setCurrentIndex(index)
self.Expression_system_strain_two.setText(self.deposit_dict['Expression_system_strain_two'])
self.Expression_system_vector_type_two.setText(self.deposit_dict['Expression_system_vector_type_two'])
self.Expression_system_plasmid_name_two.setText(self.deposit_dict['Expression_system_plasmid_name_two'])
self.Manipulated_source_details_two.setText(self.deposit_dict['Manipulated_source_details_two'])
self.molecule_chain_two.setText(self.deposit_dict['molecule_chain_two'])
self.molecule_one_letter_sequence_uniprot_id_two.setText(
self.deposit_dict['molecule_two_letter_sequence_uniprot_id'])
self.molecule_one_letter_sequence_two.setText(self.deposit_dict['molecule_two_letter_sequence'])
# except KeyError:
# self.molecule_chain_one.setText('')
# ### entity 2
# self.molecule_name_two.setText('')
# self.fragment_name_two_specific_mutation.setText('')
# self.Source_organism_scientific_name_two.setCurrentIndex(0)
# self.Source_organism_gene_two.setText('')
# self.Source_organism_strain_two.setText('')
# self.Expression_system_scientific_name_two.setCurrentIndex(0)
# self.Expression_system_strain_two.setText('')
# self.Expression_system_vector_type_two.setText('')
# self.Expression_system_plasmid_name_two.setText('')
# self.Manipulated_source_details_two.setText('')
# self.molecule_chain_two.setText('')
# self.molecule_one_letter_sequence_uniprot_id_two.setText('')
# self.molecule_one_letter_sequence_two.setText('')
###
self.structure_keywords.setText(self.deposit_dict['structure_keywords'])
self.biological_assembly_chain_number.setText(self.deposit_dict['biological_assembly_chain_number'])
self.molecule_one_letter_sequence_uniprot_id.setText(
self.deposit_dict['molecule_one_letter_sequence_uniprot_id'])
self.molecule_one_letter_sequence.setText(self.deposit_dict['molecule_one_letter_sequence'])
self.SG_project_name.setText(self.deposit_dict['SG_project_name'])
self.full_name_of_SG_center.setText(self.deposit_dict['full_name_of_SG_center'])
index = self.crystallization_method.findText(self.deposit_dict['crystallization_method'],
QtCore.Qt.MatchFixedString)
self.crystallization_method.setCurrentIndex(index)
self.crystallization_pH.setText(self.deposit_dict['crystallization_pH'])
self.crystallization_temperature.setText(self.deposit_dict['crystallization_temperature'])
self.crystallization_details.setText(self.deposit_dict['crystallization_details'])
index = self.radiation_source.findText(self.deposit_dict['radiation_source'], QtCore.Qt.MatchFixedString)
self.radiation_source.setCurrentIndex(index)
index = self.radiation_source_type.findText(self.deposit_dict['radiation_source_type'],
QtCore.Qt.MatchFixedString)
self.radiation_source_type.setCurrentIndex(index)
self.radiation_wavelengths.setText(self.deposit_dict['radiation_wavelengths'])
index = self.radiation_detector.findText(self.deposit_dict['radiation_detector'],
QtCore.Qt.MatchFixedString)
self.radiation_detector.setCurrentIndex(index)
index = self.radiation_detector_type.findText(self.deposit_dict['radiation_detector_type'],
QtCore.Qt.MatchFixedString)
self.radiation_detector_type.setCurrentIndex(index)
self.data_collection_date.setText(self.deposit_dict['data_collection_date'])
self.data_collection_temperature.setText(self.deposit_dict['data_collection_temperature'])
self.data_collection_protocol.setText(self.deposit_dict['data_collection_protocol'])
self.pdbx_starting_model.setText(self.deposit_dict['pdbx_starting_model'])
index = self.data_integration_software.findText(self.deposit_dict['data_integration_software'],
QtCore.Qt.MatchFixedString)
self.data_integration_software.setCurrentIndex(index)
index = self.phasing_software.findText(self.deposit_dict['phasing_software'], QtCore.Qt.MatchFixedString)
self.phasing_software.setCurrentIndex(index)
self.pdbx_funding_organization_one.setText(self.deposit_dict['pdbx_funding_organization_one'])
self.pdbx_grant_number_one.setText(self.deposit_dict['pdbx_grant_number_one'])
index = self.pdbx_grant_country_one.findText(
self.deposit_dict['pdbx_grant_country_one'], QtCore.Qt.MatchFixedString)
self.pdbx_grant_country_one.setCurrentIndex(index)
self.pdbx_funding_organization_two.setText(self.deposit_dict['pdbx_funding_organization_two'])
self.pdbx_grant_number_two.setText(self.deposit_dict['pdbx_grant_number_two'])
index = self.pdbx_grant_country_two.findText(
self.deposit_dict['pdbx_grant_country_two'], QtCore.Qt.MatchFixedString)
self.pdbx_grant_country_two.setCurrentIndex(index)
self.pdbx_funding_organization_three.setText(self.deposit_dict['pdbx_funding_organization_three'])
self.pdbx_grant_number_three.setText(self.deposit_dict['pdbx_grant_number_three'])
index = self.pdbx_grant_country_three.findText(
self.deposit_dict['pdbx_grant_country_three'], QtCore.Qt.MatchFixedString)
self.pdbx_grant_country_three.setCurrentIndex(index)
except ValueError, e:
# self.update_status_bar('Sorry, this is not a XChemExplorer deposit file!')
self.update_log.error('file is not a valid .deposit file: ' + str(e))
    def update_deposit_dict(self):
        """Rebuild self.deposit_dict from the current deposition-form widget values.

        Inverse of update_deposit_input(): every widget value is stringified and
        stored under its dict key. Funding entries are optional; a funding block
        is only recorded (with its ordinal '1'/'2'/'3') if its organization
        field is non-blank, otherwise all four of its fields stay ''.
        The ';'-joined author-name lists are appended at the end.
        """
        # funding block 1: record only if the organization field is non-blank
        pdbx_funding_ordinal_one = ''
        pdbx_funding_organization_one = ''
        pdbx_grant_number_one = ''
        pdbx_grant_country_one = ''
        if str(self.pdbx_funding_organization_one.text()).replace(' ','') != '':
            pdbx_funding_ordinal_one = '1'
            pdbx_funding_organization_one = str(self.pdbx_funding_organization_one.text())
            pdbx_grant_number_one = str(self.pdbx_grant_number_one.text())
            pdbx_grant_country_one = str(self.pdbx_grant_country_one.currentText())
        # funding block 2
        pdbx_funding_ordinal_two = ''
        pdbx_funding_organization_two = ''
        pdbx_grant_number_two = ''
        pdbx_grant_country_two = ''
        if str(self.pdbx_funding_organization_two.text()).replace(' ','') != '':
            pdbx_funding_ordinal_two = '2'
            pdbx_funding_organization_two = str(self.pdbx_funding_organization_two.text())
            pdbx_grant_number_two = str(self.pdbx_grant_number_two.text())
            pdbx_grant_country_two = str(self.pdbx_grant_country_two.currentText())
        # funding block 3
        pdbx_funding_ordinal_three = ''
        pdbx_funding_organization_three = ''
        pdbx_grant_number_three = ''
        pdbx_grant_country_three = ''
        if str(self.pdbx_funding_organization_three.text()).replace(' ','') != '':
            pdbx_funding_ordinal_three = '3'
            pdbx_funding_organization_three = str(self.pdbx_funding_organization_three.text())
            pdbx_grant_number_three = str(self.pdbx_grant_number_three.text())
            pdbx_grant_country_three = str(self.pdbx_grant_country_three.currentText())
        self.deposit_dict = {
            'contact_author_PI_salutation': str(self.contact_author_PI_salutation.text()),
            'contact_author_PI_first_name': str(self.contact_author_PI_first_name.text()),
            'contact_author_PI_last_name': str(self.contact_author_PI_last_name.text()),
            'contact_author_PI_middle_name': str(self.contact_author_PI_middle_name.text()),
            'contact_author_PI_role': str(self.contact_author_PI_role.currentText()),
            'contact_author_PI_organization_type': str(self.contact_author_PI_organization_type.currentText()),
            'contact_author_PI_organization_name': str(self.contact_author_PI_organization_name.text()),
            'contact_author_PI_email': str(self.contact_author_PI_email.text()),
            'contact_author_PI_address': str(self.contact_author_PI_address.text()),
            'contact_author_PI_city': str(self.contact_author_PI_city.text()),
            'contact_author_PI_State_or_Province': str(self.contact_author_PI_State_or_Province.text()),
            'contact_author_PI_Zip_Code': str(self.contact_author_PI_Zip_Code.text()),
            'contact_author_PI_Country': str(self.contact_author_PI_Country.text()),
            'contact_author_PI_phone_number': str(self.contact_author_PI_phone_number.text()),
            'contact_author_PI_ORCID': str(self.contact_author_PI_ORCID.text()),
            'contact_author_salutation': str(self.contact_author_salutation.text()),
            'contact_author_first_name': str(self.contact_author_first_name.text()),
            'contact_author_last_name': str(self.contact_author_last_name.text()),
            'contact_author_middle_name': str(self.contact_author_middle_name.text()),
            'contact_author_role': str(self.contact_author_role.currentText()),
            'contact_author_organization_type': str(self.contact_author_organization_type.currentText()),
            'contact_author_organization_name': str(self.contact_author_organization_name.text()),
            'contact_author_email': str(self.contact_author_email.text()),
            'contact_author_address': str(self.contact_author_address.text()),
            'contact_author_city': str(self.contact_author_city.text()),
            'contact_author_State_or_Province': str(self.contact_author_State_or_Province.text()),
            'contact_author_Zip_Code': str(self.contact_author_Zip_Code.text()),
            'contact_author_Country': str(self.contact_author_Country.text()),
            'contact_author_phone_number': str(self.contact_author_phone_number.text()),
            'contact_author_ORCID': str(self.contact_author_ORCID.text()),
            'Release_status_for_coordinates': str(self.Release_status_for_coordinates.currentText()),
            'Release_status_for_sequence': str(self.Release_status_for_sequence.currentText()),
            'group_deposition_title': str(self.group_deposition_title.text()),
            'group_description': str(self.group_description.text()),
            'structure_title': str(self.structure_title.text()),
            'structure_title_apo': str(self.structure_title_apo.text()),
            'primary_citation_id': str(self.primary_citation_id.text()),
            'primary_citation_journal_abbrev': str(self.primary_citation_journal_abbrev.text()),
            'primary_citation_title': str(self.primary_citation_title.text()),
            'primary_citation_year': str(self.primary_citation_year.text()),
            'primary_citation_journal_volume': str(self.primary_citation_journal_volume.text()),
            'primary_citation_page_first': str(self.primary_citation_page_first.text()),
            'primary_citation_page_last': str(self.primary_citation_page_last.text()),
            ### entity 1
            'molecule_name': str(self.molecule_name.text()),
            'Source_organism_scientific_name': str(self.Source_organism_scientific_name.currentText()),
            'Source_organism_gene': str(self.Source_organism_gene.text()),
            'Source_organism_strain': str(self.Source_organism_strain.text()),
            'Expression_system_scientific_name': str(self.Expression_system_scientific_name.currentText()),
            'Expression_system_strain': str(self.Expression_system_strain.text()),
            'Expression_system_plasmid_name': str(self.Expression_system_plasmid_name.text()),
            'Expression_system_vector_type': str(self.Expression_system_vector_type.text()),
            'Manipulated_source_details': str(self.Manipulated_source_details.text()),
            'fragment_name_one_specific_mutation': str(self.fragment_name_one_specific_mutation.text()),
            'molecule_chain_one': str(self.molecule_chain_one.text()),
            ### entity 2
            'molecule_name_two': str(self.molecule_name_two.text()),
            'Source_organism_scientific_name_two': str(self.Source_organism_scientific_name_two.currentText()),
            'Source_organism_gene_two': str(self.Source_organism_gene_two.text()),
            'Source_organism_strain_two': str(self.Source_organism_strain_two.text()),
            'Expression_system_scientific_name_two': str(self.Expression_system_scientific_name_two.currentText()),
            'Expression_system_strain_two': str(self.Expression_system_strain_two.text()),
            'Expression_system_plasmid_name_two': str(self.Expression_system_plasmid_name_two.text()),
            'Expression_system_vector_type_two': str(self.Expression_system_vector_type_two.text()),
            'Manipulated_source_details_two': str(self.Manipulated_source_details_two.text()),
            'fragment_name_two_specific_mutation': str(self.fragment_name_two_specific_mutation.text()),
            'molecule_chain_two': str(self.molecule_chain_two.text()),
            'structure_keywords': str(self.structure_keywords.text()),
            'biological_assembly_chain_number': str(self.biological_assembly_chain_number.text()),
            'molecule_one_letter_sequence_uniprot_id': str(self.molecule_one_letter_sequence_uniprot_id.text()),
            # NOTE: the '..._two' widgets feed the 'molecule_two_letter_...' keys
            'molecule_two_letter_sequence_uniprot_id': str(self.molecule_one_letter_sequence_uniprot_id_two.text()),
            'SG_project_name': str(self.SG_project_name.text()),
            'full_name_of_SG_center': str(self.full_name_of_SG_center.text()),
            # sequences come from text edits; strip all whitespace/newlines
            'molecule_one_letter_sequence': str(self.molecule_one_letter_sequence.toPlainText()).replace(' ',
                                                                                                         '').replace(
                '\n', '').replace('\r', ''),
            'molecule_two_letter_sequence': str(self.molecule_one_letter_sequence_two.toPlainText()).replace(' ',
                                                                                                             '').replace(
                '\n', '').replace('\r', ''),
            'crystallization_method': str(self.crystallization_method.currentText()),
            'crystallization_pH': str(self.crystallization_pH.text()),
            'crystallization_temperature': str(self.crystallization_temperature.text()),
            'crystallization_details': str(self.crystallization_details.text()),
            'radiation_source': str(self.radiation_source.currentText()),
            'radiation_source_type': str(self.radiation_source_type.currentText()),
            'radiation_wavelengths': str(self.radiation_wavelengths.text()),
            'radiation_detector': str(self.radiation_detector.currentText()),
            'radiation_detector_type': str(self.radiation_detector_type.currentText()),
            'data_collection_date': str(self.data_collection_date.text()),
            'data_collection_temperature': str(self.data_collection_temperature.text()),
            'data_collection_protocol': str(self.data_collection_protocol.text()),
            'pdbx_starting_model': str(self.pdbx_starting_model.text()),
            'data_integration_software': str(self.data_integration_software.currentText()),
            'phasing_software': str(self.phasing_software.currentText()),
            'pdbx_funding_ordinal_one': pdbx_funding_ordinal_one,
            'pdbx_funding_organization_one': pdbx_funding_organization_one,
            'pdbx_grant_number_one': pdbx_grant_number_one,
            'pdbx_grant_country_one': pdbx_grant_country_one,
            'pdbx_funding_ordinal_two': pdbx_funding_ordinal_two,
            'pdbx_funding_organization_two': pdbx_funding_organization_two,
            'pdbx_grant_number_two': pdbx_grant_number_two,
            'pdbx_grant_country_two': pdbx_grant_country_two,
            'pdbx_funding_ordinal_three': pdbx_funding_ordinal_three,
            'pdbx_funding_organization_three': pdbx_funding_organization_three,
            'pdbx_grant_number_three': pdbx_grant_number_three,
            'pdbx_grant_country_three': pdbx_grant_country_three
        }
        # author lists are stored as a single ';'-separated string (trailing ';' removed)
        structure_author_name = ''
        for widget in self.structure_author_name_List:
            structure_author_name += str(widget.text()) + ';'
        self.deposit_dict['structure_author_name'] = structure_author_name[:-1]
        primary_citation_author_name = ''
        for widget in self.primary_citation_author_name_List:
            primary_citation_author_name += str(widget.text()) + ';'
        self.deposit_dict['primary_citation_author_name'] = primary_citation_author_name[:-1]
def get_deposit_dict_template(self):
deposit_dict_template = {
'contact_author_PI_salutation': None,
'contact_author_PI_first_name': None,
'contact_author_PI_last_name': None,
'contact_author_PI_middle_name': None,
'contact_author_PI_role': None,
'contact_author_PI_organization_type': None,
'contact_author_PI_organization_name': None,
'contact_author_PI_email': None,
'contact_author_PI_address': None,
'contact_author_PI_city': None,
'contact_author_PI_State_or_Province': None,
'contact_author_PI_Zip_Code': None,
'contact_author_PI_Country': None,
'contact_author_PI_phone_number': None,
'contact_author_PI_ORCID': None,
'contact_author_salutation': None,
'contact_author_first_name': None,
'contact_author_last_name': None,
'contact_author_middle_name': None,
'contact_author_role': None,
'contact_author_organization_type': None,
'contact_author_organization_name': None,
'contact_author_email': None,
'contact_author_address': None,
'contact_author_city': None,
'contact_author_State_or_Province': None,
'contact_author_Zip_Code': None,
'contact_author_Country': None,
'contact_author_phone_number': None,
'contact_author_ORCID': None,
'Release_status_for_coordinates': None,
'Release_status_for_sequence': None,
'group_deposition_title': None,
'group_description': None,
'structure_title': None,
'structure_title_apo': None,
'primary_citation_id': None,
'primary_citation_journal_abbrev': None,
'primary_citation_title': None,
'primary_citation_year': None,
'primary_citation_journal_volume': None,
'primary_citation_page_first': None,
'primary_citation_page_last': None,
### entity 1
'molecule_name': None,
'Source_organism_scientific_name': None,
'Source_organism_gene': None,
'Source_organism_strain': None,
'Expression_system_scientific_name': None,
'Expression_system_strain': None,
'Expression_system_plasmid_name': None,
'Expression_system_vector_type': None,
'Manipulated_source_details': None,
'fragment_name_one_specific_mutation': None,
'molecule_chain_one': None,
### entity 2
'molecule_name_two': None,
'Source_organism_scientific_name_two': None,
'Source_organism_gene_two': None,
'Source_organism_strain_two': None,
'Expression_system_scientific_name_two': None,
'Expression_system_strain_two': None,
'Expression_system_plasmid_name_two': None,
'Expression_system_vector_type_two': None,
'Manipulated_source_details_two': None,
'fragment_name_two_specific_mutation': None,
'molecule_chain_two': None,
'structure_keywords': None,
'biological_assembly_chain_number': None,
'molecule_one_letter_sequence_uniprot_id': None,
'molecule_two_letter_sequence_uniprot_id': None,
'SG_project_name': None,
'full_name_of_SG_center': None,
'molecule_one_letter_sequence': None,
'molecule_two_letter_sequence': None,
'crystallization_method': None,
'crystallization_pH': None,
'crystallization_temperature': None,
'crystallization_details': None,
'radiation_source': None,
'radiation_source_type': None,
'radiation_wavelengths': None,
'radiation_detector': None,
'radiation_detector_type': None,
'data_collection_date': None,
'data_collection_temperature': None,
'data_collection_protocol': None,
'pdbx_starting_model': None,
'data_integration_software': None,
'phasing_software': None,
'structure_author_name': None,
'primary_citation_author_name': None,
'pdbx_funding_organization_one': '',
'pdbx_grant_number_one': '',
'pdbx_grant_country_one': '',
'pdbx_funding_organization_two': '',
'pdbx_grant_number_two': '',
'pdbx_grant_country_two': '',
'pdbx_funding_organization_three': '',
'pdbx_grant_number_three': '',
'pdbx_grant_country_three': ''
}
return deposit_dict_template
def set_primary_citation_as_structure_authors(self, state):
if state == QtCore.Qt.Checked:
for n, entry in enumerate(self.structure_author_name_List):
self.primary_citation_author_name_List[n].setText(str(entry.text()))
else:
for n, entry in enumerate(self.primary_citation_author_name_List):
entry.setText('')
    def set_xce_logfile(self):
        """Let the user pick a new XCE logfile via a save dialog and switch logging to it."""
        file_name = str(QtGui.QFileDialog.getSaveFileName(self.window, 'Save file', self.current_directory))
        self.xce_logfile = str(file_name)
        self.xce_logfile_label.setText(str(self.xce_logfile))
        # reject an empty selection or a path with no filename component (ends in '/')
        if self.xce_logfile == '' or self.xce_logfile[self.xce_logfile.rfind('/') + 1:] == '':
            print('==> XCE: invalid file format')
        else:
            # start a fresh logfile and point the update logger at it
            XChemLog.startLog(self.xce_logfile).create_logfile(self.xce_version)
            self.update_log = XChemLog.updateLog(self.xce_logfile)
    def set_second_cif_file(self):
        """Ask the user for an additional CIF file to merge into the ligand CIF files."""
        filepath_temp = QtGui.QFileDialog.getOpenFileNameAndFilter(self.window, 'Select CIF File',
                                                                   self.initial_model_directory, '*.cif')
        # getOpenFileNameAndFilter returns (filename, selected_filter); keep the filename
        filepath = str(tuple(filepath_temp)[0])
        self.second_cif_file = str(filepath)
        self.second_cif_file_label.setText(str(self.second_cif_file))
        self.update_log.insert('user selected %s as CIF file for merging into ligand CIF files' %self.second_cif_file)
def select_datasource_columns_to_display(self):
columns_to_show = QtGui.QMessageBox()
columns_to_showLayout = columns_to_show.layout()
columns_in_data_source = self.db.return_column_list()
try:
columns_in_data_source = self.db.return_column_list()
except AttributeError:
print('==> XCE: please select a datasource file')
self.status_bar.showMessage('please select a datasource file')
return
column_dict = {}
vbox = QtGui.QVBoxLayout()
number_of_entries = len(columns_in_data_source)
columns_shown_in_dialog_column = 15
grid = QtGui.QGridLayout()
x = 0
y = 0
columns_to_ignore = self.db.columns_not_to_display()
for entries_added in range(number_of_entries):
if not columns_in_data_source[entries_added][1] in columns_to_ignore:
data_source_column = QtGui.QCheckBox(columns_in_data_source[entries_added][1])
column_dict[entries_added] = data_source_column
if columns_in_data_source[entries_added][1] in self.overview_datasource_table_columns:
data_source_column.setChecked(True)
grid.addWidget(data_source_column, y, x)
y += 1
if y == columns_shown_in_dialog_column:
y = 0
x += 1
vbox.addLayout(grid)
columns_to_showLayout.addLayout(vbox, 0, 0)
columns_to_show.addButton(QtGui.QPushButton('OK'), QtGui.QMessageBox.YesRole)
columns_to_show.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
reply = columns_to_show.exec_();
if reply == 0:
columns_to_show_list = ['Sample ID']
for key in column_dict:
if column_dict[key].isChecked():
columns_to_show_list.append(columns_in_data_source[key][1])
self.overview_datasource_table_columns = columns_to_show_list
self.populate_and_update_datasource_table()
def update_header_and_data_from_datasource(self):
self.update_log.insert('getting information for all samples from data source...')
self.db = XChemDB.data_source(os.path.join(self.database_directory, self.data_source_file))
self.update_log.insert('creating missing columns in data source')
self.db.create_missing_columns()
self.update_log.insert('load header and data from data source')
self.header, self.data = self.db.load_samples_from_data_source()
self.update_log.insert('get all samples in data source')
all_samples_in_db = self.db.execute_statement("select CrystalName from mainTable where CrystalName is not '';")
self.xtal_db_dict = {}
sampleID_column = 0
for n, entry in enumerate(self.header):
if entry == 'CrystalName':
sampleID_column = n
break
for line in self.data:
if str(line[sampleID_column]) != '':
db_dict = {}
for n, entry in enumerate(line):
if n != sampleID_column:
db_dict[str(self.header[n])] = str(entry)
self.xtal_db_dict[str(line[sampleID_column])] = db_dict
print('==> XCE: found ' + str(len(self.xtal_db_dict)) + ' samples')
def datasource_menu_save_samples(self):
print('hallo')
def datasource_menu_export_csv_file(self):
file_name = str(QtGui.QFileDialog.getSaveFileName(self.window, 'Save file', self.database_directory))
if file_name.rfind('.') != -1:
file_name = file_name[:file_name.rfind('.')] + '.csv'
else:
file_name = file_name + '.csv'
self.db.export_to_csv_file(file_name)
def datasource_menu_import_csv_file(self):
if self.data_source_set:
file_name = QtGui.QFileDialog.getOpenFileName(self.window, 'Open file', self.database_directory)
self.db.import_csv_file(file_name)
else:
self.update_status_bar('Please load a data source file first')
    def datasource_menu_update_datasource(self):
        """Start a background thread that synchronises the datasource with the filesystem."""
        self.work_thread = XChemThread.synchronise_db_and_filesystem(self.initial_model_directory,
                                                                     os.path.join(self.database_directory,
                                                                                  self.data_source_file),
                                                                     self.panddas_directory, self.xce_logfile,
                                                                     'project_directory')
        # progress/status updates, completion and sample reload are reported back via Qt signals
        self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
        self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
        self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
        self.connect(self.work_thread, QtCore.SIGNAL("datasource_menu_reload_samples"),
                     self.datasource_menu_reload_samples)
        self.work_thread.start()
def export_data_for_WONKA(self):
self.update_log.insert('exporting CSV file for input into WONKA')
self.db.export_csv_for_WONKA()
def on_context_menu(self, point):
# show context menu
for key in self.dewar_configuration_dict:
if self.dewar_configuration_dict[key] == self.sender():
self.dewar_label_active = key
self.popMenu.exec_(self.sender().mapToGlobal(point))
def on_context_menu_reprocess_data(self, point):
# show context menu
self.popMenu_for_datasets_reprocess_table.exec_(self.sender().mapToGlobal(point))
def flag_sample_for_recollection(self):
self.dewar_configuration_dict[self.dewar_label_active].setStyleSheet("background-color: yellow")
def undo_flag_sample_for_recollection(self):
self.dewar_configuration_dict[self.dewar_label_active].setStyleSheet("background-color: gray")
def show_html_summary_in_firefox(self, xtal):
html_summary = self.albula_button_dict[xtal][2]
print('html_summary', html_summary)
new = 2
webbrowser.open(html_summary, new=new)
def update_pandda_crystal_from_combobox(self):
self.pandda_analyse_crystal_from_selection_combobox.clear()
self.pandda_analyse_crystal_from_selection_combobox.addItem('use all datasets')
if os.path.isfile(os.path.join(self.database_directory, self.data_source_file)):
self.load_crystal_form_from_datasource()
if self.xtalform_dict != {}:
print(self.xtalform_dict)
for key in self.xtalform_dict:
self.pandda_analyse_crystal_from_selection_combobox.addItem(key)
def populate_reference_combobox(self, combobox):
combobox.clear()
for reference_file in self.reference_file_list:
combobox.addItem(reference_file[0])
def populate_refinement_outcome_combobox(self, combobox):
combobox.clear()
for stage in self.refinement_stage:
combobox.addItem(stage)
def populate_target_selection_combobox(self, combobox):
combobox.clear()
for target in self.target_list:
combobox.addItem(target)
def combo_selected(self, text):
self.map_url = str(self.panddas_directory + '/analyses/html_summaries/pandda_map_' + text + '.html')
self.pandda_maps_html.load(QtCore.QUrl(self.map_url))
self.pandda_maps_html.show()
def add_map_html(self):
self.map_list = glob.glob(str(self.panddas_directory + '/analyses/html_summaries/pandda_map_*.html'))
self.list_options = []
for i in range(0, len(self.map_list)):
string = self.map_list[i]
string = string.replace('/analyses/html_summaries/pandda_map_', '')
string = string.replace('.html', '')
string = string.replace(self.panddas_directory, '')
self.list_options.append(string)
self.pandda_map_list.clear()
for i in range(0, len(self.list_options)):
self.pandda_map_list.addItem(self.list_options[i])
self.connect(self.pandda_map_list, QtCore.SIGNAL('activated(QString)'), self.combo_selected)
def open_config_file(self):
    """Load an XChemExplorer .conf file (a pickled settings dict) and apply it.

    Restores directory locations and tunables onto ``self`` and
    ``self.settings``, then refreshes every label/table that depends on them.
    Missing keys are reported and skipped; a non-config pickle is rejected.
    """
    file_name_temp = QtGui.QFileDialog.getOpenFileNameAndFilter(self.window, 'Open file', self.current_directory,
                                                                '*.conf')
    file_name = tuple(file_name_temp)[0]
    try:
        pickled_settings = pickle.load(open(file_name, 'rb'))
    except Exception:
        # bail out early: previously execution continued and crashed with a
        # NameError because pickled_settings was never assigned
        print('==> XCE: failed to open config file...')
        return
    # attribute name on self -> key in the pickled settings dict
    key_list = {#'beamline_directory': 'beamline_directory',
                'initial_model_directory': 'initial_model_directory',
                'panddas_directory': 'panddas_directory',
                'html_export_directory': 'html_export_directory',
                'group_deposit_directory': 'group_deposit_directory',
                'database_directory': 'database_directory',
                'datasets_summary_file': 'datasets_summary',
                #"'data_source_file': 'data_source',
                'ccp4_scratch_directory': 'ccp4_scratch',
                'allowed_unitcell_difference_percent': 'unitcell_difference',
                'acceptable_low_resolution_limit_for_data': 'too_low_resolution_data',
                #'reference_directory_temp': 'reference_directory'
                }
    for current_key in key_list:
        try:
            # setattr/getattr replace the original exec()-built statements:
            # same effect, without executing dynamically built code on config data
            setattr(self, current_key, pickled_settings[key_list[current_key]])
            self.settings[key_list[current_key]] = getattr(self, current_key)
            print('==> XCE: found ' + key_list[current_key])
        except Exception:
            # NOTE(review): the original also exec'd "<key> = ''" here, which only
            # created a dead local variable; that no-op has been dropped
            print('==> XCE: WARNING: Failed to find settings for: ' + key_list[current_key] + ' Error type: '
                  + str(sys.exc_info()[0]))
            continue
    try:
        # pickled_settings is already loaded above; the original re-loaded the
        # file here a second time for no benefit
        if pickled_settings['beamline_directory'] != self.beamline_directory:
            self.beamline_directory = pickled_settings['beamline_directory']
            self.target_list, self.visit_list = XChemMain.get_target_and_visit_list(self.beamline_directory,
                                                                                    self.read_agamemnon.isChecked())
            self.settings['beamline_directory'] = self.beamline_directory
            self.populate_target_selection_combobox(self.target_selection_combobox)
        self.layout_funcs.pandda_html(self)
        self.show_pandda_html_summary()
        self.html_export_directory_label.setText(self.html_export_directory)
        self.group_deposition_directory_label.setText(self.group_deposit_directory)
        self.datasets_summary_file_label.setText(self.datasets_summary_file)
        self.data_source_file = pickled_settings['data_source']
        if self.data_source_file != '':
            self.settings['data_source'] = os.path.join(self.database_directory, self.data_source_file)
            # this is probably not necessary
            if os.path.isfile(self.settings['data_source']):
                write_enabled = self.check_write_permissions_of_data_source()
                if not write_enabled:
                    self.data_source_file_label.setText('')
                    self.data_source_set = False
                else:
                    self.data_source_file_label.setText(
                        os.path.join(self.database_directory, self.data_source_file))
                    self.data_source_set = True
                    self.db = XChemDB.data_source(os.path.join(self.database_directory, self.data_source_file))
                    self.datasource_menu_reload_samples()
        reference_directory_temp = pickled_settings['reference_directory']
        if reference_directory_temp != self.reference_directory:
            self.reference_directory = reference_directory_temp
            self.settings['reference_directory'] = self.reference_directory
            self.update_reference_files(' ')
            for xtal in self.initial_model_dimple_dict:
                reference_file_selection_combobox = self.initial_model_dimple_dict[xtal][1]
                self.populate_reference_combobox(reference_file_selection_combobox)
        self.initial_model_directory_label.setText(self.initial_model_directory)
        self.panddas_directory_label.setText(self.panddas_directory)
        self.pandda_output_data_dir_entry.setText(self.panddas_directory)
        self.reference_directory_label.setText(self.reference_directory)
        self.beamline_directory_label.setText(self.beamline_directory)
        self.ccp4_scratch_directory_label.setText(self.ccp4_scratch_directory)
        self.reference_file_list = self.get_reference_file_list(' ')
        self.pandda_input_data_dir_entry.setText(os.path.join(self.initial_model_directory, '*'))
        self.update_all_tables()
    except KeyError:
        self.update_status_bar('Sorry, this is not a XChemExplorer config file!')
        self.update_log.insert('Sorry, this is not a XChemExplorer config file!')
    except Exception:
        print("Unexpected error:", sys.exc_info()[0])
        raise
def save_config_file(self):
    """Pickle the current settings dict to a user-chosen .conf file."""
    file_name = str(QtGui.QFileDialog.getSaveFileName(self.window, 'Save file', self.current_directory))
    # enforce a .conf extension, replacing any extension the user typed
    dot = file_name.rfind('.')
    if dot != -1:
        file_name = file_name[:dot] + '.conf'
    else:
        file_name += '.conf'
    pickle.dump(self.settings, open(file_name, 'wb'))
def update_reference_files(self, reference_root):
    """Re-scan the reference directory and refresh both reference comboboxes."""
    self.reference_file_list = self.get_reference_file_list(reference_root)
    for combobox in (self.reference_file_selection_combobox,
                     self.pandda_reference_file_selection_combobox):
        self.populate_reference_combobox(combobox)
def check_status_rerun_dimple_on_all_autoprocessing_files(self):
    # TODO(review): placeholder status handler — emits a debug marker only;
    # no actual status check is implemented yet
    print('hallo')
def rerun_dimple_on_all_autoprocessing_files(self):
    """Queue DIMPLE re-runs for every autoprocessing result that still has an MTZ on disk."""
    job_list = []
    self.update_log.insert('preparing to run DIMPLE on all autoprocessing files')
    for xtal in self.data_collection_dict:
        for entry in self.data_collection_dict[xtal]:
            if entry[0] != 'logfile':
                continue
            db_dict = entry[6]
            try:
                mtz_with_name = os.path.join(db_dict['DataProcessingPathToMTZfile'],
                                             db_dict['DataProcessingMTZfileName'])
                if os.path.isfile(mtz_with_name) or \
                        os.path.isfile(os.path.join(db_dict['DataProcessingPathToMTZfile'])):
                    job_list = self.get_job_list_for_dimple_rerun(xtal, job_list, db_dict, entry)
            except KeyError:
                # older datasource entries may lack the MTZ file name; fall
                # back to the bare path before giving up on this entry
                try:
                    if os.path.isfile(os.path.join(db_dict['DataProcessingPathToMTZfile'])):
                        job_list = self.get_job_list_for_dimple_rerun(xtal, job_list, db_dict, entry)
                except KeyError:
                    continue
    if job_list:
        self.update_log.insert('trying to run DIMPLE on ALL auto-processing files')
        self.check_before_running_dimple(job_list)
def run_dimple_on_selected_autoprocessing_file(self, instruction):
    """Collect initial-refinement (DIMPLE/PIPEDREAM/PHENIX) jobs for every ticked crystal.

    instruction: the selected workflow action text; forwarded unchanged to
    check_before_running_dimple(), which derives the pipeline name from it.
    Each queued job is [xtal, tag, mtz-in, reference-pdb, reference-mtz-flag,
    reference-cif-flag].
    """
    job_list = []
    for xtal in sorted(self.initial_model_dimple_dict):
        # print(xtal)
        if self.initial_model_dimple_dict[xtal][0].isChecked():
            # print(xtal + ' is checked...')
            db_dict = self.xtal_db_dict[xtal]
            # the if statement below is so convoluted, so that it is compatible with older data source files
            if os.path.isfile(
                    os.path.join(db_dict['ProjectDirectory'], xtal, db_dict['DataProcessingPathToMTZfile'],
                                 db_dict['DataProcessingMTZfileName'])) or \
                    os.path.isfile(
                        os.path.join(db_dict['ProjectDirectory'], xtal, db_dict['DataProcessingPathToMTZfile'])) or \
                    os.path.isfile(os.path.join(db_dict['DataProcessingPathToMTZfile'],
                                                db_dict['DataProcessingMTZfileName'])) or \
                    os.path.isfile(os.path.join(db_dict['DataProcessingPathToMTZfile'])):
                # pick the first existing candidate as the input MTZ: absolute
                # path+filename first, then bare path, then project-relative forms
                if os.path.isfile(
                        os.path.join(db_dict['DataProcessingPathToMTZfile'], db_dict['DataProcessingMTZfileName'])):
                    mtzin = os.path.join(db_dict['DataProcessingPathToMTZfile'],
                                         db_dict['DataProcessingMTZfileName'])
                elif os.path.isfile(os.path.join(db_dict['DataProcessingPathToMTZfile'])):
                    mtzin = os.path.join(db_dict['DataProcessingPathToMTZfile'])
                elif os.path.isfile(
                        os.path.join(db_dict['ProjectDirectory'], xtal, db_dict['DataProcessingPathToMTZfile'],
                                     db_dict['DataProcessingMTZfileName'])):
                    mtzin = os.path.join(db_dict['ProjectDirectory'], xtal, db_dict['DataProcessingPathToMTZfile'],
                                         db_dict['DataProcessingMTZfileName'])
                elif os.path.isfile(
                        os.path.join(db_dict['ProjectDirectory'], xtal, db_dict['DataProcessingPathToMTZfile'])):
                    mtzin = os.path.join(db_dict['ProjectDirectory'], xtal, db_dict['DataProcessingPathToMTZfile'])
                reference_file = str(self.initial_model_dimple_dict[xtal][1].currentText())
                reference_file_pdb = os.path.join(self.reference_directory, reference_file + '.pdb')
                # a reference PDB is mandatory; the MTZ and CIF flags are optional extras
                if not os.path.isfile(reference_file_pdb):
                    continue
                if os.path.isfile(os.path.join(self.reference_directory, reference_file + '.mtz')):
                    reference_file_mtz = ' -R ' + os.path.join(self.reference_directory, reference_file + '.mtz')
                else:
                    reference_file_mtz = ''
                if os.path.isfile(os.path.join(self.reference_directory, reference_file + '.cif')):
                    reference_file_cif = ' --libin ' + os.path.join(self.reference_directory,
                                                                    reference_file + '.cif')
                else:
                    reference_file_cif = ''
                job_list.append([xtal,
                                 'dimple_rerun_on_selected_file',
                                 mtzin,
                                 reference_file_pdb,
                                 reference_file_mtz,
                                 reference_file_cif])
            else:
                # report every path that was tried so the user can fix the datasource
                print('WARNING: ' + xtal + ' has not been submitted to dimple because no files were found: ')
                if not os.path.isfile(os.path.join(db_dict['ProjectDirectory'], xtal, db_dict['DataProcessingPathToMTZfile'],
                                                   db_dict['DataProcessingMTZfileName'])):
                    print('    ' + str(os.path.join(db_dict['ProjectDirectory'], xtal, db_dict['DataProcessingPathToMTZfile'],
                                                    db_dict['DataProcessingMTZfileName'])) + ' is missing')
                if not os.path.isfile(os.path.join(db_dict['ProjectDirectory'], xtal, db_dict['DataProcessingPathToMTZfile'])):
                    print('    ' + str(os.path.join(db_dict['ProjectDirectory'], xtal, db_dict['DataProcessingPathToMTZfile'])) + ' is missing')
                if not os.path.isfile(os.path.join(db_dict['DataProcessingPathToMTZfile'])):
                    print('    ' + str(os.path.join(db_dict['DataProcessingPathToMTZfile']) + ' is missing'))
    if job_list:
        self.update_log.insert('trying to run DIMPLE on SELECTED auto-processing files')
        self.check_before_running_dimple(job_list,instruction)
def remove_selected_dimple_files(self, instruction):
    """Delete initial-refinement output for every ticked crystal after confirmation.

    instruction: workflow action text; must mention dimple, pipedream or phenix.
    Launches a background XChemThread that performs the actual removal.
    """
    if 'dimple' in instruction.lower():
        pipeline = 'dimple'
    elif 'pipedream' in instruction.lower():
        pipeline = 'pipedream'
    elif 'phenix' in instruction.lower():
        pipeline = 'phenix.ligand_pipeline'
    else:
        # previously an unrecognised instruction left 'pipeline' unbound and
        # raised NameError further down; abort cleanly instead
        self.update_log.insert('unknown initial refinement pipeline: ' + str(instruction))
        return
    job_list = []
    for xtal in sorted(self.initial_model_dimple_dict):
        if self.initial_model_dimple_dict[xtal][0].isChecked():
            job_list.append(xtal)
    if job_list:
        msgBox = QtGui.QMessageBox()
        msgBox.setText("Do you really want to delete {0!s} {1!s} files?".format(len(job_list),self.preferences['initial_refinement_pipeline']))
        msgBox.addButton(QtGui.QPushButton('Go'), QtGui.QMessageBox.YesRole)
        msgBox.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
        reply = msgBox.exec_()
        if reply == 0:
            self.status_bar.showMessage('preparing to remove {0!s} files'.format(pipeline))
            self.update_log.insert('preparing to remove {0!s} files'.format(pipeline))
            self.work_thread = XChemThread.remove_selected_dimple_files(job_list,
                                                                        self.initial_model_directory,
                                                                        self.xce_logfile,
                                                                        self.database_directory,
                                                                        self.data_source_file,
                                                                        pipeline)
            self.explorer_active = 1
            # "finished()" was previously connected twice, which made
            # thread_finished fire twice per run; connect it once only
            self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
            self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("datasource_menu_reload_samples"),
                         self.datasource_menu_reload_samples)
            self.work_thread.start()
def set_results_from_selected_pipeline(self, instruction):
    """Mark one initial-refinement pipeline's results as current for the ticked crystals.

    instruction: workflow action text; must mention dimple, pipedream or phenix.
    Launches a background XChemThread that updates the datasource accordingly.
    """
    if 'dimple' in instruction.lower():
        pipeline = 'dimple'
    elif 'pipedream' in instruction.lower():
        pipeline = 'pipedream'
    elif 'phenix' in instruction.lower():
        pipeline = 'phenix.ligand_pipeline'
    else:
        # previously an unrecognised instruction left 'pipeline' unbound and
        # raised NameError below; abort cleanly instead
        self.update_log.warning('unknown initial refinement pipeline: ' + str(instruction))
        return
    self.update_log.warning('selecting initial refinement results from '+pipeline)
    job_list = []
    for xtal in sorted(self.initial_model_dimple_dict):
        if self.initial_model_dimple_dict[xtal][0].isChecked():
            job_list.append(xtal)
    self.work_thread = XChemThread.set_results_from_selected_pipeline(job_list,
                                                                      self.initial_model_directory,
                                                                      self.xce_logfile,
                                                                      self.database_directory,
                                                                      self.data_source_file,
                                                                      pipeline)
    self.explorer_active = 1
    # "finished()" was previously connected twice, which made thread_finished
    # fire twice per run; connect it once only
    self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
    self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
    self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
    self.connect(self.work_thread, QtCore.SIGNAL("datasource_menu_reload_samples"),
                 self.datasource_menu_reload_samples)
    self.work_thread.start()
def run_xia2_on_selected_datasets(self, overwrite):
    """Submit xia2 reprocessing jobs for all ticked datasets.

    overwrite: True to reprocess even where results already exist.
    Collects the chosen protocols (3d/3dii/dials), optional space group,
    reference MTZ, I/sigma resolution limit and CC1/2 cut-off, then launches
    XChemProcess.run_xia2 in a background thread.
    """
    # check which programs should be run
    protocol = []
    if self.xia2_3d_checkbox.isChecked():
        protocol.append('3d')
    if self.xia2_3dii_checkbox.isChecked():
        protocol.append('3dii')
    if self.xia2_dials_checkbox.isChecked():
        protocol.append('dials')
    # space group
    spg = []
    if str(self.reprocess_space_group_comboxbox.currentText()) != 'ignore':
        spg.append(str(self.reprocess_space_group_comboxbox.currentText()))
    # reference file
    ref = []
    if os.path.isfile(self.diffraction_data_reference_mtz):
        ref.append(self.diffraction_data_reference_mtz)
    # resolution limit
    reso_limit = []
    if str(self.reprocess_isigma_combobox.currentText()) != 'default':
        reso_limit.append(str(self.reprocess_isigma_combobox.currentText()))
    # cc 1/2
    cc_half = []
    if str(self.reprocess_cc_half_combobox.currentText()) != 'default':
        cc_half.append(str(self.reprocess_cc_half_combobox.currentText()))
    # dataset id lives in column 0, sample id in column 1 of the table
    run_dict = {}
    allRows = self.datasets_reprocess_table.rowCount()
    for row in xrange(0, allRows):
        dataset_id = str(self.datasets_reprocess_table.item(row, 0).text())
        sample_id = str(self.datasets_reprocess_table.item(row, 1).text())
        if self.diffraction_data_table_dict[dataset_id][0].isChecked():
            run_dict[sample_id] = self.diffraction_data_dict[dataset_id]
    if protocol != [] and run_dict != {}:
        self.work_thread = XChemProcess.run_xia2(self.initial_model_directory,
                                                 run_dict,
                                                 protocol,
                                                 spg,
                                                 ref,
                                                 reso_limit,
                                                 cc_half,
                                                 self.xce_logfile,
                                                 self.external_software,
                                                 self.ccp4_scratch_directory,
                                                 self.max_queue_jobs,
                                                 os.path.join(self.database_directory, self.data_source_file),
                                                 overwrite)
        self.explorer_active = 1
        # "finished()" was previously connected twice, which made
        # thread_finished fire twice per run; connect it once only
        self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
        self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
        self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
        self.work_thread.start()
    else:
        self.update_log.insert('please select datasets and/ or data processing protocol')
        self.update_status_bar('please select datasets and/ or data processing protocol')
def update_reprocessing_table(self):
    """Refresh the DataProcessingStatus column (column 7) of the reprocessing table."""
    # status -> RGB background colour
    status_colours = {
        'running': (100, 230, 150),
        'pending': (20, 100, 230),
        'started': (230, 240, 110),
        'finished': (255, 255, 255),
    }
    for row in xrange(self.datasets_reprocess_table.rowCount()):
        sample_id = str(self.datasets_reprocess_table.item(row, 1).text())
        if sample_id not in self.xtal_db_dict:
            continue
        status = self.xtal_db_dict[sample_id]['DataProcessingStatus']
        cell_text = QtGui.QTableWidgetItem()
        cell_text.setText(status)
        cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
        if status in status_colours:
            cell_text.setBackground(QtGui.QColor(*status_colours[status]))
        self.datasets_reprocess_table.setItem(row, 7, cell_text)
def get_job_list_for_dimple_rerun(self, xtal, job_list, db_dict, entry):
    """Append a DIMPLE re-run job for *xtal* to *job_list* when a suitable reference exists.

    A reference must share the data's point group; among those, the one with
    the smallest relative unit-cell volume difference wins.  Returns the
    (possibly extended) job_list.
    """
    self.status_bar.showMessage('checking: ' + str(
        os.path.join(db_dict['DataProcessingPathToMTZfile'], db_dict['DataProcessingMTZfileName'])))
    candidates = []
    for reference in self.reference_file_list:
        # first we need one in the same pointgroup
        if reference[5] != db_dict['DataProcessingPointGroup']:
            continue
        try:
            volume_diff = math.fabs(1 - (float(db_dict['DataProcessingUnitCellVolume']) / float(reference[4])))
        except ValueError:
            continue
        candidates.append([reference[0], volume_diff])
    if candidates:
        reference_file = min(candidates, key=lambda item: item[1])[0]
        visit = entry[1]
        run = entry[2]
        autoproc = entry[4]
        reference_file_pdb = os.path.join(self.reference_directory, reference_file + '.pdb')
        ref_mtz_path = os.path.join(self.reference_directory, reference_file + '.mtz')
        reference_file_mtz = ' -R ' + ref_mtz_path if os.path.isfile(ref_mtz_path) else ''
        ref_cif_path = os.path.join(self.reference_directory, reference_file + '.cif')
        reference_file_cif = ' --libin ' + ref_cif_path if os.path.isfile(ref_cif_path) else ''
        mtzin = os.path.join(self.initial_model_directory, xtal, xtal + '.mtz')
        if os.path.isfile(mtzin):
            self.update_log.insert('adding ' + xtal + visit + '-' + run + autoproc + ' to list')
            job_list.append([xtal,
                             visit + '-' + run + autoproc,
                             mtzin,
                             reference_file_pdb,
                             reference_file_mtz,
                             reference_file_cif])
    self.status_bar.showMessage('idle')
    return job_list
def check_before_running_dimple(self, job_list, instruction=''):
    """Confirm with the user, then launch initial-refinement jobs for *job_list*.

    job_list: entries of [xtal, tag, mtz-in, reference-pdb, mtz-flag, cif-flag].
    instruction: workflow action text used to pick the pipeline
    (dimple/pipedream/phenix); defaults to '' which maps to no pipeline and
    aborts with a log message.
    """
    if 'dimple' in instruction.lower():
        pipeline = 'dimple'
    elif 'pipedream' in instruction.lower():
        pipeline = 'pipedream'
    elif 'phenix' in instruction.lower():
        pipeline = 'phenix.ligand_pipeline'
    else:
        # previously an unrecognised instruction left 'pipeline' unbound and
        # raised NameError when building the worker thread; abort cleanly
        self.update_log.insert('unknown initial refinement pipeline: ' + str(instruction))
        return
    msgBox = QtGui.QMessageBox()
    msgBox.setText(
        "Do you really want to run {0!s} {1!s} jobs?\nNote: we will not run more than {2!s} at once on the cluster!".format(
            len(job_list),self.preferences['initial_refinement_pipeline'],self.preferences['max_queue_jobs']))
    msgBox.addButton(QtGui.QPushButton('Go'), QtGui.QMessageBox.YesRole)
    msgBox.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
    reply = msgBox.exec_()
    if reply == 0:
        self.status_bar.showMessage('preparing {0!s} DIMPLE jobs'.format(len(job_list)))
        self.update_log.insert('preparing to run {0!s} DIMPLE jobs'.format(len(job_list)))
        if self.external_software['qsub_array']:
            self.update_log.insert('we will be running an ARRAY job on the DLS computer cluster')
            self.update_log.insert(
                'please note that the maximum number of jobs that will be running at once is {0!s}'.format(
                    self.max_queue_jobs))
            self.update_log.insert(
                'you can change this in the PREFERENCES menu, but be warned that to high a number might break the cluster!')
            self.update_log.insert('preparing input files for DIMPLE...')
        self.work_thread = XChemThread.run_dimple_on_all_autoprocessing_files_new(job_list,
                                                                                  self.initial_model_directory,
                                                                                  self.external_software,
                                                                                  self.ccp4_scratch_directory,
                                                                                  self.database_directory,
                                                                                  self.data_source_file,
                                                                                  self.max_queue_jobs,
                                                                                  self.xce_logfile,
                                                                                  self.using_remote_qsub_submission,
                                                                                  self.remote_qsub_submission,
                                                                                  self.preferences['dimple_twin_mode'],
                                                                                  pipeline)
        self.explorer_active = 1
        # "finished()" was previously connected twice, which made
        # thread_finished fire twice per run; connect it once only
        self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
        self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
        self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
        self.connect(self.work_thread, QtCore.SIGNAL("datasource_menu_reload_samples"),
                     self.datasource_menu_reload_samples)
        self.work_thread.start()
def open_csv_file_translate_datasetID_to_sampleID(self):
    """Ask the user for a CSV file mapping dataset IDs to sample IDs and remember it."""
    selection = QtGui.QFileDialog.getOpenFileNameAndFilter(self.window, 'Open file', self.current_directory,
                                                           '*.csv')
    csv_file = tuple(selection)[0]
    self.translate_datasetID_to_sampleID_csv_label.setText(csv_file)
    self.translate_datasetID_to_sampleID_file = csv_file
def update_datasets_reprocess_table(self, data_dict):
    """Rebuild the reprocessing table from *data_dict* (dataset id -> diffraction data info).

    Also resets self.diffraction_data_table_dict (dataset id -> [checkbox]) so
    run_xia2_on_selected_datasets() can see which rows are ticked.
    """
    self.update_log.insert('updating reprocess datasets table')
    print('updating reprocess datasets table')
    self.diffraction_data_table_dict = {}
    self.diffraction_data_dict = data_dict
    self.diffraction_data_search_info = 'found ' + str(len(self.diffraction_data_dict)) + ' datasets'
    self.diffraction_data_search_label.setText(self.diffraction_data_search_info)
    self.update_log.insert(self.diffraction_data_search_info)
    self.datasource_menu_reload_samples()
    # update table
    column_name = self.db.translate_xce_column_list_to_sqlite(self.datasets_reprocess_columns)
    # set rows to 0
    self.datasets_reprocess_table.setRowCount(0)
    for entry in sorted(self.diffraction_data_dict):
        self.update_log.insert(str(self.diffraction_data_dict[entry]))
        # look up database info for this dataset if the sample is known
        if entry in self.xtal_db_dict:
            db_dict = self.xtal_db_dict[entry]
        else:
            db_dict = {}
        row = self.datasets_reprocess_table.rowCount()
        self.datasets_reprocess_table.insertRow(row)
        for column, header in enumerate(column_name):
            if header[0] == 'Dataset ID' or header[0] == 'Sample ID':
                cell_text = QtGui.QTableWidgetItem()
                cell_text.setText(str(entry))
                cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                self.datasets_reprocess_table.setItem(row, column, cell_text)
            elif header[0] == 'Run\nxia2':
                # one checkbox per dataset; starts unticked
                run_xia2 = QtGui.QCheckBox()
                run_xia2.toggle()
                self.datasets_reprocess_table.setCellWidget(row, column, run_xia2)
                run_xia2.setChecked(False)
                self.diffraction_data_table_dict[entry] = [run_xia2]
            else:
                cell_text = QtGui.QTableWidgetItem()
                if db_dict != {}:
                    # colour the status cell according to job state
                    if header[0] == 'DataProcessing\nStatus':
                        if str(db_dict[header[1]]) == 'running':
                            cell_text.setBackground(QtGui.QColor(100, 230, 150))
                        elif str(db_dict[header[1]]) == 'pending':
                            cell_text.setBackground(QtGui.QColor(20, 100, 230))
                        elif str(db_dict[header[1]]) == 'started':
                            cell_text.setBackground(QtGui.QColor(230, 240, 110))
                        elif str(db_dict[header[1]]) == 'finished':
                            cell_text.setBackground(QtGui.QColor(255, 255, 255))
                    cell_text.setText(str(db_dict[header[1]]))
                else:
                    cell_text.setText('')
                cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                self.datasets_reprocess_table.setItem(row, column, cell_text)
def update_all_tables(self):
    """Refresh every main-window table after settings or data-source changes."""
    self.update_log.insert('checking for new reference files')
    self.update_status_bar('checking for new reference files')
    self.reference_file_list = self.get_reference_file_list(' ')
    # (announcement, refresh action) pairs executed in the original order;
    # each step is reported in the log window and the status bar first
    refresh_steps = [
        ('updating Overview table', self.populate_and_update_datasource_table),
        ('updating Maps table', self.create_maps_table),
        ('updating PANDDA table', self.populate_pandda_analyse_input_table),
        ('updating REFINEMENT table', self.populate_and_update_refinement_table),
        ('updating REPROCESSING table', self.update_reprocessing_table),
    ]
    for message, refresh in refresh_steps:
        self.update_log.insert(message)
        self.update_status_bar(message)
        refresh()
    self.update_status_bar('idle')
    self.update_summary_plot()
def change_allowed_unitcell_difference_percent(self, text):
    """Store the user-entered max unit-cell difference (percent) on self and in settings.

    text: widget text; may be an int ('12') or decimal ('12.5', truncated to 12).
    Non-numeric input is ignored.  The original fallback sliced at the first '.'
    and crashed with an uncaught ValueError on input like 'a.b'.
    """
    raw = str(text)
    try:
        value = int(raw)
    except ValueError:
        try:
            # truncate decimals the same way the old prefix-slice did
            value = int(float(raw))
        except ValueError:
            return
    self.allowed_unitcell_difference_percent = value
    self.settings['unitcell_difference'] = value
    self.update_log.insert(
        'changing max allowed unit cell difference between reference and xtal to {0!s} percent'.format(
            value))
def change_max_queue_jobs(self, text):
    """Store the user-entered cluster job limit on self and in settings.

    text: widget text; may be an int ('100') or decimal ('100.5', truncated).
    Non-numeric input is ignored.  The original fallback sliced at the first '.'
    and crashed with an uncaught ValueError on input like 'a.b'.
    """
    raw = str(text)
    try:
        value = int(raw)
    except ValueError:
        try:
            # truncate decimals the same way the old prefix-slice did
            value = int(float(raw))
        except ValueError:
            return
    self.max_queue_jobs = value
    self.settings['max_queue_jobs'] = value
    self.update_log.insert('changing max number of jobs running simultaneously on DLS cluster to {0!s}'.format(
        value))
def change_acceptable_low_resolution_limit(self, text):
    """Store the user-entered low-resolution cut-off; non-numeric input is ignored."""
    try:
        limit = float(text)
    except ValueError:
        return
    self.acceptable_low_resolution_limit_for_data = limit
    self.settings['too_low_resolution_data'] = limit
def change_filename_root(self, text):
    """Remember the file-name root typed by the user (also mirrored into settings)."""
    root = str(text)
    self.filename_root = root
    self.settings['filename_root'] = root
def button_clicked(self):
    """Shared click handler for all workflow 'Run'/'Status' buttons.

    Identifies the sending widget via self.sender(), maps it to its workflow
    task, and either runs the selected instruction or reports its status.
    Also offers to create a new SQLite datasource when none is set yet.
    """
    if not self.data_source_set:
        print('sender text bit')
        if self.sender().text() == "Create New Data\nSource (SQLite)":
            file_name = str(QtGui.QFileDialog.getSaveFileName(self.window, 'Save file', self.database_directory))
            # make sure that the file always has .sqlite extension
            if file_name.rfind('.') != -1:
                file_name = file_name[:file_name.rfind('.')] + '.sqlite'
            else:
                file_name = file_name + '.sqlite'
            self.db = XChemDB.data_source(file_name)
            print('==> XCE: creating new data source')
            self.db.create_empty_data_source_file()
            self.db.create_missing_columns()
            if self.data_source_file == '':
                self.database_directory = file_name[:file_name.rfind('/')]
                self.data_source_file = file_name[file_name.rfind('/') + 1:]
                self.data_source_file_label.setText(os.path.join(self.database_directory, self.data_source_file))
                self.settings['database_directory'] = self.database_directory
                self.settings['data_source'] = self.data_source_file
                self.data_source_set = True
        else:
            self.no_data_source_selected()
            print('No datasource selected')
            # NOTE(review): control deliberately falls through to the widget
            # scan below even when no datasource was selected
            pass
    # first find out which of the 'Run' or 'Status' buttons is sending
    for item in self.workflow_widget_dict:
        for widget in self.workflow_widget_dict[item]:
            if widget == self.sender():
                # get index of item in self.workflow; Note this index should be the same as the index
                # of the self.main_tab_widget which belongs to this task
                task_index = self.workflow.index(item)
                instruction = str(self.workflow_widget_dict[item][0].currentText())
                print(instruction)
                action = str(self.sender().text())
                if self.main_tab_widget.currentIndex() == task_index:
                    if self.explorer_active == 0 and self.data_source_set == True:
                        if action == 'Run':
                            print('==> XCE: Remote submission status = ' + str(self.using_remote_qsub_submission))
                            # print(instruction)
                            self.prepare_and_run_task(instruction)
                        elif action == 'Status':
                            self.get_status_of_workflow_milestone(instruction)
                            # marker files written by pandda determine the label state
                            if os.path.exists(str(self.panddas_directory + '/pandda.done')):
                                self.pandda_status = 'Finished!'
                                self.pandda_status_label.setStyleSheet('color: green')
                            if os.path.exists(str(self.panddas_directory + '/pandda.running')):
                                self.pandda_status = 'Running...'
                                self.pandda_status_label.setStyleSheet('color: orange')
                            if os.path.exists(str(self.panddas_directory + '/pandda.errored')):
                                self.pandda_status = 'Error encountered... please check the log files for pandda!'
                                self.pandda_status_label.setStyleSheet('color: red')
                            self.pandda_status_label.setText(str('STATUS: ' + self.pandda_status))
                else:
                    self.need_to_switch_main_tab(task_index)
def get_status_of_workflow_milestone(self, instruction):
    """Print a status report for the workflow task named by *instruction*."""
    # refresh the sample table first so the counts reported below are current
    self.datasource_menu_reload_samples()
    jobs_on_cluster = XChemMain.get_jobs_running_on_cluster()
    self.update_log.insert('getting status updates...')
    self.status_bar.showMessage('please check terminal window for further information')
    self.update_log.insert('{0!s} samples are currently in database'.format(str(len(self.xtal_db_dict))))
    # map the instruction text onto the matching cluster job category
    if 'DIMPLE' in instruction:
        XChemMain.print_cluster_status_message('dimple', jobs_on_cluster, self.xce_logfile)
    elif 'Create CIF/PDB/PNG file' in instruction:
        XChemMain.print_acedrg_status(self.xce_logfile, self.xtal_db_dict)
        XChemMain.print_cluster_status_message('acedrg', jobs_on_cluster, self.xce_logfile)
    elif instruction.startswith('Run xia2 on selected datasets'):
        XChemMain.print_cluster_status_message('xia2', jobs_on_cluster, self.xce_logfile)
    elif 'pandda' in instruction.lower():
        XChemMain.print_cluster_status_message('pandda', jobs_on_cluster, self.xce_logfile)
    elif 'coot' in instruction.lower():
        XChemMain.print_cluster_status_message('refmac', jobs_on_cluster, self.xce_logfile)
def prepare_and_run_task(self, instruction):
    """Dispatch the workflow action named by *instruction* to its handler.

    instruction: the exact text of the selected combobox entry for the
    current workflow task.  Unknown instructions are silently ignored.
    """
    # --- datasets ---
    if instruction == 'Get New Results from Autoprocessing':
        self.rescore = False
        self.check_for_new_autoprocessing_results()
    elif instruction == 'Rescore Datasets':
        self.rescore = True
        self.select_best_autoprocessing_result()
    # if instruction == 'Get New Results from Autoprocessing':
    #     self.check_for_new_autoprocessing_or_rescore(False)
    #     self.update_header_and_data_from_datasource()
    #     self.update_all_tables()
    #
    # elif instruction == 'Rescore Datasets':
    #     self.check_for_new_autoprocessing_or_rescore(True)
    # elif instruction == "Read PKL file":
    #     summary = pickle.load(open(self.datasets_summary_file, "rb"))
    #     self.create_widgets_for_autoprocessing_results_only(summary)
    elif instruction == 'Run xia2 on selected datasets':
        self.run_xia2_on_selected_datasets(False)
    elif instruction == 'Run xia2 on selected datasets - overwrite':
        self.run_xia2_on_selected_datasets(True)
    # --- initial refinement (same handler; pipeline derived from the text) ---
    # elif instruction == 'Run DIMPLE on All Autoprocessing MTZ files':
    #     self.rerun_dimple_on_all_autoprocessing_files()
    # elif instruction == 'Run initial refinement on selected MTZ files':
    #     self.run_dimple_on_selected_autoprocessing_file()
    elif instruction == 'Run DIMPLE on selected MTZ files':
        self.run_dimple_on_selected_autoprocessing_file(instruction)
    elif instruction == 'Run PIPEDREAM on selected MTZ files':
        self.run_dimple_on_selected_autoprocessing_file(instruction)
    elif instruction == 'Run PHENIX.LIGAND_PIPELINE on selected MTZ files':
        self.run_dimple_on_selected_autoprocessing_file(instruction)
    # elif instruction == 'Remove selected initial refinement files':
    #     self.remove_selected_dimple_files()
    elif instruction == 'Remove selected DIMPLE files':
        self.remove_selected_dimple_files(instruction)
    elif instruction == 'Remove selected PIPEDREAM files':
        self.remove_selected_dimple_files(instruction)
    elif instruction == 'Remove selected PHENIX.LIGAND_PIPELINE files':
        self.remove_selected_dimple_files(instruction)
    # elif instruction == 'Set only results from selected pipeline':
    #     self.set_results_from_selected_pipeline()
    elif instruction == 'Set DIMPLE output':
        self.set_results_from_selected_pipeline(instruction)
    elif instruction == 'Set PIPEDREAM output':
        self.set_results_from_selected_pipeline(instruction)
    elif instruction == 'Set PHENIX.LIGAND_PIPELINE output':
        self.set_results_from_selected_pipeline(instruction)
    # --- compound restraints ---
    # elif instruction == 'Create CIF/PDB/PNG file of ALL compounds':
    #     self.create_cif_pdb_png_files('ALL')
    # elif instruction == 'Create CIF/PDB/PNG file of NEW compounds':
    #     self.create_cif_pdb_png_files('NEW')
    elif instruction == 'Create CIF/PDB/PNG file of SELECTED compounds':
        self.create_cif_pdb_png_files('SELECTED')
    elif instruction == 'Merge ligand CIF file with selected compounds':
        self.merge_cif_files('merge')
    elif instruction == 'Restore original CIF file of selected compounds':
        self.merge_cif_files('restore')
    elif instruction == 'Fit ligands into maps after initial refinement':
        self.fit_ligands_into_dimple_maps()
    # --- PanDDA ---
    elif instruction == 'pandda.analyse':
        self.run_pandda_analyse('production_run')
    elif instruction == 'pandda.analyse (PanDDA2)':
        self.run_pandda_analyse('production_run_pandda_two')
    elif instruction == 'pre-run for ground state model':
        self.run_pandda_analyse('pre_run')
    elif instruction == 'pandda.inspect':
        self.run_pandda_inspect()
    elif instruction == 'run pandda.inspect at home':
        self.run_pandda_inspect_at_home()
    elif instruction == 'Export NEW PANDDA models':
        update_datasource_only = False
        which_models = 'new'
        self.run_pandda_export(update_datasource_only, which_models)
    elif instruction == 'Export ALL PANDDA models':
        update_datasource_only = False
        which_models = 'all'
        self.run_pandda_export(update_datasource_only, which_models)
    elif instruction == 'Export SELECTED PANDDA models':
        update_datasource_only = False
        which_models = 'selected'
        self.run_pandda_export(update_datasource_only, which_models)
    elif instruction == 'refine ALL bound-state models with BUSTER':
        self.run_refine_bound_state_with_buster('all')
    elif instruction == 'refine NEW bound-state models with BUSTER':
        self.run_refine_bound_state_with_buster('new')
    elif instruction == 'refine ALL bound-state models with BUSTER (no sanity check)':
        self.run_refine_bound_state_with_buster('allnocheck')
    elif instruction == 'refine NEW bound-state models with BUSTER (no sanity check)':
        self.run_refine_bound_state_with_buster('newnocheck')
    # elif instruction == 'refine NEW bound-state models with BUSTER - NEW':
    #     self.run_refine_bound_state_with_buster_new('new')
    elif instruction == 'cluster datasets':
        self.cluster_datasets_for_pandda()
    elif instruction == 'Update datasource with results from pandda.inspect':
        update_datasource_only = True
        which_models = 'all'
        self.run_pandda_export(update_datasource_only, which_models)
    elif instruction == 'Show HTML summary':
        self.show_pandda_html_summary()
    elif instruction == 'Event Map -> SF':
        self.convert_event_maps_to_SF()
    elif instruction == 'apo -> mmcif':
        self.convert_apo_to_mmcif()
    elif instruction == 'check modelled ligands':
        self.compare_modelled_ligands_and_panddaTable()
    # --- COOT variants: the instruction text selects the interface flavour ---
    elif instruction.startswith("Open COOT") or instruction == 'Build ground state model':
        if not self.coot_running:
            self.update_log.insert('starting coot...')
            if instruction == "Open COOT":
                interface = 'new'
            elif instruction == "Open COOT - REFMAC refinement -":
                interface = 'new'
            elif instruction == "Open COOT - test -":
                interface = 'test'
            elif instruction == "Open COOT for old PanDDA":
                interface = 'panddaV1'
            elif instruction == 'Build ground state model':
                interface = 'reference'
            elif instruction == 'Open COOT - BUSTER refinement -':
                interface = 'buster'
            elif instruction == 'Open COOT - dimple_twin -':
                interface = 'dimple_twin'
            else:
                interface = 'old'
            # print self.settings
            self.work_thread = XChemThread.start_COOT(self.settings, interface)
            self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
            self.work_thread.start()
    elif instruction == 'Update Deposition Table':
        self.update_deposition_table()
def check_status_create_png_of_soaked_compound(self):
number_of_samples = 0
running = 0
timestamp_list = []
cif_file_generated = 0
for folder in glob.glob(os.path.join(self.initial_model_directory, '*', 'compound')):
number_of_samples += 1
if os.path.isfile(os.path.join(folder, 'RESTRAINTS_IN_PROGRESS')):
running += 1
timestamp = datetime.fromtimestamp(
os.path.getmtime(os.path.join(folder, 'RESTRAINTS_IN_PROGRESS'))).strftime('%Y-%m-%d %H:%M:%S')
timestamp_list.append(timestamp)
for cif_file in glob.glob(os.path.join(folder, '*.cif')):
if os.path.isfile(cif_file):
cif_file_generated += 1
if timestamp_list:
last_timestamp = max(timestamp_list)
else:
last_timestamp = 'n/a'
message = 'Datasets: ' + str(number_of_samples) + ', jobs running: ' + str(running) + ', jobs finished: ' + str(
cif_file_generated) + ', last job submmitted: ' + str(last_timestamp)
self.status_bar.showMessage(message)
        # NOTE(review): this fragment appears to be the tail of a method whose
        # `def` line is not visible in this chunk; `start_thread` and
        # `rescore_only` are presumably a local flag and a parameter of that
        # enclosing method — confirm against the full file.
        if start_thread:
            # No target selected: warn that XCE will only parse the project
            # directory, and let the user back out.
            if self.target == '=== SELECT TARGET ===':
                msgBox = QtGui.QMessageBox()
                warning = ('*** WARNING ***\n'
                           'You did not select a target!\n'
                           'In this case we will only parse the project directory!\n'
                           'Please note that this option is usually only useful in case you reprocessed your data.\n'
                           'Do you want to continue?')
                msgBox.setText(warning)
                msgBox.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
                msgBox.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.RejectRole)
                reply = msgBox.exec_();
                # exec_() returns the clicked button's index: 0 == 'Yes'
                if reply == 0:
                    start_thread = True
                else:
                    start_thread = False
            else:
                start_thread = True
        if start_thread:
            # Launch background parse of auto-processing results from disc and
            # wire its progress/status/result signals to the GUI.
            self.work_thread = XChemThread.read_autoprocessing_results_from_disc(self.visit_list,
                                                                                 self.target,
                                                                                 self.reference_file_list,
                                                                                 self.database_directory,
                                                                                 self.data_collection_dict,
                                                                                 self.preferences,
                                                                                 self.datasets_summary_file,
                                                                                 self.initial_model_directory,
                                                                                 rescore_only,
                                                                                 self.acceptable_low_resolution_limit_for_data,
                                                                                 os.path.join(self.database_directory,
                                                                                              self.data_source_file),
                                                                                 self.xce_logfile)
            self.explorer_active = 1
            self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
            self.connect(self.work_thread, QtCore.SIGNAL("create_widgets_for_autoprocessing_results_only"),
                         self.create_widgets_for_autoprocessing_results_only)
            self.work_thread.start()
def save_files_to_initial_model_folder(self):
self.work_thread = XChemThread.save_autoprocessing_results_to_disc(self.dataset_outcome_dict,
self.data_collection_table_dict,
self.data_collection_column_three_dict,
self.data_collection_dict,
self.database_directory,
self.data_source_file,
self.initial_model_directory,
self.preferences,
self.datasets_summary_file)
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
    def run_pandda_analyse(self, run):
        """Collect GUI settings and launch pandda.analyse in a worker thread.

        run -- 'pre_run' asks the user to confirm a short appendix tag and
        applies pre-run overrides; 'production_run_pandda_two' uses the
        pandda2 wrapper; any other value runs the standard pandda.analyse.
        """
        # Snapshot of all pandda.analyse settings taken from the GUI widgets.
        pandda_params = {
            'data_dir': str(self.pandda_input_data_dir_entry.text()),
            'out_dir': str(self.pandda_output_data_dir_entry.text()),
            'submit_mode': str(self.pandda_submission_mode_selection_combobox.currentText()),
            'nproc': str(self.pandda_nproc_entry.text()),
            'min_build_datasets': str(self.pandda_min_build_dataset_entry.text()),
            'pdb_style': str(self.pandda_pdb_style_entry.text()),
            'mtz_style': str(self.pandda_mtz_style_entry.text()),
            'sort_event': str(self.pandda_sort_event_combobox.currentText()),
            'average_map': str(self.pandda_calc_map_combobox.currentText()),
            'max_new_datasets': str(self.pandda_max_new_datasets_entry.text()),
            'grid_spacing': str(self.pandda_grid_spacing_entry.text()),
            'keyword_arguments': str(self.pandda_keyword_arguments_entry.text()),
            'pandda_dir_structure': str(self.pandda_input_data_dir_entry.text()),
            'perform_diffraction_data_scaling': str(self.wilson_checkbox.isChecked()),
            'filter_pdb': str(self.pandda_reference_file_selection_combobox.currentText()),
            'reference_dir': self.reference_directory,
            'appendix': '',
            # number of datasets = samples that already have a dimple.pdb
            'N_datasets': len(glob.glob(os.path.join(self.initial_model_directory, '*', 'dimple.pdb'))),
            'write_mean_map': 'interesting',
            'pandda_table': self.pandda_analyse_data_table,
            'use_remote': self.using_remote_qsub_submission,
            'remote_string': self.remote_qsub_submission
        }
        if run == 'pre_run':
            # Modal dialog: explain the pre-run and let the user edit the
            # appendix tag (default 'pre') or cancel.
            msgBox = QtGui.QMessageBox()
            msgBoxLayout = msgBox.layout()
            vbox = QtGui.QVBoxLayout()
            vbox.addWidget(QtGui.QLabel(XChemToolTips.pandda_pre_run(self.reference_directory)))
            hbox = QtGui.QHBoxLayout()
            hbox.addWidget(QtGui.QLabel('appendix:'))
            appendix = QtGui.QLineEdit()
            appendix.setText('pre')
            appendix.setFixedWidth(200)
            hbox.addWidget(appendix)
            vbox.addLayout(hbox)
            msgBoxLayout.addLayout(vbox, 0, 0)
            msgBox.addButton(QtGui.QPushButton('Go'), QtGui.QMessageBox.YesRole)
            msgBox.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
            reply = msgBox.exec_();
            # exec_() returns the clicked button's index: 0 == 'Go'
            if reply == 0:
                # pre-run overrides: tag output, cap datasets and write all mean maps
                pandda_params['appendix'] = str(appendix.text())
                pandda_params['max_new_datasets'] = '100'
                pandda_params['N_datasets'] = 100
                pandda_params['write_mean_map'] = 'all'
            else:
                return None
        self.update_log.insert('preparing pandda.analyse input script')
        if run == 'production_run_pandda_two':
            self.work_thread = XChemPANDDA.run_pandda_two_analyse(pandda_params, self.xce_logfile,
                                                                  os.path.join(self.database_directory, self.data_source_file))
        else:
            self.work_thread = XChemPANDDA.run_pandda_analyse(pandda_params, self.xce_logfile,
                                                              os.path.join(self.database_directory, self.data_source_file))
        #self.connect(self.work_thread, QtCore.SIGNAL("datasource_menu_reload_samples"),
        #self.datasource_menu_reload_samples)
        self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
        self.work_thread.start()
def cluster_datasets_for_pandda(self):
pandda_params = {
'out_dir': str(self.pandda_output_data_dir_entry.text()),
'pdb_style': str(self.pandda_pdb_style_entry.text()),
'mtz_style': str(self.pandda_mtz_style_entry.text())
}
self.update_log.insert('starting giant.cluster_mtzs_and_pdbs')
self.work_thread = XChemPANDDA.giant_cluster_datasets(self.initial_model_directory, pandda_params,
self.xce_logfile, os.path.join(self.database_directory,
self.data_source_file),
run_pandda_analyse)
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("datasource_menu_reload_samples"),
self.datasource_menu_reload_samples)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def run_pandda_inspect(self):
self.settings['panddas_directory'] = str(self.pandda_output_data_dir_entry.text())
print('==> XCE: starting pandda.inspect')
self.work_thread = XChemThread.start_pandda_inspect(self.settings, self.xce_logfile)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def run_pandda_inspect_at_home(self):
self.work_thread = XChemPANDDA.run_pandda_inspect_at_home(self.panddas_directory, self.xce_logfile)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
def convert_event_maps_to_SF(self):
self.update_log.insert('converting all event maps in {0!s} to mtz files'.format(self.initial_model_directory))
# self.work_thread = XChemPANDDA.convert_all_event_maps_in_database(self.initial_model_directory,
# self.xce_logfile,
# os.path.join(self.database_directory,
# self.data_source_file))
self.work_thread = XChemPANDDA.find_event_map_for_ligand(self.initial_model_directory,
self.xce_logfile,self.external_software)
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def convert_apo_to_mmcif(self):
self.work_thread = XChemPANDDA.convert_apo_structures_to_mmcif(self.panddas_directory,
self.xce_logfile)
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def compare_modelled_ligands_and_panddaTable(self):
self.update_log.insert('checking agreement of ligands in refine.pdb and entries in panddaTable')
self.work_thread = XChemPANDDA.check_number_of_modelled_ligands(self.initial_model_directory,
self.xce_logfile,
os.path.join(self.database_directory,
self.data_source_file))
self.explorer_active = 1
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.connect(self.work_thread, QtCore.SIGNAL("show_error_dict"), self.show_error_dict)
self.work_thread.start()
    def run_pandda_export(self, update_datasource_only, which_models):
        """Export pandda.inspect models and/or update the database.

        update_datasource_only -- if True, only write pandda.inspect results
            into the datasource; no files are exported.
        which_models -- 'all', 'new' or 'selected'; exporting 'all' requires
            explicit user confirmation since it can overwrite refinements.
        """
        # Snapshot of all pandda settings taken from the GUI widgets.
        pandda_params = {
            'data_dir': str(self.pandda_input_data_dir_entry.text()),
            'out_dir': str(self.pandda_output_data_dir_entry.text()),
            'submit_mode': str(self.pandda_submission_mode_selection_combobox.currentText()),
            'nproc': str(self.pandda_nproc_entry.text()),
            'min_build_datasets': str(self.pandda_min_build_dataset_entry.text()),
            'pdb_style': str(self.pandda_pdb_style_entry.text()),
            'mtz_style': str(self.pandda_mtz_style_entry.text()),
            'sort_event': str(self.pandda_sort_event_combobox.currentText()),
            'average_map': str(self.pandda_calc_map_combobox.currentText()),
            'max_new_datasets': str(self.pandda_max_new_datasets_entry.text()),
            'grid_spacing': str(self.pandda_grid_spacing_entry.text()),
            'pandda_dir_structure': str(self.pandda_input_data_dir_entry.text()),
            'perform_diffraction_data_scaling': str(self.wilson_checkbox.isChecked()),
            'filter_pdb': str(self.pandda_reference_file_selection_combobox.currentText()),
            'reference_dir': self.reference_directory,
            'appendix': '',
            # number of datasets = samples that already have a dimple.pdb
            'N_datasets': len(glob.glob(os.path.join(self.initial_model_directory, '*', 'dimple.pdb'))),
            'write_mean_map': 'interesting',
            'pandda_table': self.pandda_analyse_data_table,
            'use_remote': self.using_remote_qsub_submission,
            'remote_string': self.remote_qsub_submission
        }
        self.settings['panddas_directory'] = str(self.pandda_output_data_dir_entry.text())
        if update_datasource_only:
            self.update_log.insert('updating data source with results from pandda.inspect')
        else:
            self.update_log.insert(
                'exporting PANDDA models, updating data source and launching inital refinement for new models')
        start_thread = False
        if which_models == 'all':
            # exporting everything can clobber manual work -> ask for confirmation
            self.update_log.insert('exporting ALL models! *** WARNING *** This may overwrite previous refinements!!!')
            msgBox = QtGui.QMessageBox()
            msgBox.setText("*** WARNING ***\nThis will overwrite all your manual selections!\nDo you want to continue?")
            msgBox.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
            msgBox.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.RejectRole)
            reply = msgBox.exec_();
            # exec_() returns the clicked button's index: 0 == 'Yes'
            if reply == 0:
                if update_datasource_only:
                    self.update_log.insert('will update panddaTable in database only')
                else:
                    self.update_log.insert('will export ALL models!')
                start_thread = True
            else:
                start_thread = False
        else:
            # 'new' / 'selected' models need no confirmation
            self.update_log.insert('exporting new models only')
            start_thread = True
        if start_thread:
            self.work_thread = XChemPANDDA.run_pandda_export(self.panddas_directory,
                                                             os.path.join(self.database_directory,
                                                                          self.data_source_file),
                                                             self.initial_model_directory, self.xce_logfile,
                                                             update_datasource_only, which_models, pandda_params)
            self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
            self.work_thread.start()
# def run_refine_bound_state_with_buster(self,which_models):
# start_thread = True
# if start_thread:
# self.work_thread = XChemPANDDA.refine_bound_state_with_buster(self.panddas_directory,
# os.path.join(self.database_directory,
# self.data_source_file),
# self.initial_model_directory, self.xce_logfile,
# which_models)
# self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
# self.work_thread.start()
def run_refine_bound_state_with_buster(self,which_models):
start_thread = True
if start_thread:
self.work_thread = XChemPANDDA.export_and_refine_ligand_bound_models(self.panddas_directory,
os.path.join(self.database_directory,
self.data_source_file),
self.initial_model_directory, self.xce_logfile,
which_models)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.work_thread.start()
def show_pandda_html_summary(self):
self.pandda_initial_html.load(QtCore.QUrl(self.pandda_initial_html_file))
self.pandda_initial_html.show()
self.pandda_analyse_html.load(QtCore.QUrl(self.pandda_analyse_html_file))
self.pandda_analyse_html.show()
self.add_map_html()
self.pandda_inspect_html.load(QtCore.QUrl(self.pandda_inspect_html_file))
self.pandda_inspect_html.show()
def create_cif_pdb_png_files(self, todo):
tmp = self.db.execute_statement(
"select CrystalName,CompoundCode,CompoundSmiles from mainTable where CrystalName is not '' and CompoundSmiles is not '' and CompoundSmiles is not NULL;")
compound_list = []
for item in tmp:
if str(item[1]) == '' or str(item[1]) == 'NULL':
compoundID = 'compound'
else:
compoundID = str(item[1])
if todo == 'ALL':
compound_list.append([str(item[0]), compoundID, str(item[2])])
elif todo == 'NEW':
if not os.path.isfile(os.path.join(self.initial_model_directory, str(item[0]), compoundID + '.cif')):
compound_list.append([str(item[0]), compoundID, str(item[2])])
elif todo == 'SELECTED':
if str(item[0]) in self.initial_model_dimple_dict:
if self.initial_model_dimple_dict[str(item[0])][0].isChecked():
compound_list.append([str(item[0]), compoundID, str(item[2])])
if compound_list:
self.update_log.insert(
'trying to create cif and pdb files for ' + str(len(compound_list)) + ' compounds using ACEDRG...')
if self.external_software['qsub']:
self.update_log.insert(
'will try sending ' + str(len(compound_list)) + ' jobs to your computer cluster!')
elif self.external_software['qsub_array']:
self.update_log.insert('will try sending ' + str(
len(compound_list)) + ' jobs as part of an ARRAY job to your computer cluster!')
else:
self.update_log.insert('apparently no cluster available, so will run ' + str(
len(compound_list)) + ' sequential jobs on one core of your local machine.')
self.update_log.insert('this could take a while...')
self.explorer_active = 1
self.work_thread = XChemThread.create_png_and_cif_of_compound(self.external_software,
self.initial_model_directory,
compound_list,
self.database_directory,
self.data_source_file,
todo,
self.ccp4_scratch_directory,
self.xce_logfile,
self.max_queue_jobs,
self.restraints_program)
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.connect(self.work_thread, QtCore.SIGNAL("datasource_menu_reload_samples"),
self.datasource_menu_reload_samples)
self.work_thread.start()
def fit_ligands_into_dimple_maps(self):
tmp = self.db.execute_statement(
"select CrystalName,CompoundCode,CompoundSmiles from mainTable where CrystalName is not '' and CompoundSmiles is not '' and CompoundSmiles is not NULL;")
compound_list = []
for item in tmp:
if str(item[1]) == '' or str(item[1]) == 'NULL':
compoundID = 'compound'
else:
compoundID = str(item[1])
if str(item[0]) in self.initial_model_dimple_dict:
if self.initial_model_dimple_dict[str(item[0])][0].isChecked():
compound_list.append([str(item[0]), compoundID, str(item[2])])
if compound_list:
self.update_log.insert(
'trying to auto-fitting into inital maps for ' + str(len(compound_list)) + ' compounds...')
if self.external_software['qsub']:
self.update_log.insert(
'will try sending ' + str(len(compound_list)) + ' jobs to your computer cluster!')
elif self.external_software['qsub_array']:
self.update_log.insert('will try sending ' + str(
len(compound_list)) + ' jobs as part of an ARRAY job to your computer cluster!')
else:
self.update_log.insert('apparently no cluster available, so will run ' + str(
len(compound_list)) + ' sequential jobs on one core of your local machine.')
self.update_log.insert('this could take a while...')
self.explorer_active = 1
self.work_thread = XChemThread.fit_ligands(self.external_software,
self.initial_model_directory,
compound_list,
self.database_directory,
self.data_source_file,
self.ccp4_scratch_directory,
self.xce_logfile,
self.max_queue_jobs)
self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
self.connect(self.work_thread, QtCore.SIGNAL("datasource_menu_reload_samples"),
self.datasource_menu_reload_samples)
self.work_thread.start()
    def merge_cif_files(self, todo):
        """Merge a second CIF file into (or restore) the per-sample ligand restraints.

        todo -- 'merge' validates self.second_cif_file (compound code must not
        be LIG or DRG) and asks for confirmation; 'restore' puts the original
        CIF files back.
        """
        start_thread = False
        if todo == 'merge':
            self.update_log.insert('trying to merge %s with ligand restraint files in project directory' %self.second_cif_file)
        elif todo == 'restore':
            self.update_log.insert('restoring original CIF files')
            # restore needs no validation or confirmation
            start_thread = True
        if todo == 'merge':
            if os.path.isfile(str(self.second_cif_file)):
                self.update_log.insert('checking compound code of second CIF file (%s)' % self.second_cif_file)
                self.update_log.insert('Note: LIG and DRG are not allowed!')
                # iotbx (cctbx) is only needed here, hence the local import
                import iotbx.cif
                cif_model = iotbx.cif.reader(file_path=self.second_cif_file).model()
                cif_block = cif_model["comp_list"]
                ligID = cif_block["_chem_comp.id"]
                self.update_log.insert('found the following compound codes in the supplied CIF file: %s' % str(list(ligID)))
                if 'LIG' in list(ligID) or 'DRG' in list(ligID):
                    self.update_log.error('please change compound code to something other than LIG or DRG')
                    start_thread = False
                else:
                    start_thread = True
            else:
                self.update_log.error(XChemToolTips.second_cif_file_not_exists())
                start_thread = False
            if start_thread:
                # confirmation dialog before touching any restraint files
                msgBox = QtGui.QMessageBox()
                msgBox.setText(XChemToolTips.second_cif_file_info(self.second_cif_file))
                msgBox.addButton(QtGui.QPushButton('OK'), QtGui.QMessageBox.YesRole)
                msgBox.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
                reply = msgBox.exec_();
                # exec_() returns the clicked button's index: 0 == 'OK'
                if reply == 0:
                    start_thread = True
                else:
                    start_thread = False
            else:
                self.status_bar.showMessage('Error. Please check terminal window for further information')
        # collect the samples flagged for merging (ticked in the maps table)
        tmp = self.db.execute_statement(
            "select CrystalName,CompoundCode from mainTable where CrystalName is not '' and CompoundSmiles is not '' and CompoundSmiles is not NULL;")
        compound_list = []
        for item in tmp:
            xtal = str(item[0])
            compoundID = str(item[1])
            if compoundID == '' or compoundID == 'NULL':
                self.update_log.warning('%s: no compound ID in database; skipping...' %xtal)
            else:
                if str(item[0]) in self.initial_model_dimple_dict:
                    if self.initial_model_dimple_dict[str(item[0])][0].isChecked():
                        self.update_log.warning('%s: %s is flagged for merging' % (xtal, compoundID))
                        compound_list.append([xtal, compoundID])
        if compound_list == []:
            self.update_log.error('Either no compound ID information in database or no sample selected!')
            start_thread = False
        if start_thread:
            self.explorer_active = 1
            self.work_thread = XChemThread.merge_cif_files(self.initial_model_directory,
                                                           self.xce_logfile,
                                                           self.second_cif_file,
                                                           compound_list,
                                                           todo)
            self.connect(self.work_thread, QtCore.SIGNAL("update_progress_bar"), self.update_progress_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("update_status_bar(QString)"), self.update_status_bar)
            self.connect(self.work_thread, QtCore.SIGNAL("finished()"), self.thread_finished)
            self.connect(self.work_thread, QtCore.SIGNAL("datasource_menu_reload_samples"),
                         self.datasource_menu_reload_samples)
            self.work_thread.start()
    def update_deposition_table(self):
        """Refresh the deposition table for PanDDA models that are ready to deposit.

        Aborts (with log messages) if any sample contains ligands that are not
        ready for deposition; otherwise inserts every ready crystal into the
        depositTable.
        """
        # check if PanDDA models are ready for deposition
        depositChecks = XChemDeposit.update_deposition_table(
            os.path.join(self.database_directory, self.data_source_file))
        toDeposit, mismatch = depositChecks.PanDDA_models_to_deposit()
        if mismatch != {}:
            self.update_log.insert('The following samples contain ligand that are not ready for deposition:')
            # NOTE(review): iterating a dict yields its keys, yet entry[0..4]
            # is indexed below — presumably each element of `mismatch` is a
            # sequence (sample, site, position, ..., reason); confirm the
            # structure returned by PanDDA_models_to_deposit.
            for entry in mismatch:
                self.update_log.insert(entry[0] + ' -> site: ' + entry[1] + ' @ ' + entry[2] + ' => ' + entry[4])
            self.update_log.insert('You need to change this before you can continue!')
            return None
        for xtal in toDeposit:
            # empty dict -> create/refresh the row with default values
            self.db.update_insert_depositTable(xtal, {})
def show_html_summary_and_diffraction_image(self):
for key in self.albula_button_dict:
if self.albula_button_dict[key][0] == self.sender():
print('==> XCE: showing html summary in firefox')
self.show_html_summary_in_firefox(key)
def need_to_switch_main_tab(self, task_index):
msgBox = QtGui.QMessageBox()
msgBox.setText("Need to switch main tab before you can launch this job")
msgBox.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.YesRole)
msgBox.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.RejectRole)
reply = msgBox.exec_();
if reply == 0:
self.main_tab_widget.setCurrentIndex(task_index)
def check_write_permissions_of_data_source(self):
write_enabled = True
if not os.access(os.path.join(self.database_directory, self.data_source_file), os.W_OK):
QtGui.QMessageBox.warning(self.window, "Data Source Problem",
'\nData Source is Read-Only\n',
QtGui.QMessageBox.Cancel, QtGui.QMessageBox.NoButton,
QtGui.QMessageBox.NoButton)
write_enabled = False
return write_enabled
def no_data_source_selected(self):
QtGui.QMessageBox.warning(self.window, "Data Source Problem",
('Please set or create a data source file\n') +
('Options:\n') +
('1. Use an existing file:\n') +
('- Settings -> Select Data Source File\n') +
('2. Create a new file\n') +
('- Data Source -> Create New Data\nSource (SQLite)'),
QtGui.QMessageBox.Cancel, QtGui.QMessageBox.NoButton,
QtGui.QMessageBox.NoButton)
def update_progress_bar(self, progress):
self.progress_bar.setValue(progress)
def update_status_bar(self, message):
self.status_bar.showMessage(message)
def thread_finished(self):
self.explorer_active = 0
self.update_progress_bar(0)
self.update_status_bar('idle')
def show_error_dict(self, errorDict):
text = ''
for key in errorDict:
text += '{0!s}:\n'.format(key)
for entry in errorDict[key]:
text += ' - ' + entry + '\n'
msgBox = QtGui.QMessageBox()
msgBox.setText(text)
msgBox.exec_()
def create_widgets_for_autoprocessing_results_only(self, data_dict):
self.status_bar.showMessage('Building details table for data processing results')
self.data_collection_dict = data_dict
column_name = ['Program',
'Resolution\nOverall',
'Resolution\n[Mn<I/sig(I)> = 2.0]',
'DataProcessing\nSpaceGroup',
'Mn<I/sig(I)>\nHigh',
'Rmerge\nLow',
'Completeness\nOverall',
'DataProcessing\nUnitCell',
'DataProcessing\nRfree',
'DataProcessing\nScore']
# need to do this because db_dict keys are SQLite column names
diffraction_data_column_name = XChemDB.data_source(
os.path.join(self.database_directory, self.data_source_file)).translate_xce_column_list_to_sqlite(
column_name)
for xtal in sorted(self.data_collection_dict):
if os.path.isfile(os.path.join(self.initial_model_directory, xtal, xtal + '.mtz')):
mtz_already_in_inital_model_directory = True
# column 2: data collection date
# this one should always be there; it may need updating in case another run appears
# first find latest run
tmp = []
for entry in self.data_collection_dict[xtal]:
if entry[0] == 'image':
tmp.append([entry[3], datetime.strptime(entry[3], '%Y-%m-%d %H:%M:%S')])
latest_run = max(tmp, key=lambda x: x[1])[0]
# first check if it does already exist
if xtal not in self.data_collection_column_three_dict:
# generate all the widgets which can later be appended and add them to the dictionary
data_collection_table = QtGui.QTableWidget() # table with data processing results for each pipeline
selection_changed_by_user = False
self.data_collection_column_three_dict[xtal] = [data_collection_table, selection_changed_by_user]
xtal_in_table = True
else:
data_collection_table = self.data_collection_column_three_dict[xtal][0]
selection_changed_by_user = self.data_collection_column_three_dict[xtal][1]
data_collection_table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
data_collection_table.setColumnCount(len(column_name))
font = QtGui.QFont()
font.setPointSize(8)
data_collection_table.setFont(font)
data_collection_table.setHorizontalHeaderLabels(column_name)
data_collection_table.horizontalHeader().setFont(font)
data_collection_table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
#############################################################################
# crystal images
# first check there are new images that are not displayed yet; i.e. they are not in the self.data_collection_image_dict
if xtal not in self.data_collection_image_dict:
# OK this is the first time
self.data_collection_image_dict[xtal] = []
# sort crystal images by timestamp
# reminder: ['image',visit,run,timestamp,image_list,diffraction_image,run_number]
# a) get only image entries from self.data_collection_dict
tmp = []
for entry in self.data_collection_dict[xtal]:
if entry[0] == 'image':
tmp.append(entry)
# b) sort by the previously assigned run number
# note: entry[6]==run_number
for entry in sorted(tmp, key=lambda x: x[6]):
run_number = entry[6]
images_already_in_table = False
for image in self.data_collection_image_dict[xtal]:
if run_number == image[0]:
images_already_in_table = True
break
if not images_already_in_table:
# not if there is a run, but images are for whatever reason not present in self.data_collection_dict
# then use image not available from $XChemExplorer_DIR/image/IMAGE_NOT_AVAILABLE.png
# not sure how to do this at the moment; it will probably trigger an error that I can catch
self.data_collection_image_dict[xtal].append([entry[6], entry[1], entry[2], entry[3], entry[5]])
#############################################################################
# initialize dataset_outcome_dict for xtal
if xtal not in self.dataset_outcome_dict:
self.dataset_outcome_dict[xtal] = []
# dataset outcome buttons
#############################################################################
# table for data processing results
# check if results from particular pipeline are already in table;
# not really looking at the table here, but compare it to self.data_collection_table_dict
row_position = data_collection_table.rowCount()
if not xtal in self.data_collection_table_dict:
self.data_collection_table_dict[xtal] = []
# reminder: ['logfile',visit,run,timestamp,autoproc,file_name,aimless_results,<aimless_index>,False]
logfile_list = []
for entry in self.data_collection_dict[xtal]:
if entry[0] == 'logfile':
logfile_list.append(entry)
for entry in sorted(logfile_list, key=lambda x: x[7]): # sort by aimless_index and so make sure
entry_already_in_table = False # that aimless_index == row
for logfile in self.data_collection_table_dict[xtal]:
if entry[1] == logfile[1] and entry[2] == logfile[2] and entry[3] == logfile[3] and entry[4] == \
logfile[4]:
entry_already_in_table = True
# might have to update Rfree column
for column, header in enumerate(diffraction_data_column_name):
if header == 'DataProcessing\nRfree':
# entry[7]==aimless_index, i.e. row number
cell_text = QtGui.QTableWidgetItem()
cell_text.setText(str(db_dict[header[1]]))
cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
data_collection_table.setItem(entry[7], column, cell_text)
break
break
if not entry_already_in_table:
data_collection_table.insertRow(row_position)
db_dict = entry[6]
for column, header in enumerate(diffraction_data_column_name):
cell_text = QtGui.QTableWidgetItem()
try:
cell_text.setText(str(db_dict[header[1]]))
except KeyError:
# this may happen if not score exists
cell_text.setText('0')
cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
data_collection_table.setItem(row_position, column, cell_text)
data_collection_table.setRowHeight(row_position, 20)
row_position += 1
self.data_collection_table_dict[xtal].append(
['logfile', entry[1], entry[2], entry[3], entry[4]]) # 'logfile' is just added to have
# same index numbers between lists
data_collection_table.cellClicked.connect(self.user_update_selected_autoproc_datasets_summary_table)
# select best resolution file + set data collection outcome
# the assumption is that index in data_collection_dict and row number are identical
# the assumption for data collection outcome is that as long as a logfile is found, it's a success
logfile_found = False
for entry in self.data_collection_dict[xtal]:
if entry[0] == 'logfile':
index = entry[7]
best_file = entry[8]
logfile_found = True
if best_file:
# we change the selection only if the user did not touch it, assuming that he/she knows best
# if not selection_changed_by_user:
data_collection_table.selectRow(index)
self.populate_datasets_summary_table()
def find_suitable_reference_file(self, db_dict):
reference_file = []
dummy = ['...', '', '', '', 0, '0']
reference_file.append([dummy, 999])
suitable_reference = []
for reference in self.reference_file_list:
# first we need one in the same pointgroup
if reference[5] == db_dict['DataProcessingPointGroup']:
try:
difference = math.fabs(
1 - (float(db_dict['DataProcessingUnitCellVolume']) / float(reference[4]))) * 100
reference_file.append([reference, difference])
except ValueError:
continue
return reference_file
    def create_maps_table(self):
        """Build or refresh the 'Maps' table: one row per successfully collected crystal.

        For every crystal whose DataCollectionOutcome starts with 'success' the
        method inserts a new row (or updates the existing one), pre-selects a
        reference file based on the smallest unit-cell volume difference and
        colour-codes the Dimple/Compound status cells.
        NOTE(review): if a crystal is in self.initial_model_dimple_dict but its
        sample ID is not found in the table, 'current_row' would be unbound —
        presumably the two are always kept in sync; verify against callers.
        """
        column_name = self.db.translate_xce_column_list_to_sqlite(self.maps_table_columns)
        for xtal in sorted(self.xtal_db_dict):
            new_xtal = False
            db_dict = self.xtal_db_dict[xtal]
            # only crystals with a successful data collection get a maps-table row
            if str(db_dict['DataCollectionOutcome']).lower().startswith('success'):
                reference_file = self.find_suitable_reference_file(db_dict)
                # candidate with the smallest unit-cell volume difference (in %)
                smallest_uc_difference = min(reference_file, key=lambda x: x[1])
                row = self.maps_table.rowCount()
                if xtal not in self.initial_model_dimple_dict:
                    self.maps_table.insertRow(row)
                    current_row = row
                    new_xtal = True
                else:
                    # crystal already shown: locate its existing row by sample ID
                    for table_row in range(row):
                        if self.maps_table.item(table_row, 0).text() == xtal:
                            current_row = table_row
                            break
                for column, header in enumerate(column_name):
                    if header[0] == 'Sample ID':
                        cell_text = QtGui.QTableWidgetItem()
                        cell_text.setText(str(xtal))
                        cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                        self.maps_table.setItem(current_row, column, cell_text)
                    elif header[0] == 'Select':
                        # the 'run dimple' checkbox is created once per crystal, unticked
                        if new_xtal:
                            run_dimple = QtGui.QCheckBox()
                            run_dimple.toggle()
                            self.maps_table.setCellWidget(current_row, column, run_dimple)
                            run_dimple.setChecked(False)
                    elif header[0] == 'Reference\nSpaceGroup':
                        cell_text = QtGui.QTableWidgetItem()
                        cell_text.setText(str(smallest_uc_difference[0][1]))
                        cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                        self.maps_table.setItem(current_row, column, cell_text)
                    elif header[0] == 'Difference\nUC Volume (%)':
                        cell_text = QtGui.QTableWidgetItem()
                        # recomputed although already available above; kept as-is
                        smallest_uc_difference = min(reference_file, key=lambda x: x[1])
                        cell_text.setText(str(round(float(smallest_uc_difference[1]), 1)))
                        cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                        self.maps_table.setItem(current_row, column, cell_text)
                    elif header[0] == 'Reference File':
                        if new_xtal:
                            reference_file_selection_combobox = QtGui.QComboBox()
                            self.populate_reference_combobox(reference_file_selection_combobox)
                            # auto-select the best reference only if it is close enough
                            if float(smallest_uc_difference[1]) < self.allowed_unitcell_difference_percent:
                                index = reference_file_selection_combobox.findText(str(smallest_uc_difference[0][0]),
                                                                                   QtCore.Qt.MatchFixedString)
                                reference_file_selection_combobox.setCurrentIndex(index)
                            else:
                                reference_file_selection_combobox.setCurrentIndex(0)
                            self.maps_table.setCellWidget(current_row, column,
                                                          reference_file_selection_combobox)
                        else:
                            # reuse the combobox created earlier for this crystal
                            reference_file_selection_combobox = self.initial_model_dimple_dict[xtal][1]
                            self.populate_reference_combobox(reference_file_selection_combobox)
                            if float(smallest_uc_difference[1]) < self.allowed_unitcell_difference_percent:
                                index = reference_file_selection_combobox.findText(str(smallest_uc_difference[0][0]),
                                                                                   QtCore.Qt.MatchFixedString)
                                reference_file_selection_combobox.setCurrentIndex(index)
                            else:
                                reference_file_selection_combobox.setCurrentIndex(0)
                    else:
                        # plain text column taken straight from the database record
                        cell_text = QtGui.QTableWidgetItem()
                        cell_text.setText(str(db_dict[header[1]]))
                        cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                        # colour code processing status cells
                        if header[0] == 'Dimple\nStatus':
                            if str(db_dict[header[1]]) == 'running':
                                cell_text.setBackground(QtGui.QColor(100, 230, 150))
                            elif str(db_dict[header[1]]) == 'pending':
                                cell_text.setBackground(QtGui.QColor(20, 100, 230))
                            elif str(db_dict[header[1]]) == 'started':
                                cell_text.setBackground(QtGui.QColor(230, 240, 110))
                            elif str(db_dict[header[1]]) == 'finished':
                                cell_text.setBackground(QtGui.QColor(255, 255, 255))
                        if header[0] == 'Compound\nStatus':
                            if str(db_dict[header[1]]) == 'running':
                                cell_text.setBackground(QtGui.QColor(100, 230, 150))
                            elif str(db_dict[header[1]]) == 'pending':
                                cell_text.setBackground(QtGui.QColor(20, 100, 230))
                            elif str(db_dict[header[1]]) == 'started':
                                cell_text.setBackground(QtGui.QColor(230, 240, 110))
                            elif str(db_dict[header[1]]) == 'restraints generated':
                                cell_text.setBackground(QtGui.QColor(255, 255, 255))
                            elif str(db_dict[header[1]]) == 'restraints failed':
                                cell_text.setBackground(QtGui.QColor(255, 0, 0))
                            elif str(db_dict[header[1]]) == 'missing smiles':
                                cell_text.setBackground(QtGui.QColor(240, 150, 20))
                        self.maps_table.setItem(current_row, column, cell_text)
            if new_xtal:
                # remember the widgets so later refreshes can reuse them
                self.initial_model_dimple_dict[xtal] = [run_dimple, reference_file_selection_combobox]
def preferences_data_to_copy_combobox_changed(self, i):
text = str(self.preferences_data_to_copy_combobox.currentText())
for item in self.preferences_data_to_copy:
if item[0] == text:
self.preferences['processed_data_to_copy'] = item[1]
break
def preferences_selection_mechanism_combobox_changed(self, i):
text = str(self.preferences_selection_mechanism_combobox.currentText())
self.preferences['dataset_selection_mechanism'] = text
self.update_log.insert('setting datasets selection mechanism to ' + text)
def preferences_initial_refinement_combobox_changed(self, i):
text = str(self.preferences_initial_refinement_combobox.currentText())
self.preferences['initial_refinement_pipeline'] = text
self.update_log.insert('setting initial refinement pipeline to ' + text)
def preferences_restraints_generation_combobox_changed(self):
text = str(self.preferences_restraints_generation_combobox.currentText())
self.restraints_program = text
self.update_log.insert('will use {0!s} for generation of ligand coordinates and restraints'.format(text))
def refinement_outcome_combobox_changed(self):
for xtal in self.refinement_table_dict:
if self.sender() == self.refinement_table_dict[xtal]:
# db_dict = {'RefinementOutcome': str(self.sender().currentText())}
db_dict = {}
db_dict['RefinementOutcome'] = str(self.sender().currentText())
db_dict['RefinementOutcomePerson'] = getpass.getuser()
db_dict['RefinementOutcomeDate'] = datetime.strftime(datetime.now(), '%Y-%m-%d_%H-%M-%S.%f')[:-4]
self.db.create_or_remove_missing_records_in_depositTable(self.xce_logfile, xtal, 'ligand_bound',
db_dict)
def get_reference_file_list(self, reference_root):
# check available reference files
reference_file_list = []
dummy = ['...', '', '', '', 0, '0']
reference_file_list.append(dummy)
if os.path.isfile(os.path.join(self.reference_directory, reference_root + '.pdb')):
pdb_reference = parse().PDBheader(os.path.join(self.reference_directory, reference_root + '.pdb'))
spg_reference = pdb_reference['SpaceGroup']
unitcell_reference = pdb_reference['UnitCell']
lattice_reference = pdb_reference['Lattice']
unitcell_volume_reference = pdb_reference['UnitCellVolume']
pointgroup_reference = pdb_reference['PointGroup']
reference_file_list.append([reference_root,
spg_reference,
unitcell_reference,
lattice_reference,
unitcell_volume_reference,
pointgroup_reference])
else:
for files in glob.glob(self.reference_directory + '/*'):
if files.endswith('.pdb'):
reference_root = files[files.rfind('/') + 1:files.rfind('.')]
if os.path.isfile(os.path.join(self.reference_directory, reference_root + '.pdb')):
# reference_file = reference_root + '.pdb'
pdb_reference = parse().PDBheader(
os.path.join(self.reference_directory, reference_root + '.pdb'))
spg_reference = pdb_reference['SpaceGroup']
unitcell_reference = pdb_reference['UnitCell']
lattice_reference = pdb_reference['Lattice']
unitcell_volume_reference = pdb_reference['UnitCellVolume']
pointgroup_reference = pdb_reference['PointGroup']
reference_file_list.append([reference_root,
spg_reference,
unitcell_reference,
lattice_reference,
unitcell_volume_reference,
pointgroup_reference])
for n, file in enumerate(reference_file_list):
self.update_log.insert('reference file {0!s}: {1!s}'.format(n, file))
return reference_file_list
def dataset_outcome_combobox_change_outcome(self, text):
outcome = str(text)
xtal = ''
for key in self.dataset_outcome_combobox_dict:
if self.dataset_outcome_combobox_dict[key] == self.sender():
xtal = key
self.update_log.insert('user changed data collection outcome of {0!s} to {1!s}'.format(xtal, outcome))
break
self.dataset_outcome_dict[xtal] = outcome
if xtal != '':
# # need to also update if not yet done
# user_already_changed_selection = False
# for n, entry in enumerate(self.data_collection_dict[xtal]):
# if entry[0] == 'user_changed_selection':
# user_already_changed_selection = True
# if entry[0] == 'logfile':
# db_dict = entry[6]
# db_dict['DataCollectionOutcome'] = outcome
# entry[6] = db_dict
# self.data_collection_dict[xtal][n] = entry
# if not user_already_changed_selection:
# self.data_collection_dict[xtal].append(['user_changed_selection'])
# # finally need to update outcome field in data source accordingly
self.update_log.insert('updating dataset outcome in datasource for {0!s}'.format(xtal))
update_dict = {'DataCollectionOutcome': outcome}
self.db.update_insert_data_source(xtal, update_dict)
def set_run_dimple_flag(self, state):
if state == QtCore.Qt.Checked:
for key in self.initial_model_dimple_dict:
self.initial_model_dimple_dict[key][0].setChecked(True)
else:
for key in self.initial_model_dimple_dict:
self.initial_model_dimple_dict[key][0].setChecked(False)
    def show_data_collection_details(self, state):
        """Show the detail widget for the crystal whose 'Show Details' checkbox was ticked.

        Only one detail widget is displayed at a time: the current one is
        hidden first, and all other crystals' checkboxes are unticked.
        """
        # first remove currently displayed widget
        if self.data_collection_details_currently_on_display is not None:
            self.data_collection_details_currently_on_display.hide()
            self.data_collection_details_currently_on_display = None
        # remember (sample ID, row) for every row of the summary table
        tmp = []
        allRows = self.datasets_summary_table.rowCount()
        for table_row in range(allRows):
            tmp.append([self.datasets_summary_table.item(table_row, 0).text(), table_row])
        for key in self.datasets_summary_dict:
            # index 3 holds the 'Show Details' checkbox for this crystal
            if self.datasets_summary_dict[key][3] == self.sender():
                if self.sender().isChecked():
                    # highlight the summary-table row of the chosen crystal
                    for item in tmp:
                        if item[0] == key:
                            self.datasets_summary_table.selectRow(item[1])
                    self.data_collection_details_currently_on_display = self.data_collection_column_three_dict[key][0]
                    self.datasets_summarys_vbox_for_details.addWidget(
                        self.data_collection_details_currently_on_display)
                    self.data_collection_details_currently_on_display.show()
            else:
                # un-check all other ones
                self.datasets_summary_dict[key][3].setChecked(False)
# def populate_datasets_summary_table(self):
# self.status_bar.showMessage(
# 'Building summary table for data processing results; be patient this may take a while')
# row = self.datasets_summary_table.rowCount()
# column_name = self.db.translate_xce_column_list_to_sqlite(self.datasets_summary_table_columns)
#
# pinList = self.db.execute_statement(
# "Select CrystalName,PinBarcode,DataCollectionPinBarcode from mainTable where CrystalName is not ''")
# pinDict = {}
# for item in pinList:
# pinDict[str(item[0])] = [str(item[1]), str(item[2])]
#
# for xtal in sorted(self.data_collection_dict):
# new_xtal = False
# if xtal not in self.datasets_summary_dict:
# row = self.datasets_summary_table.rowCount()
# self.datasets_summary_table.insertRow(row)
# self.datasets_summary_dict[xtal] = []
# new_xtal = True
#
# # check for dataset outcome
# outcome = ''
# logfile_found = False
# too_low_resolution = True
# db_dict = {}
# for entry in self.data_collection_dict[xtal]:
# if entry[0] == 'logfile':
# logfile_found = True
# if entry[8]: # if this was auto-selected best resolution file
# db_dict = entry[6]
# try:
# if float(db_dict['DataProcessingResolutionHigh']) <= float(
# self.acceptable_low_resolution_limit_for_data):
# too_low_resolution = False
# except ValueError:
# pass
#
# try:
# outcome = str(self.db.get_value_from_field(xtal, 'DataCollectionOutcome')[0])
# except TypeError:
# outcome = 'Failed - unknown'
# self.update_log.insert('cannot find DataCollectionOutcome for {0!s}'.format(xtal))
# self.dataset_outcome_dict[xtal] = outcome
#
# # find latest run for crystal and diffraction images
# tmp = []
# for entry in self.data_collection_dict[xtal]:
# if entry[0] == 'image':
# tmp.append([entry, datetime.strptime(entry[3], '%Y-%m-%d %H:%M:%S')])
# latest_run = max(tmp, key=lambda x: x[1])[0]
#
# new_run_for_exisiting_crystal_or_new_sample = True
# if new_xtal:
# self.datasets_summary_dict[xtal] = [outcome, db_dict, latest_run]
# else:
# # check if newer run appeared
# old_run_timestamp = self.datasets_summary_dict[xtal][2][3]
# new_run_timestamp = latest_run[3]
# if old_run_timestamp == new_run_timestamp:
# new_run_for_exisiting_crystal_or_new_sample = False
# else:
# checkbox_for_details = self.datasets_summary_dict[xtal][3]
# self.datasets_summary_dict[xtal] = [outcome, db_dict, latest_run, checkbox_for_details]
#
# if new_xtal:
# current_row = row
# else:
# allRows = self.datasets_summary_table.rowCount()
# for table_row in range(allRows):
# if self.datasets_summary_table.item(table_row, 0).text() == xtal:
# current_row = table_row
# break
#
# image_number = 0
# for column, header in enumerate(column_name):
# if header[0] == 'Sample ID':
# cell_text = QtGui.QTableWidgetItem()
# cell_text.setText(str(xtal))
# cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
# self.datasets_summary_table.setItem(current_row, column, cell_text)
# elif header[0] == 'DataCollection\nOutcome':
# if new_xtal:
# dataset_outcome_combobox = QtGui.QComboBox()
# for outcomeItem in self.dataset_outcome:
# dataset_outcome_combobox.addItem(outcomeItem)
# self.datasets_summary_table.setCellWidget(current_row, column, dataset_outcome_combobox)
# dataset_outcome_combobox.activated[str].connect(self.dataset_outcome_combobox_change_outcome)
# self.dataset_outcome_combobox_dict[xtal] = dataset_outcome_combobox
# index = self.dataset_outcome_combobox_dict[xtal].findText(str(outcome), QtCore.Qt.MatchFixedString)
# self.dataset_outcome_combobox_dict[xtal].setCurrentIndex(index)
# continue
#
# elif header[0].startswith('img'):
# if new_run_for_exisiting_crystal_or_new_sample:
# img = latest_run[4]
# pixmap = QtGui.QPixmap()
# # can do this (img[image_number][1]) because made sure in the threading module
# # that there are always exactly 5 images in there
# pixmap.loadFromData(base64.b64decode(img[image_number][1]))
# image = QtGui.QLabel()
# image.resize(128, 80)
# image.setPixmap(pixmap.scaled(image.size(), QtCore.Qt.KeepAspectRatio))
# self.datasets_summary_table.setCellWidget(current_row, column, image)
# image_number += 1
#
# elif header[0].startswith('Show Diffraction\nImage'):
# if new_run_for_exisiting_crystal_or_new_sample:
# diffraction_image = latest_run[5]
# diffraction_image_name = diffraction_image[diffraction_image.rfind('/') + 1:]
# try: # need to try because older pkl file may not have this item in list
# html_summary = latest_run[7]
# except IndexError:
# html_summary = ''
# if new_xtal:
# start_albula_button = QtGui.QPushButton('Show: \n' + diffraction_image_name)
# start_albula_button.clicked.connect(self.show_html_summary_and_diffraction_image)
# self.albula_button_dict[xtal] = [start_albula_button, diffraction_image, html_summary]
# self.datasets_summary_table.setCellWidget(current_row, column, start_albula_button)
# else:
# self.albula_button_dict[xtal][1] = diffraction_image
# elif header[0].startswith('Show\nDetails'):
# if new_xtal:
# show_data_collection_details_checkbox = QtGui.QCheckBox()
# show_data_collection_details_checkbox.toggle()
# show_data_collection_details_checkbox.setChecked(False)
# show_data_collection_details_checkbox.stateChanged.connect(self.show_data_collection_details)
# self.datasets_summary_table.setCellWidget(current_row, column,
# show_data_collection_details_checkbox)
# self.datasets_summary_dict[xtal].append(show_data_collection_details_checkbox)
# elif header[0].startswith('SoakDB\nBarcode') or header[0].startswith('GDA\nBarcode'):
# if new_xtal:
# cell_text = QtGui.QTableWidgetItem()
# if xtal in pinDict:
# if header[0].startswith('SoakDB\nBarcode'):
# cell_text.setText(str(pinDict[xtal][0]))
# elif header[0].startswith('GDA\nBarcode'):
# cell_text.setText(str(pinDict[xtal][1]))
# if pinDict[xtal][0] == 'NULL' or pinDict[xtal][1] == 'NULL':
# cell_text.setBackground(QtGui.QColor(255, 215, 0))
# elif pinDict[xtal][0] != pinDict[xtal][1]:
# cell_text.setBackground(QtGui.QColor(255, 0, 0))
# else:
# cell_text.setText('')
# cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
# self.datasets_summary_table.setItem(current_row, column, cell_text)
# else:
# cell_text = QtGui.QTableWidgetItem()
# # in case data collection failed for whatever reason
# if logfile_found:
# try:
# cell_text.setText(str(db_dict[header[1]]))
# except KeyError: # older pkl files may not have all the columns
# cell_text.setText('n/a')
# else:
# if header[0].startswith('Resolution\n[Mn<I/sig(I)> = 1.5]'):
# cell_text.setText('999')
# elif header[0].startswith('DataProcessing\nRfree'):
# cell_text.setText('999')
# elif header[0].startswith('Rmerge\nLow'):
# cell_text.setText('999')
# else:
# cell_text.setText('')
# cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
# self.datasets_summary_table.setItem(current_row, column, cell_text)
#
# row += 1
#
# self.datasets_summary_table.resizeRowsToContents()
# self.datasets_summary_table.resizeColumnsToContents()
#
# self.status_bar.showMessage('updating Overview table')
#
# self.status_bar.showMessage('idle')
#
# self.save_files_to_initial_model_folder()
#
################################################################################################################
#
#
#
# => new data collection summary table
# > start
def get_sample_list_from_table(self,table):
sampleList = []
allRows = table.rowCount()
for row in xrange(0, allRows):
sample_id = str(table.item(row, 0).text())
sampleList.append(sample_id)
return sorted(sampleList)
def get_row_of_sample_in_table(self,table,xtal):
allRows = table.rowCount()
sampleRow = allRows
for n,row in enumerate(xrange(0, allRows)):
sample_id = str(table.item(row, 0).text())
if sample_id == xtal:
sampleRow = n
break
return sampleRow
    def update_row_in_table(self,sample,row,db_dict,table,columns_to_show):
        """Write one crystal's values from *db_dict* into row *row* of *table*.

        Shared by the Datasets summary table, the autoprocessing-results popup
        and the deposition tables.  Widget cells (outcome combobox, snapshot
        images, selection checkboxes) are created only once per crystal; plain
        text cells are refreshed on every call.
        """
        xtal = str(sample)
        column_name = self.db.translate_xce_column_list_to_sqlite(columns_to_show)
        for column, header in enumerate(column_name):
            if header[0] == 'Sample ID':
                cell_text = QtGui.QTableWidgetItem()
                cell_text.setText(str(xtal))
                cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                table.setItem(row, column, cell_text)
            elif header[0] == 'DataCollection\nOutcome':
                # create the outcome dropdown only once per crystal, then
                # keep it synchronised with the database value
                if xtal not in self.dataset_outcome_combobox_dict:
                    dataset_outcome_combobox = QtGui.QComboBox()
                    for outcomeItem in self.dataset_outcome:
                        dataset_outcome_combobox.addItem(outcomeItem)
                    dataset_outcome_combobox.activated[str].connect(self.dataset_outcome_combobox_change_outcome)
                    self.dataset_outcome_combobox_dict[xtal] = dataset_outcome_combobox
                    table.setCellWidget(row, column, dataset_outcome_combobox)
                index = self.dataset_outcome_combobox_dict[xtal].findText(str(db_dict['DataCollectionOutcome']), QtCore.Qt.MatchFixedString)
                self.dataset_outcome_combobox_dict[xtal].setCurrentIndex(index)
            elif header[0].startswith('img'):
                # crystal snapshot; fall back to a placeholder when missing
                if os.path.isfile(db_dict[header[1]]):
                    pixmap = QtGui.QPixmap(db_dict[header[1]])
                else:
                    pixmap = QtGui.QPixmap(
                        os.path.join(os.getenv('XChemExplorer_DIR'), 'image', 'IMAGE_NOT_AVAILABLE.png'))
                image = QtGui.QLabel()
                image.resize(128, 80)
                image.setPixmap(pixmap.scaled(image.size(), QtCore.Qt.KeepAspectRatio))
                table.setCellWidget(row, column, image)
            elif header[0] == 'Select':
                # deposition checkboxes are remembered so their state can be queried later
                checkbox = QtGui.QCheckBox()
                checkbox.toggle()
                if table == self.deposition_table_apo:
                    if xtal not in self.deposition_table_apo_dict:
                        self.deposition_table_apo_dict[xtal] = checkbox
                if table == self.deposition_table_bound:
                    if xtal not in self.deposition_table_bound_dict:
                        self.deposition_table_bound_dict[xtal] = checkbox
                table.setCellWidget(row, column, checkbox)
                checkbox.setChecked(False)
            #elif header[0].startswith('SoakDB\nBarcode') or header[0].startswith('GDA\nBarcode'):
            #    if new_xtal:
            #        cell_text = QtGui.QTableWidgetItem()
            #        if xtal in pinDict:
            #            if header[0].startswith('SoakDB\nBarcode'):
            #                cell_text.setText(str(pinDict[xtal][0]))
            #            elif header[0].startswith('GDA\nBarcode'):
            #                cell_text.setText(str(pinDict[xtal][1]))
            #            if pinDict[xtal][0] == 'NULL' or pinDict[xtal][1] == 'NULL':
            #                cell_text.setBackground(QtGui.QColor(255, 215, 0))
            #            elif pinDict[xtal][0] != pinDict[xtal][1]:
            #                cell_text.setBackground(QtGui.QColor(255, 0, 0))
            #        else:
            #            cell_text.setText('')
            #        cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
            #        self.datasets_summary_table.setItem(current_row, column, cell_text)
            else:
                cell_text = QtGui.QTableWidgetItem()
                # in case data collection failed for whatever reason
                try:
                    cell_text.setText(str(db_dict[header[1]]))
                except KeyError:  # older pkl files may not have all the columns
                    cell_text.setText('n/a')
                # else:
                #    if header[0].startswith('Resolution\n[Mn<I/sig(I)> = 1.5]'):
                #        cell_text.setText('999')
                #    elif header[0].startswith('DataProcessing\nRfree'):
                #        cell_text.setText('999')
                #    elif header[0].startswith('Rmerge\nLow'):
                #        cell_text.setText('999')
                #    else:
                #        cell_text.setText('')
                cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                table.setItem(row, column, cell_text)
            # NOTE(review): per-cell debug output below; looks like a leftover
            print('row: {0!s} column: {1!s} value: {2!s} header: {3!s}'.format(row, column, cell_text, header[0]))
        print('column_name {0!s}'.format(column_name))
    def populate_datasets_summary_table_NEW(self):
        """Fill the 'Datasets' summary table from the database.

        Looks up all crystals collected during the current visit(s) and
        inserts or updates one row per crystal via update_row_in_table().
        """
        self.status_bar.showMessage(
            'Building summary table for data processing results; be patient this may take a while')
        # get information about all samples collected during the current visit
        visit, beamline = XChemMain.getVisitAndBeamline(self.beamline_directory)
        if self.read_agamemnon.isChecked():
            # agamemnon layout: collect every sibling visit directory that
            # shares the prefix up to the last '-' of the beamline directory
            visit = []
            for v in glob.glob(os.path.join(self.beamline_directory[:self.beamline_directory.rfind('-') + 1] + '*')):
                visit.append(v[v.rfind('/')+1:])
        self.update_log.insert('reading information about collected crystals from database...')
        collectedXtalsDict = self.db.xtals_collected_during_visit_as_dict(visit)
        # instead of using dictionaries, query table of which crystals are in table
        samples_in_table = self.get_sample_list_from_table(self.datasets_summary_table)
        for xtal in sorted(collectedXtalsDict):
            if xtal not in samples_in_table:
                # new crystal: append a fresh row at the bottom
                row = self.datasets_summary_table.rowCount()
                self.datasets_summary_table.insertRow(row)
            else:
                # existing crystal: refresh its current row in place
                row = self.get_row_of_sample_in_table(self.datasets_summary_table,xtal)
            db_dict = collectedXtalsDict[xtal]
            self.update_row_in_table(xtal, row, db_dict, self.datasets_summary_table,
                                     self.datasets_summary_table_columns)
        self.datasets_summary_table.resizeRowsToContents()
        self.datasets_summary_table.resizeColumnsToContents()
        self.status_bar.showMessage('updating Overview table')
        self.status_bar.showMessage('idle')
def get_selected_row(self,table):
indexes = table.selectionModel().selectedRows()
for index in sorted(indexes):
selected_row = index.row()
return selected_row
    def show_results_from_all_pipelines(self):
        """Pop up a table listing every autoprocessing result for the selected crystal.

        The row matching the currently used result is pre-selected; clicking a
        different row (wired to select_different_autoprocessing_result) lets
        the user switch results.
        """
        selected_row=self.get_selected_row(self.datasets_summary_table)
        xtal = self.datasets_summary_table.item(selected_row, 0).text()
        # get details of currently selected autoprocessing result
        selectedResultDict = self.db.get_db_dict_for_sample(xtal)
        dbList=self.db.all_autoprocessing_results_for_xtal_as_dict(xtal)
        self.make_data_collection_table()
        self.msgBox = QtGui.QMessageBox()  # needs to be created here, otherwise the cellClicked function
        # will reference it before it exists
        for db_dict in dbList:
            # skip results without a valid space group (failed processing runs)
            if str(db_dict['DataProcessingSpaceGroup']).lower() == 'null' or str(db_dict['DataProcessingSpaceGroup']).lower() == 'none':
                continue
            row = self.data_collection_table.rowCount()
            self.data_collection_table.insertRow(row)
            self.update_row_in_table(xtal, row, db_dict, self.data_collection_table, self.data_collection_table_columns)
            # pre-select the row of the result that is currently in use;
            # visit/run/program/score together identify it uniquely
            if selectedResultDict['DataCollectionVisit'] == db_dict['DataCollectionVisit'] \
                    and selectedResultDict['DataCollectionRun'] == db_dict['DataCollectionRun'] \
                    and selectedResultDict['DataProcessingProgram'] == db_dict['DataProcessingProgram'] \
                    and selectedResultDict['DataProcessingScore'] == db_dict['DataProcessingScore']:
                self.current_row = row
                self.data_collection_table.selectRow(row)
        self.data_collection_table.cellClicked.connect(self.select_different_autoprocessing_result)
        self.data_collection_table_popup()
def make_data_collection_table(self):
# this creates a new table widget every time
# more elegant would be to delete or reset an existing widget...
self.data_collection_table = QtGui.QTableWidget()
self.data_collection_table.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.data_collection_table.setColumnCount(len(self.data_collection_table_columns))
font = QtGui.QFont()
font.setPointSize(8)
self.data_collection_table.setFont(font)
self.data_collection_table.setHorizontalHeaderLabels(self.data_collection_table_columns)
self.data_collection_table.horizontalHeader().setFont(font)
self.data_collection_table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
def data_collection_table_popup(self):
# self.msgBox = QtGui.QMessageBox()
msgBoxLayout = self.msgBox.layout()
qWid = QtGui.QWidget()
qWid.setFixedWidth(3000)
qWid.setFixedHeight(500)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.data_collection_table)
qWid.setLayout(vbox)
# msgBoxLayout.addLayout(vbox, 0, 0)
msgBoxLayout.addWidget(qWid)
self.msgBox.addButton(QtGui.QPushButton('Cancel'), QtGui.QMessageBox.RejectRole)
self.msgBox.resize(1000,200)
self.msgBox.exec_();
    def select_different_autoprocessing_result(self):
        """Apply the autoprocessing result the user clicked in the popup table.

        Re-links the processed files, updates the database main table and
        refreshes the crystal's row in the Datasets summary table; finally
        closes the popup.  Does nothing if the already-selected row is clicked.
        """
        selected_row=self.get_selected_row(self.data_collection_table)
        if selected_row != self.current_row:
            # columns 0-3 and 12 identify the autoprocessing result uniquely
            xtal = self.data_collection_table.item(selected_row, 0).text()
            visit = self.data_collection_table.item(selected_row, 1).text()
            run = self.data_collection_table.item(selected_row, 2).text()
            autoproc = self.data_collection_table.item(selected_row, 3).text()
            score = self.data_collection_table.item(selected_row, 12).text()
            # NOTE(review): debug output of the first 13 cells; looks like a leftover
            for q in range(13):
                try:
                    print('--> {0!s}: {1!s}'.format(q, self.data_collection_table.item(selected_row, q).text()))
                except AttributeError:
                    print('--> {0!s}: None'.format(q))
            # get db_dict from collectionTable for visit, run, autoproc
            # dbDict = self.db.get_db_dict_for_visit_run_autoproc(xtal,visit,run,autoproc)
            dbDict = self.db.get_db_dict_for_visit_run_autoproc_score(xtal, visit, run, autoproc, score)
            dbDict['DataProcessingAutoAssigned'] = 'False'
            self.update_log.insert('%s: changing selected autoprocessing result to %s %s %s' %(xtal,visit,run,autoproc))
            # xtal is QString -> str(xtal)
            XChemMain.linkAutoProcessingResult(str(xtal), dbDict, self.initial_model_directory,self.xce_logfile)
            self.update_log.insert('%s: updating row in Datasets table' %xtal)
            self.db.update_data_source(str(xtal),dbDict)
            # re-read the main-table record so the summary row shows the merged state
            self.update_log.insert('%s: getting updated information from DB mainTable' %xtal)
            dbDict = self.db.get_db_dict_for_sample(xtal)
            row = self.get_row_of_sample_in_table(self.datasets_summary_table,xtal)
            self.update_row_in_table(xtal, row, dbDict, self.datasets_summary_table,
                                     self.datasets_summary_table_columns)
        else:
            print('nothing to change')
        self.msgBox.done(1)
# < end
#################################################################################################################
def update_outcome_datasets_summary_table(self, sample, outcome):
rows_in_table = self.datasets_summary_table.rowCount()
for row in range(rows_in_table):
if self.datasets_summary_table.item(row, 0).text() == sample:
cell_text = QtGui.QTableWidgetItem()
cell_text.setText(outcome)
self.datasets_summary_table.setItem(row, 3, cell_text)
def user_update_selected_autoproc_datasets_summary_table(self):
for key in self.data_collection_column_three_dict:
if self.data_collection_column_three_dict[key][0] == self.sender():
self.update_log.insert('here: ' + self.sender())
self.update_log.insert('herere' + str(self.data_collection_column_three_dict))
dbTmp = self.xtal_db_dict[key]
stage = dbTmp['RefinementOutcome'].split()[0]
print('===>', key, stage)
if int(stage) > 2:
msgBox = QtGui.QMessageBox()
msgBox.setText(
"*** WARNING ***\n%s is currently %s\nIt will disappear from the Refinement table,\n"
"when you refresh it next time.\nDo you want to continue?" % (
key, dbTmp['RefinementOutcome']))
msgBox.addButton(QtGui.QPushButton('No'), QtGui.QMessageBox.YesRole)
msgBox.addButton(QtGui.QPushButton('Yes'), QtGui.QMessageBox.RejectRole)
reply = msgBox.exec_();
if reply == 0:
self.update_log.insert('will not change data processing selection')
# restore previous selection
for n, entry in enumerate(self.data_collection_dict[key]):
print('==>', n)
if entry[0] == 'logfile':
if entry[8]:
print('===> found:', n)
self.data_collection_column_three_dict[key][0].selectRow(n)
break
indexes = self.sender().selectionModel().selectedRows()
selected_processing_result = 1000000
for index in sorted(indexes):
selected_processing_result = index.row()
# the user changed the selection, i.e. no automated selection will update it
self.update_log.insert('user changed selection')
self.data_collection_column_three_dict[key][1] = True
# need to also update if not yet done
user_already_changed_selection = False
for n, entry in enumerate(self.data_collection_dict[key]):
if entry[0] == 'user_changed_selection':
user_already_changed_selection = True
if entry[0] == 'logfile':
db_dict = entry[6]
db_dict['DataProcessingAutoAssigned'] = 'False'
if entry[7] == selected_processing_result:
db_dict_current = entry[6]
program = db_dict['DataProcessingProgram']
visit = db_dict['DataCollectionVisit']
run = db_dict['DataCollectionRun']
self.update_log.insert(
'user changed data processing files for {0!s} to visit={1!s}, '
'run={2!s}, program={3!s}'.format(key, visit, run, program))
# update datasource
self.update_log.insert('updating datasource...')
self.update_data_source(key, db_dict)
entry[8] = True
else:
entry[8] = False
entry[6] = db_dict
self.data_collection_dict[key][n] = entry
if not user_already_changed_selection:
self.data_collection_dict[key].append(['user_changed_selection'])
XChemMain.change_links_to_selected_data_collection_outcome(key, self.data_collection_dict,
self.data_collection_column_three_dict,
self.dataset_outcome_dict,
self.initial_model_directory,
os.path.join(self.database_directory,
self.data_source_file),
self.xce_logfile)
# update 'Datasets' table
column_name = XChemDB.data_source(
os.path.join(self.database_directory, self.data_source_file)).translate_xce_column_list_to_sqlite(
self.datasets_summary_table_columns)
rows_in_table = self.datasets_summary_table.rowCount()
for row in range(rows_in_table):
if self.datasets_summary_table.item(row, 0).text() == key:
for column, header in enumerate(column_name):
if header[0] == 'Sample ID':
continue
elif header[0] == 'DataCollection\nOutcome':
continue
elif header[0].startswith('img'):
continue
elif header[0].startswith('Show'):
continue
else:
cell_text = QtGui.QTableWidgetItem()
try:
cell_text.setText(str(db_dict_current[header[1]]))
cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
self.datasets_summary_table.setItem(row, column, cell_text)
except KeyError:
pass
    def update_selected_autoproc_datasets_summary_table(self):
        """Mark the autoprocessing result selected in a detail table as the active one.

        Companion to user_update_selected_autoproc_datasets_summary_table but
        without the refinement-stage warning: flags the matching 'logfile'
        entry, writes it to the data source and refreshes the crystal's text
        cells in the Datasets summary table.
        """
        # find which crystal's detail table emitted the signal
        for key in self.data_collection_column_three_dict:
            if self.data_collection_column_three_dict[key][0] == self.sender():
                sample = key
                break
        indexes = self.sender().selectionModel().selectedRows()
        for index in sorted(indexes):
            selected_processing_result = index.row()
        for n, entry in enumerate(self.data_collection_dict[sample]):
            if entry[0] == 'logfile':
                # entry[7] is the row index of this result; entry[8] flags 'selected'
                if entry[7] == selected_processing_result:
                    db_dict = entry[6]
                    program = db_dict['DataProcessingProgram']
                    visit = db_dict['DataCollectionVisit']
                    run = db_dict['DataCollectionRun']
                    self.update_log.insert(
                        'user changed data processing files for {0!s} to visit={1!s}, run={2!s}, program={3!s}'.format(
                            sample, visit, run, program))
                    # update datasource
                    self.update_log.insert('updating datasource...')
                    self.update_data_source(sample, db_dict)
                    entry[8] = True
                else:
                    entry[8] = False
                self.data_collection_dict[sample][n] = entry
        # update 'Datasets' table
        column_name = XChemDB.data_source(
            os.path.join(self.database_directory, self.data_source_file)).translate_xce_column_list_to_sqlite(
            self.datasets_summary_table_columns)
        rows_in_table = self.datasets_summary_table.rowCount()
        for row in range(rows_in_table):
            if self.datasets_summary_table.item(row, 0).text() == sample:
                for column, header in enumerate(column_name):
                    # widget columns are managed elsewhere; only refresh text cells
                    if header[0] == 'Sample ID':
                        continue
                    elif header[0] == 'DataCollection\nOutcome':
                        continue
                    elif header[0].startswith('img'):
                        continue
                    elif header[0].startswith('Show'):
                        continue
                    else:
                        cell_text = QtGui.QTableWidgetItem()
                        # NOTE(review): db_dict is only bound when a logfile entry
                        # matched the selected row — presumably always the case
                        # when this slot fires; verify against signal wiring
                        cell_text.setText(str(db_dict[header[1]]))
                        cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                        self.datasets_summary_table.setItem(row, column, cell_text)
def populate_and_update_datasource_table(self):
    """Fill/refresh the overview table from the SQLite datasource rows.

    New samples get a fresh row; samples already displayed are updated in
    place.  Rows whose Sample ID is NULL ('None') or blank are not shown.
    """
    self.overview_datasource_table.setColumnCount(len(self.overview_datasource_table_columns))
    # first get a list of all the samples that are already in the table and which will be updated
    samples_in_table = []
    current_row = self.overview_datasource_table.rowCount()
    for row in range(current_row):
        sampleID = str(self.overview_datasource_table.item(row, 0).text())  # this must be the case
        samples_in_table.append(sampleID)
    columns_to_show = self.get_columns_to_show(self.overview_datasource_table_columns)
    # NOTE(review): n_rows is computed but never used below.
    n_rows = self.get_rows_with_sample_id_not_null_from_datasource()
    sample_id_column = self.get_columns_to_show(['Sample ID'])
    for row in self.data:
        if str(row[sample_id_column[0]]).lower() == 'none' or str(row[sample_id_column[0]]).replace(' ', '') == '':
            # do not show rows where sampleID is null
            continue
        else:
            if not str(row[sample_id_column[0]]) in samples_in_table:
                # insert row, this is a new sample
                x = self.overview_datasource_table.rowCount()
                self.overview_datasource_table.insertRow(x)
            else:
                # find row of this sample in data_source_table
                for present_rows in range(self.overview_datasource_table.rowCount()):
                    if str(row[sample_id_column[0]]) == str(
                            self.overview_datasource_table.item(present_rows, 0).text()):
                        x = present_rows
                        break
            # Write every displayed column for this sample's row.
            for y, item in enumerate(columns_to_show):
                cell_text = QtGui.QTableWidgetItem()
                if row[item] is None:
                    cell_text.setText('')
                else:
                    cell_text.setText(str(row[item]))
                if self.overview_datasource_table_columns[y] == 'Sample ID':  # assumption is that column 0 is always sampleID
                    cell_text.setFlags(QtCore.Qt.ItemIsEnabled)  # and this field cannot be changed
                cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                self.overview_datasource_table.setItem(x, y, cell_text)
    self.overview_datasource_table.setHorizontalHeaderLabels(self.overview_datasource_table_columns)
def kill_other_pandda_options(self):
    """Enforce mutual exclusion between the PanDDA per-row checkboxes.

    If the 'Ignore'-type box (col 7) is ticked, the two alternative boxes
    (cols 8 and 9) are cleared; if either alternative is ticked, col 7 is
    cleared.
    """
    for i in range(0, self.pandda_analyse_data_table.rowCount()):
        # NOTE(review): checkbox0 (col 1) is fetched but never used.
        checkbox0 = self.pandda_analyse_data_table.cellWidget(i,1)
        checkbox1 = self.pandda_analyse_data_table.cellWidget(i,7)
        checkbox2 = self.pandda_analyse_data_table.cellWidget(i,8)
        checkbox3 = self.pandda_analyse_data_table.cellWidget(i,9)
        if checkbox1.isChecked():
            checkbox2.setChecked(False)
            checkbox3.setChecked(False)
        # NOTE(review): Python precedence makes this
        # `(checkbox1 and checkbox2) or checkbox3` — after the branch above it
        # can only fire via checkbox3, so it duplicates the next condition;
        # confirm whether parentheses were intended.
        if checkbox1.isChecked() and checkbox2.isChecked() or checkbox3.isChecked():
            checkbox1.setChecked(False)
        if checkbox2.isChecked() or checkbox3.isChecked():
            checkbox1.setChecked(False)
def populate_pandda_analyse_input_table(self):
    """Fill/refresh the PanDDA analyse input table from the crystal DB dict.

    Only crystals with an existing Dimple PDB file are listed.  Each row gets
    three checkbox widgets (Exclude/Ignore/Export), the sample ID, and plain
    text cells; the 'PanDDA Status' cell is colour-coded by state.
    """
    column_name = self.db.translate_xce_column_list_to_sqlite(self.pandda_table_columns)
    print(column_name)
    for xtal in sorted(self.xtal_db_dict):
        new_xtal = False
        db_dict = self.xtal_db_dict[xtal]
        # Only show crystals that already have a Dimple-refined PDB.
        if os.path.isfile(db_dict['DimplePathToPDB']):
            row = self.pandda_analyse_data_table.rowCount()
            if xtal not in self.pandda_analyse_input_table_dict:
                self.pandda_analyse_data_table.insertRow(row)
                current_row = row
                new_xtal = True
            else:
                # Crystal already displayed: locate its row to update in place.
                for table_row in range(row):
                    if self.pandda_analyse_data_table.item(table_row, 0).text() == xtal:
                        current_row = table_row
                        break
            for column, header in enumerate(column_name):
                if header[0]=='Exclude':
                    deselect_button = QtGui.QCheckBox()
                    deselect_button.stateChanged.connect(self.kill_other_pandda_options)
                    self.pandda_analyse_data_table.setCellWidget(current_row, column, deselect_button)
                elif header[0]=='Ignore':
                    deselect_button = QtGui.QCheckBox()
                    deselect_button.stateChanged.connect(self.kill_other_pandda_options)
                    self.pandda_analyse_data_table.setCellWidget(current_row, column, deselect_button)
                elif header[0]=='Export':
                    deselect_button = QtGui.QCheckBox()
                    deselect_button.stateChanged.connect(self.kill_other_pandda_options)
                    self.pandda_analyse_data_table.setCellWidget(current_row, column, deselect_button)
                elif header[0] == 'Sample ID':
                    cell_text = QtGui.QTableWidgetItem()
                    cell_text.setText(str(xtal))
                    cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                    self.pandda_analyse_data_table.setItem(current_row, column, cell_text)
                else:
                    cell_text = QtGui.QTableWidgetItem()
                    cell_text.setText(str(db_dict[header[1]]))
                    # Colour-code the status column by processing state.
                    if header[0] == 'PanDDA\nStatus':
                        if str(db_dict[header[1]]) == 'running':
                            cell_text.setBackground(QtGui.QColor(100, 230, 150))
                        elif str(db_dict[header[1]]) == 'pending':
                            cell_text.setBackground(QtGui.QColor(20, 100, 230))
                        elif str(db_dict[header[1]]) == 'started':
                            cell_text.setBackground(QtGui.QColor(230, 240, 110))
                        elif str(db_dict[header[1]]) == 'finished':
                            cell_text.setBackground(QtGui.QColor(255, 255, 255))
                        elif 'problem' in str(db_dict[header[1]]):
                            cell_text.setBackground(QtGui.QColor(255, 0, 0))
                    cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                    self.pandda_analyse_data_table.setItem(current_row, column, cell_text)
            if new_xtal:
                self.pandda_analyse_input_table_dict[xtal] = []
def select_sample_for_pandda(self, option):
    """Set the PanDDA checkbox state for all currently selected table rows.

    option: 'deselect' clears all three boxes; 'ignore'/'char'/'zmap' clears
    them and then ticks the corresponding box (cols 6/7/8 respectively).
    Ends by re-applying the mutual-exclusion rules.

    NOTE(review): this method uses columns 6-8 while
    kill_other_pandda_options reads columns 7-9 — confirm which indexing is
    correct for the current table layout.
    """
    indexes = self.pandda_analyse_data_table.selectionModel().selectedRows()
    if option == 'deselect':
        for index in sorted(indexes):
            self.pandda_analyse_data_table.cellWidget(index.row(), 6).setChecked(False)
            self.pandda_analyse_data_table.cellWidget(index.row(), 7).setChecked(False)
            self.pandda_analyse_data_table.cellWidget(index.row(), 8).setChecked(False)
    else:
        for index in sorted(indexes):
            # Clear all three, then tick the one matching the option.
            self.pandda_analyse_data_table.cellWidget(index.row(), 6).setChecked(False)
            self.pandda_analyse_data_table.cellWidget(index.row(), 7).setChecked(False)
            self.pandda_analyse_data_table.cellWidget(index.row(), 8).setChecked(False)
            if option =='ignore':
                checkbox = self.pandda_analyse_data_table.cellWidget(index.row(), 6)
            if option == 'char':
                checkbox = self.pandda_analyse_data_table.cellWidget(index.row(), 7)
            if option == 'zmap':
                checkbox = self.pandda_analyse_data_table.cellWidget(index.row(), 8)
            # NOTE(review): any other option value raises NameError here.
            checkbox.setChecked(True)
    self.kill_other_pandda_options()
def populate_and_update_refinement_table(self):
    """Fill/refresh the refinement table for crystals in refinement stages 3-6.

    Parses the leading integer of 'RefinementOutcome' as the stage; crystals
    outside [3, 7) are skipped.  New crystals get a row plus an outcome
    combobox (cached in self.refinement_table_dict); existing rows are
    updated in place.  The status cell is colour-coded by state.
    """
    # panddaList = self.db.execute_statement(
    #     "select CrystalName,PANDDA_site_index,PANDDA_site_name,RefinementOutcome "
    #     "from panddaTable where CrystalName is not '' and PANDDA_site_ligand_placed is 'True';")
    # panddaDict = {}
    # for item in panddaList:
    #     if str(item[0]) not in panddaDict:
    #         panddaDict[str(item[0])] = []
    #     panddaDict[str(item[0])].append([str(item[1]), str(item[2]), str(item[3])])
    column_name = self.db.translate_xce_column_list_to_sqlite(self.refinement_table_columns)
    for xtal in sorted(self.xtal_db_dict):
        new_xtal = False
        db_dict = self.xtal_db_dict[xtal]
        try:
            # Outcome strings look like '<stage> <description>'; unparsable or
            # empty outcomes count as stage 0 (not shown).
            stage = int(str(db_dict['RefinementOutcome']).split()[0])
            refinementStage = db_dict['RefinementOutcome']
        except ValueError:
            stage = 0
        except IndexError:
            stage = 0
        if stage >= 3 and stage < 7:
            row = self.refinement_table.rowCount()
            if xtal not in self.refinement_table_dict:
                self.refinement_table.insertRow(row)
                current_row = row
                new_xtal = True
            else:
                # Crystal already displayed: locate its row to update in place.
                for table_row in range(row):
                    if self.refinement_table.item(table_row, 0).text() == xtal:
                        current_row = table_row
                        break
            for column, header in enumerate(column_name):
                if header[0] == 'Sample ID':
                    cell_text = QtGui.QTableWidgetItem()
                    cell_text.setText(str(xtal))
                    cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                    self.refinement_table.setItem(current_row, column, cell_text)
                elif header[0] == 'Refinement\nOutcome':
                    # Reuse the cached combobox for known crystals.
                    if new_xtal:
                        refinement_outcome_combobox = QtGui.QComboBox()
                        self.populate_refinement_outcome_combobox(refinement_outcome_combobox)
                        self.refinement_table.setCellWidget(current_row, column, refinement_outcome_combobox)
                    else:
                        refinement_outcome_combobox = self.refinement_table_dict[xtal]
                    index = refinement_outcome_combobox.findText(refinementStage, QtCore.Qt.MatchFixedString)
                    refinement_outcome_combobox.setCurrentIndex(index)
                    # NOTE(review): connect() is called on every refresh, so the
                    # slot may be attached multiple times to the same combobox.
                    refinement_outcome_combobox.currentIndexChanged.connect(
                        self.refinement_outcome_combobox_changed)
                elif header[0] == 'buster-reports':
                    #"<a href=\"{0!s}">'NAME'</a>".format(db_dict['RefinementBusterReportHTML'])
                    # db_dict['RefinementBusterReportHTML'] = 'www.google.com'
                    # Render the Buster report as a clickable hyperlink label.
                    buster_report = db_dict['RefinementBusterReportHTML']
                    ref_name = buster_report.split('/')[len(buster_report.split('/'))-2]
                    buster_report_link = QtGui.QLabel("<a href=\"{0!s}\">{1!s}</a>".format(buster_report,ref_name))
                    buster_report_link.setOpenExternalLinks(True)
                    # buster_report_link.setTextInteractionFlags(QtCore.Qt.TextBrowserInteraction)
                    # buster_report_link.setTextFormat(QtCore.Qt.RichText)
                    # self.refinement_table.setItem(current_row, column, buster_report_link)
                    self.refinement_table.setCellWidget(current_row, column, buster_report_link)
                # elif header[0] == 'PanDDA site details':
                #     try:
                #         panddaDict[xtal].insert(0, ['Index', 'Name', 'Status'])
                #         outerFrame = QtGui.QFrame()
                #         outerFrame.setFrameShape(QtGui.QFrame.Box)
                #         grid = QtGui.QGridLayout()
                #         for y, entry in enumerate(panddaDict[xtal]):
                #             for x, info in enumerate(entry):
                #                 frame = QtGui.QFrame()
                #                 frame.setFrameShape(QtGui.QFrame.Box)
                #                 vbox = QtGui.QVBoxLayout()
                #                 vbox.addWidget(QtGui.QLabel(str(entry[x])))
                #                 frame.setLayout(vbox)
                #                 grid.addWidget(frame, y, x)
                #         outerFrame.setLayout(grid)
                #         self.refinement_table.setCellWidget(current_row, column, outerFrame)
                #     except KeyError:
                #         cell_text = QtGui.QTableWidgetItem()
                #         cell_text.setText('*** N/A ***')
                #         cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                #         self.refinement_table.setItem(current_row, column, cell_text)
                else:
                    cell_text = QtGui.QTableWidgetItem()
                    cell_text.setText(str(db_dict[header[1]]))
                    # Colour-code the status column by refinement state.
                    if header[0] == 'Refinement\nStatus':
                        if str(db_dict[header[1]]) == 'running':
                            cell_text.setBackground(QtGui.QColor(100, 230, 150))
                        elif str(db_dict[header[1]]) == 'pending':
                            cell_text.setBackground(QtGui.QColor(20, 100, 230))
                        elif str(db_dict[header[1]]) == 'started':
                            cell_text.setBackground(QtGui.QColor(230, 240, 110))
                        elif str(db_dict[header[1]]) == 'finished':
                            cell_text.setBackground(QtGui.QColor(255, 255, 255))
                        elif 'problem' in str(db_dict[header[1]]):
                            cell_text.setBackground(QtGui.QColor(255, 0, 0))
                    cell_text.setTextAlignment(QtCore.Qt.AlignCenter | QtCore.Qt.AlignCenter)
                    self.refinement_table.setItem(current_row, column, cell_text)
            if new_xtal:
                self.refinement_table_dict[xtal] = refinement_outcome_combobox
    self.refinement_table.resizeColumnsToContents()
    self.refinement_table.resizeRowsToContents()
def get_columns_to_show(self, column_list):
    """Map datasource column IDs to their positions in the displayed header.

    For each database column name in `column_list`, translate it to its
    human-readable display name via self.all_columns_in_data_source (a list
    of [display_name, db_name] pairs), then return the index of that display
    name within self.header.  Unknown columns contribute nothing.
    """
    positions = []
    for db_column in column_list:
        # Translate the DB column ID into its displayed header text
        # (last matching pair wins, as in the original lookup).
        display_name = ''
        for pair in self.all_columns_in_data_source:
            if db_column == pair[1]:
                display_name = pair[0]
        # Record the position of that header text in the table header.
        for idx, header_text in enumerate(self.header):
            if display_name == header_text:
                positions.append(idx)
                break
    return positions
def get_rows_with_sample_id_not_null_from_datasource(self):
    """Count datasource rows with a usable Sample ID.

    A row counts only if its Sample ID is neither the string 'None'
    (case-insensitive) nor empty/whitespace-only — the same filter used by
    populate_and_update_datasource_table.

    Returns:
        int: number of rows in self.data with a non-null Sample ID.
    """
    sample_id_column = self.get_columns_to_show(['Sample ID'])
    n_rows = 0
    for row in self.data:
        sample_id = str(row[sample_id_column[0]])
        # BUG FIX: the original condition
        #   `if not x.lower() != 'none' or not x.replace(' ', '') == '':`
        # simplifies to `x.lower() == 'none' or x != blank`, so it counted
        # NULL ('None') rows and miscounted in general.  The intent is the
        # inverse of the skip test in populate_and_update_datasource_table.
        if sample_id.lower() != 'none' and sample_id.replace(' ', '') != '':
            n_rows += 1
    return n_rows
def update_data_source(self, sample, db_dict):
    """Persist the chosen data-collection record for `sample` to the datasource.

    NOTE(review): as written this only constructs the data_source handle and
    never performs any update — `sample` and `db_dict` are unused.  A write
    call appears to be missing; confirm against version history.
    """
    data_source = XChemDB.data_source(os.path.join(self.database_directory, self.data_source_file))
def quit_xce(self):
    """Save collected results to the PKL summary file (if present) and exit Qt."""
    # save pkl file
    if self.data_collection_dict != {}:
        if os.path.isfile(self.datasets_summary_file):
            self.update_log.insert('saving results to PKL file')
            # NOTE(review): the file handle from open() is never closed
            # explicitly; a `with` block would be safer.
            pickle.dump(self.data_collection_dict, open(self.datasets_summary_file, 'wb'))
    self.update_log.insert('quitting XCE... bye,bye!')
    QtGui.qApp.quit()
# Script entry point: launch the XChemExplorer GUI with any CLI arguments.
if __name__ == "__main__":
    app = XChemExplorer(sys.argv[1:])
    # "Debugging is twice as hard as writing the code in the first
    # place. Therefore, if you write the code as cleverly as
    # possible, you are, by definition, not smart enough to debug it."
    # -- Brian W. Kernighan
    # ^^ Who did this? :P
|
6,785 | 178f9dcd9cbea140abebd509b56979417b5d7503 | # Python implementation of Bubble Sort
def bubbleSort(arr):
    """Sort `arr` in place in ascending order using bubble sort.

    Improvement: tracks whether a pass performed any swap and stops early
    when none did, making the best case (already-sorted input) O(n)
    instead of O(n^2).  Worst case remains O(n^2).

    Args:
        arr: mutable sequence of mutually comparable items; sorted in place.
    """
    k = len(arr)
    # Traverse through all elements
    for i in range(k):
        swapped = False
        # Last i elements are already in correct place
        for j in range(0, k - i - 1):
            # Swap if element is greater than next element
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
                swapped = True
        # No swaps in a full pass means the array is sorted.
        if not swapped:
            break
# Demonstration: sort a small example list in place and show the result.
sample_values = [20, 345, 215, 112, 2, 33, 29]
bubbleSort(sample_values)
print("Sorted array: " + str(sample_values))
|
6,786 | 4436fa36ec21edb3be467f74d8b9705780535f22 | from common.utils import create_brokers
from Bot import DataGatherBot, ArbitrageBot
import api_config as config
### Defined as 'PAPER' = paper-trading mode (translated from Korean)
# brokers = create_brokers('PAPER', config.CURRENCIES, config.EXCHANGES)
# bot = ArbitrageBot(config, brokers)
# brokers = create_brokers('BACKTEST', config.CURRENCIES, config.EXCHANGES)
# bot = ArbitrageBot(config, brokers) # this automatically loads the data path file.
# backtest_data = '/Users/ericjang/Desktop/LiClipse_Workspace/btc_arbitrage/data/Mar-29-2014_19-00-35__20_14400.p'
# bot.backtest(backtest_data) # start should probably be modified to also allow time ranges (i.e. if i want to run my live trader for 2 hours)
# print('done!')
# Create LIVE brokers for every configured currency/exchange pair.
brokers = create_brokers('LIVE', config.CURRENCIES, config.EXCHANGES)
# NOTE(review): hard-coded third broker — presumably one specific exchange;
# confirm the ordering produced by create_brokers.
gp = brokers[2]
# gp.update_all_balances()
# gp.xchg.get_all_balances()
# gatherbot = DataGatherBot(config, brokers)
# maxdepth: number of order-book levels to check (-1) (translated from Korean)
# gatherbot.start(sleep=1, duration=60 * 60 * 4, maxdepth=4) # 5 hours of data, one minute intervals
# For the arbitrage bot: (translated from Korean)
trade_bot = ArbitrageBot(config, brokers)
# Poll once per second; runs until interrupted.
trade_bot.start(sleep=1)
print('Done!')
|
6,787 | 05186093820dffd047b0e7b5a69eb33f94f78b80 | #!/usr/bin/env python
'''
@author : Mitchell Van Braeckel
@id : 1002297
@date : 10/10/2020
@version : python 3.8-32 / python 3.8.5
@course : CIS*4010 Cloud Computing
@brief : A1 Part 2 - AWS DynamoDB ; Q2 - Query OECD
@note :
Description: There are many CSV files containing info from the OECD about agricultural production, each for various regions around the world.
Queries all 4 tables (northamerica, canada, usa, mexico -table names) based on a commodity (code key or label),
looking for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
then output the specific NA definition 'hit' results and probable conclusion for NA definition per variable,
as well as an overall conclusion for NA definition
NOTE: forgot to add ability to specify commodity as cmd line arg instead of STDIN
NOTE: assume year range is 2010 to 2029 (inclusive)
NOTE: assume perfect user input for commodity and variables
- however, if input commodity that's not a valid commodity code or label, exits program with error message
NOTE: NA definition hit refers to if the calculated sum from different tables of CAN, USA, MEX are equal to that of NA (CAN+USA, CAN+USA+MEX, or Neither)
'''
'''
IMPROVEMENT: Use 'encodings' table instead of the CSV file
'''
############################################# IMPORTS #############################################
# IMPORTS - 'pip install <import-package>'
import boto3
import csv
import sys
from boto3.dynamodb.conditions import Key, Attr
############################################ CONSTANTS ############################################
# TABLE CONSTANTS — DynamoDB table names for each region.
NORTH_AMERICA = "northamerica"
CANADA = "canada"
USA = "usa"
MEXICO = "mexico"
TABLE_LIST = [NORTH_AMERICA, CANADA, USA, MEXICO]
# Assumed year coverage of the data: 2010..2029 inclusive.
YEAR_RANGE = range(2010, 2030)
# OTHER CONSTANTS
# Fixed-width row layout for the results table (8 columns).
OUTPUT_FORMAT = "{:<8}{:<18}{:<18}{:<18}{:<18}{:<18}{:<18}{:<10}"
# CSV mapping code -> label for commodities and variables.
ENCODINGS_CSV = "encodings.csv"
#ENCODINGS_TABLE_NAME = "encodings"
USAGE_STATEMENT = "Usage: py queryOECD.py <commodity-code|commodity-label>"
############################## STATE VARIABLES, INITIALIZATION, MAIN ##############################
# MAIN - Declares global vars and state here, then ask for commodity (check both key/label),
# look for all common variables between CAN, USA, and MEX, outputting all results (for all years) in a table,
# then output the specific NA definition 'hit' results and probable conclusion for NA definition
def main():
    """Query the four regional DynamoDB tables for a commodity and infer the NA definition.

    Flow: validate args, connect to DynamoDB, verify the four tables exist,
    load code->label encodings from CSV, resolve the requested commodity,
    then for every variable common to all tables print a per-year comparison
    and an overall conclusion on whether 'North America' means CAN+USA,
    CAN+USA+MEX, or neither.
    """
    #globals
    global dynamodb_client
    global dynamodb_resource
    global na_table
    global canada_table
    global usa_table
    global mexico_table
    global total_can_usa
    global total_can_usa_mex
    global total_neither
    # ========== ARGUMENTS ==========
    # Collect command line arguments when executing this python script
    argc = len(sys.argv)
    bad_usage_flag = False
    # Check #of args (deal with it later tho)
    # 1 optional arg for commodity, otherwise prompt user for it
    if argc > 2:
        bad_usage_flag = True
        print("Error: Too many arguments.")
    # Exit with usage statement if flag has been triggered for any reason
    if bad_usage_flag:
        sys.exit(USAGE_STATEMENT)
    # ========== AWS DYNAMO DB ==========
    # Init AWS DynamoDB client and resource (NOTE: these are global)
    dynamodb_client = boto3.client("dynamodb")
    dynamodb_resource = boto3.resource("dynamodb")
    # Validate AWS DynamoDB credentials (by testing if 'list_tables()' works)
    try:
        dynamodb_client.list_tables()
    except Exception as e:
        print("Error: Invalid or expired credentials (or insufficient permissions to call 'list_tables()')")
        sys.exit(f"[ERROR] {e}")
    # Check the 4 tables exist, then get them all
    err_output = ""
    table_list = dynamodb_client.list_tables()['TableNames']
    print(f"Existing Tables: {table_list}")
    for t in TABLE_LIST:
        if t not in table_list:
            err_output += f"Error: Invalid table name '{t}' - table does not exist.\n"
    # Print all tables that did not exist, then exit
    if err_output != "":
        print(err_output.strip("\n"))
        sys.exit("ERROR: Terminating program because unable to get table that does not exist.")
    # Get all tables (after checking they exist) (NOTE: these are global)
    na_table = dynamodb_resource.Table(NORTH_AMERICA)
    canada_table = dynamodb_resource.Table(CANADA)
    usa_table = dynamodb_resource.Table(USA)
    mexico_table = dynamodb_resource.Table(MEXICO)
    # Open the encodings CSV file and read its contents
    commodity_encodings_dict = {}
    variable_encodings_dict = {}
    with open(ENCODINGS_CSV, "r", newline='') as csv_file:
        csv_content = csv.reader(csv_file, delimiter=',')
        # if field is var or commodity, set a key-value pair between code and label (in the respective map)
        for row in csv_content:
            if row[2] == "variable":
                variable_encodings_dict[row[0]] = row[1]
            elif row[2] == "commodity":
                commodity_encodings_dict[row[0]] = row[1]
        # NOTE: redundant — the `with` block already closes the file.
        csv_file.close()
    # Check args for commodity now, otherwise prompt user
    if argc == 2:
        commodity_input = sys.argv[1]
    else:
        # Ask user for commodity
        commodity_input = input("Commodity: ").strip()
    # Check if input exists as code key, otherwise try to convert assumed label to code key (if not a label, code will be None after)
    if commodity_input.upper() in commodity_encodings_dict:
        commodity_code = commodity_input.upper()
    else:
        commodity_code = convert_dict_label_to_code_key(commodity_input, commodity_encodings_dict)
    # Check if commodity found a code or None
    print(f"ENCODING: {commodity_code}")
    if commodity_code is None:
        print(f"Error: Commodity '{commodity_input}' was not found.")
        sys.exit("ERROR: Terminating program because input does not exist as an encoding commodity code or label.")
    # Init total accumulators for each category
    total_can_usa = 0
    total_can_usa_mex = 0
    total_neither = 0
    # iterate through each variable and analyze data (if applicable)
    for var in variable_encodings_dict.keys():
        if is_common_variable(commodity_code, var):
            output_table(commodity_code, var, variable_encodings_dict, commodity_encodings_dict)
    # Determine the NA definition for this variable based on #of 'hits' per year
    max_hits = max(total_can_usa, total_can_usa_mex, total_neither)
    if total_can_usa == max_hits:
        na_defn = "CAN+USA"
    elif total_can_usa_mex == max_hits:
        na_defn = "CAN+USA+MEX"
    else:
        na_defn = "Neither"
    print(f"Overall North America Definition Results: {total_can_usa} CAN+USA, {total_can_usa_mex} CAN+USA+MEX, {total_neither} Neither")
    print(f"Conclusion for all {commodity_encodings_dict[commodity_code]} variables = {na_defn}\n")
############################################ FUNCTIONS ############################################
# Converts the label of a dict into its code key, returns None if not a label
def convert_dict_label_to_code_key(label, encodings_dict):
    """Return the code key whose value equals `label`, or None if not found.

    Improvement: a single pass over items() replaces the original approach,
    which materialized the values list twice and the keys list once (three
    O(n) passes) to do the same reverse lookup.  The first match in
    iteration order is returned, matching the original's `.index()` result.

    Args:
        label: the human-readable label to look up.
        encodings_dict: mapping of code key -> label.

    Returns:
        The matching code key, or None if `label` is not a value in the dict.
    """
    for code, code_label in encodings_dict.items():
        if code_label == label:
            return code
    return None
# Check if a commodity code + variable is common across all 4 tables, return true if it is
def is_common_variable(commodity_code, variable):
    """Return True when all four regional tables hold at least one item
    for this commodity/variable pair (same short-circuit order as before:
    NA, Canada, USA, Mexico)."""
    regional_tables = (na_table, canada_table, usa_table, mexico_table)
    return all(
        has_commodity_and_variable(tbl, commodity_code, variable)
        for tbl in regional_tables
    )
# Check if a table has data for commodity code + variable (ie. scan table), returns true if at least 1 item is found
def has_commodity_and_variable(table, commodity_code, variable):
    """Scan `table` for items matching the given commodity code and variable;
    return True when at least one item is found."""
    matches = table.scan(
        FilterExpression = Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
    )
    return matches['Count'] > 0
# Retrieves and outputs table data based on commodity and variable and analyze for NA definition
def output_table(commodity_code, variable, variable_encodings_dict, commodity_encodings_dict):
    """Print the per-year value table for one variable and tally NA-definition hits.

    Scans all four regional tables for (commodity, variable), sorts by year,
    prints each year's values plus the CAN+USA and CAN+USA+MEX sums, and
    classifies each year as CAN+USA / CAN+USA+MEX / Neither depending on
    which sum equals the North America value.  Local tallies are printed and
    then folded into the module-level totals.

    Assumes every scan returns one item per year of YEAR_RANGE — TODO confirm.
    """
    # Bring in globals to modify
    global total_can_usa
    global total_can_usa_mex
    global total_neither
    # Init local accumulators
    temp_can_usa = 0
    temp_can_usa_mex = 0
    temp_neither = 0
    # Print table headers: common variable (for commodity code) across all 4 tables, and table column names
    print(f"Variable: {variable_encodings_dict[variable]}")
    print(OUTPUT_FORMAT.format("Year", "North America", "Canada", "USA", "Mexico", "CAN+USA", "CAN+USA+MEX", "NA Defn"))
    # Retrieve all data, from all years (ie. the items from the scan)
    na_scan_data = na_table.scan(
        FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
    )['Items']
    can_scan_data = canada_table.scan(
        FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
    )['Items']
    usa_scan_data = usa_table.scan(
        FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
    )['Items']
    mex_scan_data = mexico_table.scan(
        FilterExpression=Attr('commodity').eq(commodity_code) & Attr('variable').eq(variable)
    )['Items']
    # Sort each scan data by key (chronologically, via data_sort)
    na_scan_data.sort(key=data_sort)
    can_scan_data.sort(key=data_sort)
    usa_scan_data.sort(key=data_sort)
    mex_scan_data.sort(key=data_sort)
    # Analyze data
    for year in YEAR_RANGE:
        # For each relevant year, calculate total value using multiplication factor
        i = year - 2010
        na_value = na_scan_data[i]['value'] * (10**na_scan_data[i]['mfactor'])
        can_value = can_scan_data[i]['value'] * (10**can_scan_data[i]['mfactor'])
        usa_value = usa_scan_data[i]['value'] * (10**usa_scan_data[i]['mfactor'])
        mex_value = mex_scan_data[i]['value'] * (10**mex_scan_data[i]['mfactor'])
        # Calc temp sums for the CAN+USA and CAN+USA+MEX columns
        temp_can_usa_value = can_value + usa_value
        temp_can_usa_mex_value = can_value + usa_value + mex_value
        # Determine OECD def of NA, by checking if the temp calc sums from scan data calc values are equivalent to CAN+USA sum, CAN+USA+MEX sum, or Neither
        # Note: accumulate the #of accurate NA def 'hits'
        if temp_can_usa_value == na_value:
            na_defn = 'CAN+USA'
            temp_can_usa += 1
        elif temp_can_usa_mex_value == na_value:
            na_defn = 'CAN+USA+MEX'
            temp_can_usa_mex += 1
        else:
            na_defn = 'Neither'
            temp_neither += 1
        # Print table row for current year
        print(OUTPUT_FORMAT.format(year, na_value, can_value, usa_value, mex_value, temp_can_usa_value, temp_can_usa_mex_value, na_defn))
    # Determine the NA definition for this variable based on #of 'hits' per year
    max_hits = max(temp_can_usa, temp_can_usa_mex, temp_neither)
    if temp_can_usa == max_hits:
        na_defn = "CAN+USA"
    elif temp_can_usa_mex == max_hits:
        na_defn = "CAN+USA+MEX"
    else:
        na_defn = "Neither"
    print(f"North America Definition Results: {temp_can_usa} CAN+USA, {temp_can_usa_mex} CAN+USA+MEX, {temp_neither} Neither")
    print(f"Therefore we can conclude North America = {na_defn}\n")
    # Accumulate global totals using temp local accumulators for NA definition 'hits'
    total_can_usa += temp_can_usa
    total_can_usa_mex += temp_can_usa_mex
    total_neither += temp_neither
# Sorter Helper for queried data by year
def data_sort(elem):
    """Key function: sort scanned items chronologically by their 'year' field."""
    year = elem['year']
    return year
###################################################################################################
# IMPROVEMENT: guard the entry point so importing this module (e.g. for tests)
# does not immediately run the interactive query; running as a script behaves
# exactly as before.
if __name__ == "__main__":
    main()
|
6,788 | e5bf4518f3834c73c3743d4c711a8d1a4ce3b944 | # Generated by Django 3.2.5 on 2021-08-05 23:59
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled Lecture field 'is_requird' to 'is_required'."""

    # Must run after the previous lectures migration.
    dependencies = [
        ('lectures', '0003_auto_20210805_1954'),
    ]

    operations = [
        migrations.RenameField(
            model_name='lecture',
            old_name='is_requird',
            new_name='is_required',
        ),
    ]
|
6,789 | a8106c8f14e15706b12e6d157b889288b85bc277 | import random
import datetime
import userval
import file
from getpass import getpass
#SORRY FOR THE REDUNDANT CODE, I RAN OUT OF OPTIONS
def register():
    """Prompt for new-account details on stdin and store them in module globals.

    Sets the globals first, last, email, pin, password and accountName
    ("<last> <first>") for later use by welcome()/file.create().
    """
    global first,last,email,pin,password,accountName #prepared_user_details
    first=input("input firstname:")
    last=input("input lastname:")
    email=input("input email:")
    # NOTE(review): neither the 4-digit PIN length nor the e-mail format is
    # validated here.
    pin=input("input a four digit pin:")
    password=input("Input Password:")
    accountName = "{} {}".format(last,first)
    #prepared_user_details= first + "," + last + "," + email + "," + str(pin) + "," + password + "," + str(0)
#---------------------Account number generator-------------------------
def genAcc():
    """Return a 10-character numeric account number string.

    Every generated number starts with the fixed prefix '30' (the bank's
    marker) followed by eight pseudo-random digits.
    """
    random_digits = [str(random.randint(0, 9)) for _ in range(8)]
    return '30' + ''.join(random_digits)
#-----------------Transfer function---------------------
def transfer(tName, tNo, amount, tBankName):
    """Transfer `amount` out of the logged-in user's account and persist the balance.

    Args:
        tName: recipient account name.
        tNo: recipient account number.
        amount: amount to send (int).
        tBankName: recipient bank name.

    Returns False on a persistence failure, otherwise prints a receipt.
    """
    # BUG FIX: a transfer must *debit* the sender; the original added the
    # amount to the sender's balance.  Also reject transfers exceeding the
    # available balance, mirroring withdraw().
    if int(user[-1]) < amount:
        print("Sorry, not enough funds!")
        return False
    user[-1] = int(user[-1]) - amount
    newval = str(user[-1])
    try:
        file.update(user_acc_no,-1,newval)
    except FileNotFoundError:
        print("an issues occured due to network, try again later")
        return False
    # BUG FIX: '\A' in the original literal was a stray escape; '\n' intended.
    print("Tranfer successful! \nAccount name {} \nAccount number : {} \nAmount transferred : {} \nBank : {}".format(tName, tNo, amount, tBankName))
    print("Balance : ${}".format(user[-1]))
    tym =datetime.datetime.now()
    print(tym)
#-----------------deposit function-----------------------
def deposit(amount):
    """Credit `amount` to the logged-in user's balance and persist it.

    Updates the global `user` record in place, writes the new balance (as a
    string) through file.update, then prints a confirmation and timestamp.
    Returns False if the datasource file is missing.
    """
    user[-1] = int(user[-1]) + amount
    newval=user[-1]
    newval=str(newval)
    try:
        file.update(user_acc_no,-1,newval)
    except FileNotFoundError:
        print("an issues occured due to network, try again later")
        return False
    print("{} successful deposited".format(amount))
    print("your balance is ${}".format(user[-1]))
    tym =datetime.datetime.now()
    print(tym)
#------------------withdraw function---------------------------
def withdraw(amount):
    """Debit `amount` from the logged-in user's balance if funds allow, and persist.

    Prints an error (leaving the balance unchanged) when funds are
    insufficient.  Returns False on a persistence failure.
    """
    user[-1]=int(user[-1])
    if user[-1] > amount:
        user[-1] -= amount
        print("successful")
        print("your balance is ${}".format(user[-1]))
    else:
        print("Sorry, not enough funds!")
    # BUG FIX: the original called `str(newval)` and discarded the result, so
    # file.update received an int here while every other code path passes a
    # string.
    newval = str(user[-1])
    try:
        file.update(user_acc_no,-1,newval)
    except FileNotFoundError:
        print("an issues occured due to network, try again later")
        return False
    tym =datetime.datetime.now()
    print(tym)
#---------------------balance check function------------------------
def statement():
    """Print a short account statement: first name and current balance."""
    first_name, balance = user[1], user[-1]
    print("hi {} your balance is ${}.".format(first_name, balance))
#---------------------pin validation function------------------------
def pinval(val):
    """Return True if `val` matches the logged-in user's stored PIN (user[-3]).

    IMPROVEMENT: the original `if ...: return True else: return False` is
    collapsed into a direct boolean return.
    NOTE(review): the stored PIN comes from input() and is a string, so an
    int argument (as passed by the withdrawal flow) can never match — the
    caller should pass a string.
    """
    return val == user[-3]
#---------------------pin reset function---------------------------
def pinReset(val,val2):
    """Change the logged-in user's PIN when `val` and its confirmation `val2` match.

    Updates the global `user` record and persists via file.update; prints a
    timestamp either way.  Returns False if the datasource file is missing.
    """
    if val == val2:
        user[-3] = val
        print("Pin change successful")
        newval = user[-3]
        try:
            file.update(user_acc_no,-3,newval)
        except FileNotFoundError:
            print("an issues occured due to network, try again later")
            return False
    else:
        print("oops!! The two pin are not the same")
    tym =datetime.datetime.now()
    print(tym)
#-----------------password reset function-------------------------
def passReset(val, val2):
    """Change the logged-in user's password when `val` and confirmation `val2` match.

    Updates the global `user` record and persists via file.update; prints a
    timestamp either way.  Returns False if the datasource file is missing.
    """
    if val == val2:
        user[-2]= val
        print("Password change successful")
        newval = user[-2]
        try:
            file.update(user_acc_no,-2,newval)
        except FileNotFoundError:
            print("an issues occured due to network, try again later")
            return False
    else:
        print("Passwords not Matched")
    tym =datetime.datetime.now()
    print(tym)
#----------------------login function---------------------
def login():
    """Prompt for account number and password, authenticate, and open the menu.

    On success stores the record in the global `user` and enters operation();
    on failure recurses to re-prompt.
    NOTE(review): repeated failures deepen the recursion; a loop would avoid
    hitting the recursion limit.
    """
    global user_acc_no, user_password,user
    print("===================LOGIN PAGE=================")
    print("Enter your login details")
    user_acc_no = int(input("Enter username:"))
    user_password = getpass("Enter password:")
    user= file.authentication(user_acc_no, user_password)
    if user:
        operation(user)
    else:
        print("invalid account and password")
        login()
def welcome():
    """Top-level menu: register a new account or log into an existing one."""
    #---------------------------------main prompt---------------
    opt= input("Hello!, Welcome to Zuri Bank \n1. Register\n2.Login \n==>")
    #-----------------------------Registration Prompt--------------------------
    if opt == '1':
        print("============================ZURI BANK========================")
        print("Welcome please carefully follow the prompt and register your details\n Note please only input 1 or 2 ")
        register()
        accountNo = ""
        accountNo=genAcc()
        # Persist the new account; registration details come from the globals
        # set by register().
        is_user_created = file.create(accountNo,first,last,email,pin,password)
        if is_user_created:
            try:
                print("Registration Successful!!!\n your details are:\n Account name is {} \n Account number is {}".format(accountName,accountNo))
                login()
                tym =datetime.datetime.now()
                print(tym)
            except FileExistsError:
                print("sorry there was a issue in network connection, please try again")
                register()
            except ValueError:
                print("sorry there was a issue in network connection, please try again")
                register()
    elif opt == '2':
        login()
    else:
        print("Wrong input. Note: enter 1 or 2 to select")
def operation(user):
    """Main account menu: dispatch on the user's choice and recurse back here.

    Options: 1 transfer, 2 withdraw, 3 deposit, 4 change PIN, 5 reset
    password, 6 statement, 7 complaint, 8 logout; anything else exits.
    """
    print("==========================ZURI BANK===================")
    print("welcome {}".format(user[1] + ' ' + user[0]))
    print("Balance : ${}".format(user[-1]))
    print("Please input only 1,2,3,4,5,6, or 7")
    mainOpt=input("select an option: \n1. Transfer \n2. Withdrawal \n3. Deposit \n4. Change Pin \n5. Reset Password \n6. Account Statment\n7. Complaint\n8. Logout\n0. Exit \n==>")
    if mainOpt == '1':
        print("Balance = ${}".format(user[-1]))
        amount=int(input("Enter amount:"))
        tName=input("Enter account name:")
        tNo=input("Enter account Number:")
        tBankName=input("Enter Bank:")
        val=input("Enter PIN")
        if (pinval(val) == True):
            # Recipient account numbers are always 10 digits (see genAcc).
            if len(tNo) != 10:
                print("wrong account number, Note Account number must be 10 digit")
            else:
                transfer(tName,tNo,amount,tBankName)
                operation(user)
        else:
            print("wrong pin")
    elif mainOpt == '2':
        print("Balance = ${}".format(user[-1]))
        amount=int(input("Enter Amount:"))
        # NOTE(review): the PIN is cast to int here but stored as a string,
        # so pinval() can never succeed on this path — confirm and drop int().
        val=int(input("Enter transaction Pin:"))
        pinval(val)
        if pinval(val) == True:
            withdraw(amount)
            operation(user)
        else:
            print("oop!! wrong pin")
    elif mainOpt == '3':
        print("Balance = ${}".format(user[-1]))
        amount=int(input("Enter Amount:"))
        deposit(amount)
        operation(user)
    elif mainOpt == '4':
        val=input("Enter new pin:")
        val2=input("Confirm new pin:")
        pinReset(val,val2)
        operation(user)
    elif mainOpt == '5':
        val=input("Enter new password:")
        val2=input("Confirm new password:")
        passReset(val,val2)
        operation(user)
    elif mainOpt == '6':
        statement()
        operation(user)
    elif mainOpt == '7':
        comp=input("Enter complaint:")
        print("Thanks {} for reaching to us, we will get back to you shortly via your email:{}".format(user[1],user[3]))
        operation(user)
    elif mainOpt == '8':
        login()
    else:
        print("Thank you for banking with us!!!")
        exit()
welcome() |
6,790 | 8030bdb6c9f0b7114916d7abc245ff680d1fc917 | workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep'
# --- Training run configuration ---
seed = 300                # global RNG seed for reproducibility
n_fold = 2                # number of cross-validation folds
epoch = 50                # training epochs per fold
resume_from = None        # checkpoint path to resume from, if any
batch_size = 32
num_workers = 32          # DataLoader worker processes
imgsize = (768, 768) #(height, width)

# Loss: binary cross-entropy on raw logits.
loss = dict(
    name='BCEWithLogitsLoss',
    params=dict(),
)

# Optimizer: AdamW with decoupled weight decay.
optim = dict(
    name='AdamW',
    params=dict(
        lr=0.0003,
        betas=(0.9, 0.999),
        eps=1e-08,
        weight_decay=0.01,
    ),
)

model = dict(
    name='se_resnext50_32x4d'
)

# ImageNet channel statistics used for input normalization.
normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225],}
# Augmentation specs consumed by the transform builder:
# each is {'name': <transform class>, 'params': {...}}.
totensor = dict(name='ToTensor', params=dict(normalize=normalize))
crop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))
crop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))
rotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7))
hflip = dict(name='HorizontalFlip', params=dict(p=0.5))
'''
Additional augmentarions
------------------------
vflip = dict(name='VerticalFlip', params=dict(p=0.5,))
random_brightness_contrast = dict(name='RandomBrightnessContrast', params=dict(brightness_limit=0.2, contrast_limit=0.2, p=0.5))
#gaussian_blur = dict(name='GaussianBlur', params=dict(blur_limit=7, always_apply=False, p=0.5))
#iaa_emboss = dict(name='IAAEmboss', params=dict(alpha=(0.2, 0.5), strength=(0.2, 0.7), always_apply=False, p=0.5))
#iaa_sharpen = dict(name='IAASharpen', params=dict(alpha=(0.2, 0.5), lightness=(0.5, 1.0), always_apply=False, p=0.5))
hue_saturation_value = dict(name='HueSaturationValue', params=dict(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.4))
cut_out = dict(name='Cutout', params=dict(num_holes=8, max_h_size=546//8, max_w_size=546//8, fill_value=0, p=0.3))
blur = dict(name='Blur', params=dict(blur_limit=4, p=.25))
shift_scale_rotate = dict(name='ShiftScaleRotate', params=dict(shift_limit=0.2, scale_limit=0.2, rotate_limit=20, p=1))
'''
rotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7))
dicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9))
dicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.05, p=0.7))
elastic_transform = dict(name='ElasticTransform', params=dict(alpha=1, sigma=50, p=0.5))
grid_distortion = dict(name='GridDistortion', params=dict(), p=0.5)
window_policy = 1
data = dict(
train=dict(
dataset_type='CustomDataset',
annotations='./cache/train-runmila_2folds_seed123.pkl',
imgdir='./input/runmila_i768',
imgsize=imgsize,
n_grad_acc=2,
loader=dict(
shuffle=True,
batch_size=batch_size,
drop_last=True,
num_workers=num_workers,
pin_memory=False,
),
transforms=[crop, hflip, rotate, dicomnoise, totensor],
dataset_policy=1,
window_policy=window_policy,
),
valid = dict(
dataset_type='CustomDataset',
annotations='./cache/train-runmila_2folds_seed123.pkl',
imgdir='./input/runmila_i768',
imgsize=imgsize,
loader=dict(
shuffle=False,
batch_size=batch_size,
drop_last=False,
num_workers=num_workers,
pin_memory=False,
),
transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],
dataset_policy=1,
window_policy=window_policy,
),
test = dict(
dataset_type='CustomDataset',
annotations='./cache/test.pkl',
imgdir='./input/test_runmila_i768',
imgsize=imgsize,
loader=dict(
shuffle=False,
batch_size=batch_size,
drop_last=False,
num_workers=num_workers,
pin_memory=False,
),
transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],
dataset_policy=1,
window_policy=window_policy,
),
)
|
6,791 | 804c75b3ab0b115e5187d44e4d139cfb553269a9 | from django import template
from ..models import Article
# 得到django 负责管理标签和过滤器的类
# Library instance that registers this module's tags with Django's template engine.
register = template.Library()
@register.simple_tag
def getlatestarticle():
    """Template tag: return all Article objects ordered newest-first by ``atime``."""
    return Article.objects.all().order_by("-atime")
6,792 | 52f3000514fd39083daa6316d551f1685c7cea23 | from random import randint
class Game(object):
    """Simulation of the "porrinha" sticks-guessing game.

    Every player starts with 3 sticks, secretly holds 0..total of them each
    round, and guesses the grand total; a correct guess discards one stick,
    and the first player to reach 0 sticks wins.
    """
    def __init__(self, players):
        # NOTE(review): 'IA 1' and 'IA 2' are hard-coded below, so `players`
        # must contain both names or this raises KeyError.
        if len(players) < 2:
            raise ValueError('Number of player must be at least 2')
        self.play_order = players
        self.player_data = {}
        for player in self.play_order:
            # [total, on_hand, hunch]  (4th slot: is-AI flag)
            self.player_data[player] = [3, None, None, False]
        self.player_data['IA 1'][3] = True
        self.player_data['IA 2'][3] = True
    def game_loop(self):
        """Play rounds until someone runs out of sticks; return the winner's name."""
        while not self.won():
            hunches = []
            for player, data in self.player_data.items():
                print("Jogador: {}".format(player))
                # NOTE(review): both branches are identical — AI players draw a
                # random hand exactly like everyone else.
                if (data[3]):
                    data[1] = randint(0, data[0])
                else:
                    data[1] = randint(0, data[0])
                print("Palitos na mão: {}\n".format(data[1]))
            for player in self.play_order:
                print("Jogador: {}".format(player))
                if (self.player_data[player][3]):
                    # AI player: use the estimating strategy in hunch().
                    hunch = self.hunch(player, hunches)
                    self.player_data[player][2] = hunch
                else:
                    # random hunch
                    hunch = randint(0, self.max())
                    while hunch in hunches:
                        hunch = randint(0, self.max())
                    self.player_data[player][2] = hunch
                    # human hunch
                    # hunch = int(input("Qual seu palpite?\n"))
                    # while (hunch in hunches):
                    #     hunch = int(input("Palpite invalido. \nQual seu palpite?\n"))
                    # self.player_data[player][2] = hunch
                print("Palpite: {}\n".format(hunch))
                hunches.append(hunch)
            winner = self.round_won()
            print("Soma dos palitos: {}".format(self.sum()))
            if winner:
                print("{} ganhou a rodada\n".format(winner))
                self.player_data[winner][0] -= 1
                # The round winner opens the next round.
                self.play_order.remove(winner)
                self.play_order.insert(0, winner)
            else:
                print("Ninguém ganhou :(\n")
            print(("-" * 10) + " nova rodada " + ("-" * 10))
            self.reset()
        for player, data in self.player_data.items():
            if data[0] == 0:
                print("{} ganhou o jogo".format(player))
                return player
    def hunch(self, player, hunches):
        """Estimate the total for an AI player from earlier players' guesses.

        Returns a guess not already present in *hunches* and within [0, max()].
        """
        # the initial hunch is at least the player's own stick count
        hunch = self.player_data[player][1]
        rand = 0
        sticks = []
        stik = 0
        # estimate earlier players' sticks from the hunches they announced
        for other_player in self.play_order[0:self.play_order.index(player)]:
            # average over the players after that one
            average = self.average(self.play_order[self.play_order.index(other_player):len(self.play_order) - 1])
            # estimated sticks held by that player
            stik = self.player_data[other_player][2] - average[0]
            # drop sticks that were already accounted for
            for stick in sticks:
                stik -= stick
            sticks.append(stik)
            # rounding leftovers accumulate as expected randomness
            rand += average[1]
            hunch += stik
        # average over the remaining players
        average = self.average(self.play_order[self.play_order.index(player):len(self.play_order) - 1])
        # a fractional result (.5) adds 1 to the randomness
        rand += average[1]
        # estimated value, plus half of the randomness
        hunch += average[0] + rand // 2
        # if the guess is taken (or out of range), pick the nearest free value,
        # starting on the side closest to the overall average
        if (self.average(self.play_order)[0] > hunch):
            i = 0
            while (hunch in hunches) or (hunch > self.max()) or (hunch < 0):
                i += 1
                if i % 2 == 0:
                    hunch -= i
                else:
                    hunch += i
        else:
            i = 0
            while (hunch in hunches) or (hunch > self.max()) or (hunch < 0):
                i += 1
                if i % 2 == 0:
                    hunch += i
                else:
                    hunch -= i
        # return the guess
        return hunch
    def average(self, remaining_players):
        # NOTE(review): divides by 2 regardless of how many players were passed
        # in — confirm this is intentional rather than `// len(remaining_players)`.
        result = 0
        for player in remaining_players:
            result += self.player_data[player][0]
        # return [quotient, remainder-flag] (1 when the division was inexact)
        return [result // 2, result % 2]
    def max(self):
        # Upper bound for any guess: total sticks still in play. (Shadows builtin max.)
        total = 0
        for player in self.play_order:
            total += self.player_data[player][0]
        return total
    def reset(self):
        # Clear per-round state (hand and hunch) for all players.
        for player, data in self.player_data.items():
            data[1] = None
            data[2] = None
    def round_won(self):
        # Return the player whose hunch equals the actual total, else None.
        sum = self.sum()
        for player, data in self.player_data.items():
            if data[2] == sum:
                return player
        return None
    def won(self):
        # True once any player has discarded all of their sticks.
        for player, data in self.player_data.items():
            if data[0] == 0:
                return True
        return False
    def sum(self):
        # Total sticks held on hand this round. (Shadows builtin sum.)
        sum = 0
        for player, data in self.player_data.items():
            sum += data[1]
        return sum
if __name__ == '__main__':
    # Run n games between three random players and the two AIs, then
    # report how many games each player won.
    players = ['Rand A', 'Rand B', 'Rand C', 'IA 1', 'IA 2']
    n = 1
    wins = {player: 0 for player in players}
    for _ in range(n):
        winner = Game(players).game_loop()
        if winner:
            wins[winner] += 1
    print("\nRelatório:")
    for player, win_count in wins.items():
        print("{} ganhou {} vezes".format(player, win_count))
|
6,793 | 7e33475a6ab7ad0d1e9d7d00b8443329e265fe69 | def printPar():
for i in range(len(par)):
print "par[{0:d}] = {1:d}".format(i,par[i])
def printImpar():
    # Print every stored odd number with its index (Python 2 print statement).
    for i in range(len(impar)):
        print "impar[{0:d}] = {1:d}".format(i,impar[i])
# Read 15 integers, routing evens and odds into separate groups; whenever a
# group collects its 5th value it is printed and cleared. (Python 2 script.)
par = []
impar = []
for i in range(15):
    n= int(raw_input())
    if n%2 == 0:
        if len(par)<4:
            par.append(n)
        elif len(par)==4:
            # fifth even value completes the group: print it and start over
            par.append(n)
            printPar()
            par = []
    else:
        if len(impar)<4:
            impar.append(n)
        elif len(impar)==4:
            # fifth odd value completes the group: print it and start over
            impar.append(n)
            printImpar()
            impar = []
# flush any partially-filled groups at the end of input
if len(impar)>0:
    printImpar()
if len(par)>0:
    printPar()
|
6,794 | 430dff54da986df4e3a68018d930735c757d49d0 | import time
import json
from threading import Thread

# Load the persisted key-value store from file.json. If the file is missing
# or holds invalid JSON, start with an empty store and create the file so
# that later json.dump calls have a place to write.
# (Previously: bare `except` swallowing everything, and the fallback file
# handle was opened with "w+" and never closed.)
try:
    with open('file.json') as f:
        name = json.load(f)
except (OSError, ValueError):
    with open("file.json", "w+") as f:
        pass
    name = {}
def create(k, v, t='0'):
    """Create key *k* with numeric-string value *v* and optional TTL *t* (seconds).

    A TTL of 0 means the key never expires. Prints a status or error message
    and persists the store to file.json on success.
    """
    if k in name:
        print("ERROR:The data already exists")
        return
    if not k.isalpha():
        print("ERROR:INVALID KEY INPUT (ALPHABETS ONLY)")
        return
    if not (v.isnumeric() and t.isnumeric()):
        print("ERROR:INVALID INPUT (NUMERIC ONLY)")
        return
    v = int(v)
    t = int(t)
    if not (len(name) < (1024 * 1020 * 1024) and v <= (16 * 1024 * 1024)):
        print("ERROR:MEMORY Exceeded!!!")
        return
    if len(k) > 32:
        print("ERROR:Key length Exceeded")
        return
    # Stored entry layout: [value, absolute-expiry-timestamp-or-0]
    entry = [v, 0] if t == 0 else [v, time.time() + t]
    name[k] = entry
    print("Key is created")
    with open('file.json', 'w') as json_file:
        json.dump(name, json_file)
def read(k):
    """Print the value stored under *k* as "key-value", or an error when the
    key is missing or its TTL has expired; then rewrite file.json."""
    if k not in name:
        print("ERROR:Key does not exists Enter a valid key!!")
    else:
        value, expiry = name[k]
        if expiry != 0 and time.time() >= expiry:
            print("ERROR: " + k + " Time expired")
        else:
            print(k + "-" + str(value))
    # NOTE(review): the store is rewritten even though read() never mutates it.
    with open('file.json', 'w') as js:
        json.dump(name, js)
def delete(k):
    """Delete key *k* from the store and persist the change.

    Expired keys are reported instead of deleted, mirroring read(). Fixes the
    doubled "ERROR:ERROR:" prefix in the expiry message and de-duplicates the
    two identical delete-and-dump branches.
    """
    if k not in name:
        print("ERROR:Key does not exists Enter a valid key!!")
        return
    entry = name[k]
    # entry layout: [value, absolute-expiry-timestamp-or-0]
    if entry[1] != 0 and time.time() >= entry[1]:
        print("ERROR: " + k + " Time expired")
        return
    del name[k]
    print("Key (" + k + ") is deleted")
    with open('file.json', 'w') as js:
        json.dump(name, js)
6,795 | 76382f353c47747ee730d83c2d3990049c4b0d98 | ##
## Copyright (C) by Argonne National Laboratory
## See COPYRIGHT in top-level directory
##
import re
import os
class G:
    """Global parse state shared by the spec loader and the code generators."""
    pmi_vers = []  # one dict per spec file: {"version": ..., CMDNAME: {...}, ...}
    cmd_list = []  # command names, in first-seen order across all versions
    cmd_hash = {}  # command name -> first-seen command dict
class RE:
    """Wrappers around re.match/re.search that stash the last match in RE.m,
    Perl-style. Called through the class (RE.match(...)), so the functions
    intentionally take no self."""
    m = None
    def match(pat, str, flags=0):
        RE.m = re.match(pat, str, flags)
        return RE.m
    def search(pat, str, flags=0):
        RE.m = re.search(pat, str, flags)
        return RE.m
def main():
    """Parse both PMI wire-protocol specs and regenerate the C sources."""
    # run from pmi top_srcdir
    load_pmi_txt("maint/pmi-1.1.txt", "1.1")
    load_pmi_txt("maint/pmi-2.0.txt", "2.0")
    dump_all()
def load_pmi_txt(pmi_txt, ver):
    """Parse one maint/pmi-*.txt spec into G.pmi_vers / G.cmd_list / G.cmd_hash.

    The spec format: "NAME:" opens a command section; indented "Q:"/"R:" lines
    give the query/response wire names; further indented "attr-name: KIND ..."
    lines list attributes; bare "[" / "]" lines mark array sections.
    """
    cur_hash = {"version": ver}
    G.pmi_vers.append(cur_hash)
    prev_cmd = None
    cur_cmd = None
    cur_attrs = None
    with open(pmi_txt, "r") as In:
        for line in In:
            if RE.match(r'([A-Z]+):', line):
                # New command section.
                name = RE.m.group(1)
                cur_cmd = {"version": ver} # query-name, query-attrs, response-name, response-attrs
                cur_hash[name] = cur_cmd
                if name not in G.cmd_hash:
                    G.cmd_list.append(name)
                    G.cmd_hash[name] = cur_cmd
                    prev_cmd = None
                else:
                    prev_cmd = G.cmd_hash[name]
            elif RE.match(r'\s+([QR]):\s*([\w-]+)(.*)', line):
                # Query ("Q:") or response ("R:") wire-name line.
                QR, cmd, tail = RE.m.group(1, 2, 3)
                cur_attrs = []
                if QR == "Q":
                    cur_cmd["query-name"] = cmd
                    if RE.match(r'.*wire=.+', tail):
                        # spawn - we'll manually code it
                        cur_cmd["query-attrs"] = []
                    else:
                        cur_cmd["query-attrs"] = cur_attrs
                else:
                    cur_cmd["response-name"] = cmd
                    cur_cmd["response-attrs"] = cur_attrs
            elif RE.match(r'\s+([\w-]+):\s*([A-Z]+)(.*)', line):
                # Attribute line: [name, KIND, trailing options like optional=...].
                name, kind, tail = RE.m.group(1, 2, 3)
                cur_attrs.append([name, kind, tail])
            elif RE.match(r'\s+([\[\]])', line):
                # Array bracket marker, stored as a bare '[' or ']' entry.
                cur_attrs.append(RE.m.group(1))
def dump_all():
    """Emit src/pmi_msg.h and src/pmi_msg.c from the parsed command specs."""
    def dump_enums(Out):
        # Command-id enum, one entry per command in first-seen order.
        print("enum PMIU_CMD_ID {", file=Out)
        print(" PMIU_CMD_INVALID,", file=Out)
        for NAME in G.cmd_list:
            print(" PMIU_CMD_%s," % NAME, file=Out)
        print("};", file=Out)
        print("", file=Out)
    def dump_decls(Out):
        # Prototypes for the per-command set/get helpers (header file).
        std_query="struct PMIU_cmd *pmi_query, int version, bool is_static"
        std_response="struct PMIU_cmd *pmi_query, struct PMIU_cmd *pmi_resp, bool is_static"
        std_get="struct PMIU_cmd *pmi"
        for NAME in G.cmd_list:
            name = NAME.lower()
            v_list = []
            for v in G.pmi_vers:
                if NAME in v:
                    v_list.append(v[NAME])
            v0 = v_list[0]
            decls = []
            if "query-name" in v0:
                if len(v0["query-attrs"]):
                    params = get_set_params(v0["query-attrs"])
                    decls.append("void PMIU_msg_set_query_%s(%s, %s);" % (name, std_query, params))
                    params = get_get_params(v0["query-attrs"])
                    decls.append("int PMIU_msg_get_query_%s(%s, %s);" % (name, std_get, params))
            if "response-name" in v0:
                if len(v0["response-attrs"]):
                    params = get_set_params(v0["response-attrs"])
                    decls.append("int PMIU_msg_set_response_%s(%s, %s);" % (name, std_response, params))
                    params = get_get_params(v0["response-attrs"])
                    decls.append("int PMIU_msg_get_response_%s(%s, %s);" % (name, std_get, params))
            if len(decls):
                print("/* PMIU_CMD_%s */" % NAME, file=Out)
                for l in decls:
                    print(l, file=Out)
    def dump_cmd_to_id(Out):
        # C function mapping a wire command string to its enum id.
        print("int PMIU_msg_cmd_to_id(const char *cmd)", file=Out)
        print("{", file=Out)
        t_if = " if"
        for NAME in G.cmd_list:
            cmp_list = []
            prev = {}
            for v in G.pmi_vers:
                if NAME in v and "query-name" in v[NAME]:
                    t = v[NAME]["query-name"]
                    if t not in prev:
                        cmp_list.append("strcmp(cmd, \"%s\") == 0" % t)
                        prev[t] = 1
            if len(cmp_list):
                print(t_if + " (" + ' || '.join(cmp_list) + ") {", file=Out)
                print(" return PMIU_CMD_%s;" % NAME, file=Out)
                t_if = " } else if"
        print(" } else {", file=Out)
        print(" return PMIU_CMD_INVALID;", file=Out)
        print(" }", file=Out)
        print("}", file=Out)
    def dump_id_to_str(Out, query):
        # C function mapping an enum id back to its v1/v2 wire name
        # (query = "query" or "response").
        namekey = query + "-name"
        print("const char *PMIU_msg_id_to_%s(int version, int cmd_id)" % query, file=Out)
        print("{", file=Out)
        print(" switch(cmd_id) {", file=Out)
        for NAME in G.cmd_list:
            cmp_list = []
            prev = {}
            for v in G.pmi_vers:
                if NAME in v and namekey in v[NAME]:
                    t = v[NAME][namekey]
                    if t not in prev:
                        cmp_list.append(t)
                        prev[t] = 1
            if len(cmp_list) > 0:
                print(" case PMIU_CMD_%s:" % NAME, file=Out)
                if len(cmp_list) == 1:
                    print(" return \"%s\";" % cmp_list[0], file=Out)
                else:
                    print(" return (version == PMIU_WIRE_V1) ? \"%s\" : \"%s\";" % (cmp_list[0], cmp_list[1]), file=Out)
        print(" default:", file=Out)
        print(" return NULL;", file=Out)
        print(" }", file=Out)
        print("}", file=Out)
    def dump_id_to_response(Out):
        # NOTE(review): dead code — never called; dump_id_to_str(Out, "response")
        # is used instead (see the msg_c writer below).
        print("const char *PMIU_msg_id_to_response(int version, int cmd_id)", file=Out)
        print("{", file=Out)
        print(" switch(cmd_id) {", file=Out)
        print(" default:", file=Out)
        print(" return NULL;", file=Out)
        print(" }", file=Out)
        print("}", file=Out)
    def dump_funcs(Out):
        # Bodies of the per-command set/get helpers (source file).
        std_query="struct PMIU_cmd *pmi_query, int version, bool is_static"
        std_response="struct PMIU_cmd *pmi_query, struct PMIU_cmd *pmi_resp, bool is_static"
        std_get="struct PMIU_cmd *pmi"
        def dump_if_version(t_if, version, is_set, is_query):
            # Open an if/else-if branch keyed on the wire-protocol version.
            if re.match(r"1\.", version):
                ver = "PMIU_WIRE_V1"
            else:
                ver = "PMIU_WIRE_V2"
            if is_set:
                if is_query:
                    print(t_if + " (version == %s) {" % ver, file=Out)
                else:
                    print(t_if + " (pmi_query->version == %s) {" % ver, file=Out)
            else:
                print(t_if + " (pmi->version == %s) {" % ver, file=Out)
        def dump_attrs(spaces, is_set, is_query, attrs, attrs0):
            # Emit one add/get call per attribute. Returns the count of
            # non-optional getters (those can jump to fn_fail).
            non_optional = 0
            for i in range(len(attrs)):
                a = attrs[i]
                var = get_var(attrs0[i][0])
                if is_query:
                    pmi = "pmi_query"
                else:
                    pmi = "pmi_resp"
                if a[1] == "INTEGER":
                    kind = "int"
                elif a[1] == "STRING":
                    kind = "str"
                elif a[1] == "BOOLEAN":
                    kind = "bool"
                else:
                    raise Exception("Unhandled kind: " + a[1])
                if is_set:
                    pmiu = "PMIU_cmd_add_" + kind
                    print(spaces + "%s(%s, \"%s\", %s);" % (pmiu, pmi, a[0], var), file=Out)
                else:
                    if RE.match(r'.*optional=(\S+)', a[2]):
                        dflt = RE.m.group(1)
                        pmiu = "PMIU_CMD_GET_%sVAL_WITH_DEFAULT" % kind.upper()
                        print(spaces + "%s(pmi, \"%s\", *%s, %s);" % (pmiu, a[0], var, dflt), file=Out)
                    else:
                        pmiu = "PMIU_CMD_GET_%sVAL" % kind.upper()
                        print(spaces + "%s(pmi, \"%s\", *%s);" % (pmiu, a[0], var), file=Out)
                        non_optional += 1
            return non_optional
        def dump_it(NAME, v_list, is_set, is_query, attrs):
            # Emit one full helper function (set/get x query/response).
            print("", file=Out)
            ret_errno = True
            if is_set:
                params = get_set_params(attrs)
                if is_query:
                    ret_errno = False
                    print("void PMIU_msg_set_query_%s(%s, %s)" % (name, std_query, params), file=Out)
                else:
                    print("int PMIU_msg_set_response_%s(%s, %s)" % (name, std_response, params), file=Out)
            else:
                params = get_get_params(attrs)
                if is_query:
                    print("int PMIU_msg_get_query_%s(%s, %s)" % (name, std_get, params), file=Out)
                else:
                    print("int PMIU_msg_get_response_%s(%s, %s)" % (name, std_get, params), file=Out)
            print("{", file=Out)
            if ret_errno:
                print(" int pmi_errno = PMIU_SUCCESS;", file=Out)
                print("", file=Out)
            if is_set:
                if is_query:
                    print(" PMIU_msg_set_query(pmi_query, version, PMIU_CMD_%s, is_static);" % NAME, file=Out)
                else:
                    print(" PMIU_Assert(pmi_query->cmd_id == PMIU_CMD_%s);" % NAME, file=Out)
                    print(" pmi_errno = PMIU_msg_set_response(pmi_query, pmi_resp, is_static);", file=Out)
            # attrs_b: the same command's attrs in the second spec version, if any.
            attrs_b = None
            if len(v_list) > 1:
                if is_query:
                    attrs_b = v_list[1]["query-attrs"]
                else:
                    attrs_b = v_list[1]["response-attrs"]
            non_optional = 0
            if attrs_b is None or attrs_identical(attrs, attrs_b):
                non_optional += dump_attrs(" ", is_set, is_query, attrs, attrs)
            else:
                # Versions differ: emit a version-dispatched body.
                dump_if_version(" if", v_list[0]["version"], is_set, is_query)
                non_optional += dump_attrs(" ", is_set, is_query, attrs, attrs)
                dump_if_version(" } else if", v_list[1]["version"], is_set, is_query)
                non_optional += dump_attrs(" ", is_set, is_query, attrs_b, attrs)
                if ret_errno:
                    print(" } else {", file=Out)
                    print(" PMIU_ERR_SETANDJUMP(pmi_errno, PMIU_FAIL, \"invalid version\");", file=Out)
                    non_optional += 1
                print(" }", file=Out)
            if non_optional > 0:
                print("", file=Out)
                print(" fn_exit:", file=Out)
                print(" return pmi_errno;", file=Out)
                print(" fn_fail:", file=Out)
                print(" goto fn_exit;", file=Out)
            elif ret_errno:
                print("", file=Out)
                print(" return pmi_errno;", file=Out)
            print("}", file=Out)
        for NAME in G.cmd_list:
            name = NAME.lower()
            v_list = []
            for v in G.pmi_vers:
                if NAME in v:
                    v_list.append(v[NAME])
            v0 = v_list[0]
            if "query-name" in v0:
                if len(v0["query-attrs"]):
                    dump_it(NAME, v_list, True, True, v0["query-attrs"])
                    dump_it(NAME, v_list, False, True, v0["query-attrs"])
            if "response-name" in v0:
                if len(v0["response-attrs"]):
                    dump_it(NAME, v_list, True, False, v0["response-attrs"])
                    dump_it(NAME, v_list, False, False, v0["response-attrs"])
    # ----------------------
    # Write the header and source files.
    msg_h = "src/pmi_msg.h"
    msg_c = "src/pmi_msg.c"
    with open(msg_h, "w") as Out:
        dump_copyright(Out)
        INC = get_include_guard(msg_h)
        print("#ifndef %s" % INC, file=Out)
        print("#define %s" % INC, file=Out)
        print("", file=Out)
        dump_enums(Out)
        print("", file=Out)
        dump_decls(Out)
        print("", file=Out)
        print("#endif /* %s */" % INC, file=Out)
    with open(msg_c, "w") as Out:
        dump_copyright(Out)
        for inc in ["pmi_config", "mpl", "pmi_util", "pmi_common", "pmi_wire", "pmi_msg"]:
            print("#include \"%s.h\"\n" % inc, file=Out)
        dump_cmd_to_id(Out)
        print("", file=Out)
        dump_id_to_str(Out, "query")
        print("", file=Out)
        dump_id_to_str(Out, "response")
        print("", file=Out)
        dump_funcs(Out)
#---- utils ------------------------------------
def get_set_params(attrs):
    """Render the C parameter list for a setter from an attribute table.

    Bracket markers ('[' / ']') are single-character entries and are skipped.
    """
    rendered = [get_kind(entry[1]) + get_var(entry[0]) for entry in attrs if len(entry) == 3]
    return ', '.join(rendered)
def get_get_params(attrs):
    """Render the C parameter list for a getter (each attribute by pointer)."""
    rendered = [get_kind(entry[1]) + '*' + get_var(entry[0]) for entry in attrs if len(entry) == 3]
    return ', '.join(rendered)
def get_var(name):
    """Map a wire attribute name to a valid C identifier."""
    return name.replace("-", "_")
def get_kind(kind):
    """Map a spec attribute KIND to its C type prefix (includes trailing space/star)."""
    c_types = {"INTEGER": "int ", "STRING": "const char *", "BOOLEAN": "bool "}
    if kind not in c_types:
        raise Exception("unexpected kind " + kind)
    return c_types[kind]
def attrs_identical(attrs_a, attrs_b):
    """Return True when two attribute tables are element-wise identical.

    Entries are either [name, kind, tail] triples or single-character
    bracket markers ('[' / ']') as produced by load_pmi_txt. The previous
    implementation indexed a[0]/a[1]/a[2] on every entry and raised
    IndexError on the one-character marker strings; comparing whole
    entries is equivalent for the triples and safe for the markers.
    """
    if len(attrs_a) != len(attrs_b):
        return False
    return all(a == b for a, b in zip(attrs_a, attrs_b))
# ---- dump utils -----------------------------------------
def dump_copyright(out):
    """Write the Argonne copyright banner plus the auto-generated notice."""
    banner = (
        "/*",
        " * Copyright (C) by Argonne National Laboratory",
        " * See COPYRIGHT in top-level directory",
        " */",
        "",
        "/* ** This file is auto-generated, do not edit ** */",
        "",
    )
    for line in banner:
        print(line, file=out)
def get_include_guard(h_file):
    """Derive the include-guard macro from a header path,
    e.g. 'src/pmi_msg.h' -> 'PMI_MSG_H_INCLUDED'."""
    basename = h_file.rsplit('/', 1)[-1]
    return basename.replace('.', '_').upper() + "_INCLUDED"
# ---------------------------------------------------------
# Script entry point: regenerate the PMI message marshalling sources.
if __name__ == "__main__":
    main()
|
6,796 | 06caee24b9d0bb78e646f27486b9a3a0ed5f2502 | #ribbon_a and ribbon_b are the two important variables here
ribbon_a=None
ribbon_b=None
#Notes:
# - As it turns out, the internal ADC in the Teensy is NOT very susceptible to fluctuations in the Neopixels' current...BUT...the ADS1115 IS.
#   Therefore, I think a better model would ditch the ADS1115 altogether, replacing it with a simple 8x toggleable amp for dual touches.
# - Shouldn't cause errors. No SCL/SDA pullup means the board isn't connected; no I2C device at 0x48 means that individual chip isn't powered or connected, etc. I just fixed a bad solder joint that took a while to flare up - maybe this is what happened with the old lightwave? I may have been too quick with the solder joints, leaving a bubble that lost contact under stress later on.
__all__=['ribbon_a','ribbon_b']
from urp import *
import time
import board
import busio
import adafruit_ads1x15.ads1115 as ADS
from collections import OrderedDict
from adafruit_ads1x15.ads1x15 import Mode
from adafruit_ads1x15.analog_in import AnalogIn as ADS1115_AnalogIn
from digitalio import DigitalInOut, Direction, Pull
from analogio import AnalogIn as Internal_AnalogIn
from tools import *
import storage
from linear_modules import *
import lightboard.neopixels as neopixels
import time
from micropython import const
i2c = busio.I2C(board.SCL, board.SDA, frequency=1000000)# Create the I2C bus with a fast frequency
#I2C addresses for ADS1115's: 0x48 and 0x4a for Ribbon A and Ribbon B respectively
ads_a = ADS.ADS1115(i2c,address=0x48)
ads_b = ADS.ADS1115(i2c,address=0x4a)
data_rate=const(860) # Maximum number of samples per second
ads_a.data_rate = data_rate
ads_b.data_rate = data_rate
ads_gain_single=const(1)
ads_gain_dual =const(8) #Uses 100kΩ
#Change the gains depending on whether you're measuring dual or single touches
ads_a.gain=ads_gain_single
ads_b.gain=ads_gain_single
# Ribbon A channels: ADS1115 pins P0-P2 plus the internal ADC on D26 for the middle trace.
ads_a_a0 = ADS1115_AnalogIn(ads_a, ADS.P0)
ads_a_a1 = ADS1115_AnalogIn(ads_a, ADS.P1)
ads_a_a2 = ADS1115_AnalogIn(ads_a, ADS.P2)
ads_a_single=ads_a_a0
ads_a_dual_top=ads_a_a1
ads_a_dual_b=ads_a_a2
rib_a_mid = Internal_AnalogIn(board.D26)
# Ribbon B channels, mirroring ribbon A; internal ADC on D27.
ads_b_a0 = ADS1115_AnalogIn(ads_b, ADS.P0)
ads_b_a1 = ADS1115_AnalogIn(ads_b, ADS.P1)
ads_b_a2 = ADS1115_AnalogIn(ads_b, ADS.P2)
ads_b_single=ads_b_a0
ads_b_dual_top=ads_b_a1
ads_b_dual_b=ads_b_a2
rib_b_mid = Internal_AnalogIn(board.D27)
# GPIO outputs driving the transistors that switch between single- and dual-touch sensing.
single_pull=DigitalInOut(board.D32)
single_pin =DigitalInOut(board.D31)
dual_pin_2 =DigitalInOut(board.D25)
dual_pin_1 =DigitalInOut(board.D24)
single_pull.direction=Direction.OUTPUT
single_pin .direction=Direction.OUTPUT
dual_pin_2 .direction=Direction.OUTPUT
dual_pin_1 .direction=Direction.OUTPUT
def activate_single_touch_transistors():
    """Switch the analog frontend into single-touch sensing mode."""
    for pin, state in ((single_pin, True), (dual_pin_1, False), (dual_pin_2, False)):
        pin.value = state
def activate_dual_touch_transistors():
    """Switch the analog frontend into dual-touch sensing mode."""
    for pin, state in ((single_pin, False), (dual_pin_1, True), (dual_pin_2, True)):
        pin.value = state
class I2CError(OSError):
    """OSError subclass used to signal I2C communication problems."""
    pass
class Ribbon:
    """One touch-sensing ribbon: its ADC channels plus persisted
    touch-reading -> neopixel-position calibrations."""
    ADS_BIN_SIZE=100  # histogram bin width for ADS1115-based calibrations
    RIB_BIN_SIZE=100  # histogram bin width for the internal-ADC calibration
    CALIBRATION_FOLDER='/generated/calibrations/ribbons'
    def __init__(self,name,rib_mid,ads,ads_single,ads_dual_top,ads_dual_bot):
        """Bind one ribbon's ADC channels and load its saved calibrations.

        name: label used in calibration filenames and UI text
        rib_mid: internal-ADC AnalogIn for the ribbon's middle trace
        ads: this ribbon's ADS1115 instance
        ads_single / ads_dual_top / ads_dual_bot: ADS1115 channels
        """
        self.name=name
        self.rib_mid=rib_mid
        self.ads=ads
        self.ads_single=ads_single
        self.ads_dual_top=ads_dual_top
        self.ads_dual_bot=ads_dual_bot
        # Per-ribbon calibration files, auto-loaded if they exist.
        dual_touch_top_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_dual_touch_top_to_neopixel_calibration' )
        dual_touch_bot_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_dual_touch_bot_to_neopixel_calibration' )
        single_touch_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_single_touch_to_neopixel_calibration' )
        cheap_single_touch_to_neopixel_calibration_path = path_join(self.CALIBRATION_FOLDER,self.name+'_cheap_single_touch_to_neopixel_calibration')
        self.dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=dual_touch_top_to_neopixel_calibration_path ,auto_load=True)
        self.dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=dual_touch_bot_to_neopixel_calibration_path ,auto_load=True)
        self.single_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=single_touch_to_neopixel_calibration_path ,auto_load=True)
        self.cheap_single_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.RIB_BIN_SIZE,file_path=cheap_single_touch_to_neopixel_calibration_path,auto_load=True)
        self.previous_gate=False
        self.dual_num_fingers=0
        # Noise-filter tuning for the dual-touch channels.
        dual_filter_moving_average_length=3
        dual_filter_soft_tether_size=.1
        dual_filter_tether_size=.05
        self.dual_bot_filter=NoiseFilter(moving_average_length=dual_filter_moving_average_length,soft_tether_size=dual_filter_soft_tether_size,tether_size=dual_filter_tether_size)
        self.dual_top_filter=NoiseFilter(moving_average_length=dual_filter_moving_average_length,soft_tether_size=dual_filter_soft_tether_size,tether_size=dual_filter_tether_size)
        self.cheap_single_filter=NoiseFilter(moving_average_length=1,soft_tether_size=.3,tether_size=.01,moving_median_length=1)
    @property
    def is_calibrated(self):
        """True once all four touch->neopixel calibrations have been fitted."""
        return self.dual_touch_top_to_neopixel_calibration .is_fitted and \
               self.dual_touch_bot_to_neopixel_calibration .is_fitted and \
               self.single_touch_to_neopixel_calibration .is_fitted and \
               self.cheap_single_touch_to_neopixel_calibration.is_fitted
    def dual_touch_reading(self):
        """Take a raw dual-touch reading (top + bottom channels)."""
        reading=DualTouchReading(self)
        #DualTouchReading objects don't have a gate as of right now (though they will probably soon - we can get the gate by comparing the top value to the bot value and setting a threshold)
        return reading
    def single_touch_reading(self):
        """Take a raw single-touch reading; remember its gate state."""
        reading=SingleTouchReading(self)
        self.previous_gate=reading.gate
        return reading
    def cheap_single_touch_reading(self):
        """Take a fast single-touch reading via the internal ADC; remember its gate state."""
        reading=CheapSingleTouchReading(self)
        self.previous_gate=reading.gate
        return reading
    def processed_single_touch_reading(self,blink=False):
        """Take a calibrated single-touch reading; remember its gate state."""
        # if not self.is_calibrated: #Unnessecary CPU time...its cheap but so unimportant...
        # print("Ribbon.processed_single_touch_reading: Warning: This ribbon is not calibrated!")
        reading=ProcessedSingleTouchReading(self,blink=blink)
        self.previous_gate=reading.gate
        return reading
    def processed_cheap_single_touch_reading(self,blink=False):
        """Take a calibrated cheap single-touch reading; remember its gate state."""
        reading=ProcessedCheapSingleTouchReading(self,blink=blink)
        self.previous_gate=reading.gate
        return reading
    def processed_dual_touch_reading(self,blink=False):
        """Take a calibrated dual-touch reading; remember its gate state."""
        reading=ProcessedDualTouchReading(self,blink=blink)
        self.previous_gate=reading.gate
        return reading
def run_calibration(self,samples_per_pixel=25):
import lightboard.display as display
import lightboard.neopixels as neopixels
import lightboard.buttons as buttons
import lightboard.widgets as widgets
buttons.metal_press_viewer.value #Reset it - so it doesn't immediately press by accident
def ask_to_try_again():
if widgets.input_yes_no("Would you like to try calibrating again?"):
self.run_calibration(samples_per_pixel)
start_from_scratch = True # widgets.input_yes_no('Start from scratch?\nNo: Modify current calibration\nYes: Create entirely new calibration')
dual_touch_top_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=self.dual_touch_top_to_neopixel_calibration .file_path,auto_load=not start_from_scratch)
dual_touch_bot_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=self.dual_touch_bot_to_neopixel_calibration .file_path,auto_load=not start_from_scratch)
single_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.ADS_BIN_SIZE,file_path=self.single_touch_to_neopixel_calibration .file_path,auto_load=not start_from_scratch)
cheap_single_touch_to_neopixel_calibration = HistogramFitter(bin_size=self.RIB_BIN_SIZE,file_path=self.cheap_single_touch_to_neopixel_calibration.file_path,auto_load=not start_from_scratch)
buttons.metal_button.color=(255,0,255)
def show_instructions():
display.set_text('Running calibration on ribbon '+self.name+'\nPlease press the glowing green buttons until\nthe red dot is barely on the ribbon')
buttons.set_green_button_lights(1,1,0,0)
show_instructions()
button_press_next_neopixel=buttons.ButtonPressViewer(buttons.green_button_1)
button_press_prev_neopixel=buttons.ButtonPressViewer(buttons.green_button_3)
def display_neopixel_calibration(cursor_index,r,g,b,highlighted_pixels=[]):
nonlocal calibrated_pixels
neopixels.draw_all_off()
for pixel in highlighted_pixels:
neopixels.draw_dot(pixel,0,10,0)
neopixels.draw_dot(cursor_index,r,g,b)
neopixels.refresh()
i=0
i=neopixels.first
display_neopixel_calibration(i,63,0,0)
buttons.metal_press_viewer.value #Reset it - so it doesn't immediately press by accident
while True:
reading=self.cheap_single_touch_reading()
if reading.gate:
break
refresh_flag=False
if button_press_next_neopixel.value:
i+=1
refresh_flag=True
if button_press_prev_neopixel.value:
i-=1
refresh_flag=True
if refresh_flag:
i=min(neopixels.length-1,max(0,i))
display_neopixel_calibration(i,63,0,0)
if buttons.metal_press_viewer.value:
if widgets.input_yes_no("Do you want to cancel calibration?\n(All progress will be lost)"):
#NOTE: This code block is duplicated!
ask_to_try_again()
return
else:
show_instructions()
button_press_skip =buttons.ButtonPressViewer(buttons.green_button_1)
button_press_back =buttons.ButtonPressViewer(buttons.green_button_3)
button_press_finished=buttons.ButtonPressViewer(buttons.green_button_2)
buttons.set_green_button_lights(1,1,0,0)
def show_instructions():
display.set_text('Running calibration on ribbon '+self.name+'\nPlease press cyan dots on ribbon\nuntil they become orange\nPress the 2rd green button when you\'re done\n(If the 2rd green button isnt lit, calibrate at least two points)\nPress button 1 to skip the current dot\nPress button 3 to go back a dot')
show_instructions()
finished=False
calibrated_pixels=set()
while not finished:
i=max(0,min(i,neopixels.length-1))
display_neopixel_calibration(i,0,63,63,calibrated_pixels)
dual_a_samples=[]
dual_b_samples=[]
single_samples=[]
cheap_samples =[]
pixel_num_samples=0
buttons.metal_press_viewer.value #Reset it - so it doesn't immediately press by accident
while True:
buttons.green_button_3.light=len(calibrated_pixels)>=2
if buttons.metal_press_viewer.value:
if widgets.input_yes_no("Do you want to cancel calibration?\n(All progress will be lost)"):
#NOTE: This code block is duplicated!
ask_to_try_again()
return
else:
show_instructions()
if button_press_skip.value:
break
if button_press_back.value:
i-=2
break
if button_press_finished.value and len(calibrated_pixels)>=2:
if widgets.input_yes_no("Do you want to test your calibration?\nYes: Test it!\nNo: I'm done calibrating!"):
#This UI is a bit janky....should use better messages. But whatevs...this is just calibration after all...
with buttons.TemporaryButtonLights():
self.test_smooth_demo(single_touch_to_neopixel_calibration,dual_touch_top_to_neopixel_calibration,dual_touch_bot_to_neopixel_calibration)
show_instructions()
elif widgets.input_yes_no("Are you sure your're done\ncalibrating this ribbon?"):
finished=True
break
else:
show_instructions()
if len(cheap_samples)>=samples_per_pixel:
dual_touch_top_to_neopixel_calibration .add_sample(median(dual_a_samples),i)
dual_touch_bot_to_neopixel_calibration .add_sample(median(dual_b_samples),i)
single_touch_to_neopixel_calibration .add_sample(median(single_samples),i)
cheap_single_touch_to_neopixel_calibration.add_sample(median(cheap_samples ),i)
calibrated_pixels.add(i)
break
if self.cheap_single_touch_reading().gate:
with neopixels.TemporarilyTurnedOff():
cheap_single_touch_reading=self.cheap_single_touch_reading()
single_touch_reading =self.single_touch_reading()
dual_touch_reading =self.dual_touch_reading()
if single_touch_reading.gate and cheap_single_touch_reading.gate:
dual_a_samples.append(dual_touch_reading .raw_a )
dual_b_samples.append(dual_touch_reading .raw_b )
single_samples.append(single_touch_reading .raw_value)
cheap_samples .append(cheap_single_touch_reading.raw_value)
pixel_num_samples+=1
else:
#Accidently remove finger? Cancel it...try again.
dual_a_samples.clear()
dual_b_samples.clear()
single_samples.clear()
cheap_samples .clear()
i+=1
display_neopixel_calibration(i,63,31,0,calibrated_pixels)
while self.cheap_single_touch_reading().gate:
pass
buttons.set_green_button_lights(0,0,0,0)
buttons.metal_button.color=(0,1,1)
neopixels.turn_off()
display.set_text('Finished calibration on ribbon '+self.name+'\nTry the ribbon out to see if you like it\nAlso rinting out sensor values to serial for a demo\n(Watch in the arduino plotter)\nPress the metal button when you\'re done')
while not buttons.metal_press_viewer.value:
if self.cheap_single_touch_reading().gate:
with neopixels.TemporarilyTurnedOff():
cheap_single_touch_reading=self.cheap_single_touch_reading()
single_touch_reading =self.single_touch_reading()
dual_touch_reading =self.dual_touch_reading()
dual_top = dual_touch_top_to_neopixel_calibration(dual_touch_reading .raw_a )
dual_bot = dual_touch_bot_to_neopixel_calibration(dual_touch_reading .raw_b )
single = single_touch_to_neopixel_calibration (single_touch_reading.raw_value)
cheap_single=cheap_single_touch_to_neopixel_calibration(cheap_single_touch_reading.raw_value)
if cheap_single_touch_reading.gate and single_touch_reading.gate:
neopixels.display_dot(int(cheap_single),0,128,0)
print(dual_top,dual_bot,single,cheap_single)
self.test_smooth_demo(single_touch_to_neopixel_calibration,dual_touch_top_to_neopixel_calibration,dual_touch_bot_to_neopixel_calibration)
if widgets.input_yes_no("Would you like to save this\ncalibration for ribbon "+self.name+"?"):
self.dual_touch_top_to_neopixel_calibration = dual_touch_top_to_neopixel_calibration
self.dual_touch_bot_to_neopixel_calibration = dual_touch_bot_to_neopixel_calibration
self.single_touch_to_neopixel_calibration = single_touch_to_neopixel_calibration
self.cheap_single_touch_to_neopixel_calibration = cheap_single_touch_to_neopixel_calibration
self.dual_touch_top_to_neopixel_calibration .save_to_file()
self.dual_touch_bot_to_neopixel_calibration .save_to_file()
self.single_touch_to_neopixel_calibration .save_to_file()
self.cheap_single_touch_to_neopixel_calibration.save_to_file()
display.set_text("Saved calibrations for ribbon "+self.name+"!")
time.sleep(2)
else:
display.set_text("Cancelled. No calibrations were saved.")
time.sleep(2)
ask_to_try_again()
return
def test_smooth_demo(
        self,
        single_touch_to_neopixel_calibration=None,
        dual_touch_top_to_neopixel_calibration=None,
        dual_touch_bot_to_neopixel_calibration=None):
    """Interactive demo: draw heavily-smoothed single-touch and dual-touch
    positions on the neopixel strip until the metal button is pressed.

    Any calibration argument left as None falls back to the calibration saved
    on this ribbon, so the demo can preview a freshly-made calibration before
    it is saved.
    """
    import lightboard.buttons as buttons
    import lightboard.neopixels as neopixels
    import lightboard.display as display
    if single_touch_to_neopixel_calibration is None:
        single_touch_to_neopixel_calibration = self.single_touch_to_neopixel_calibration
    if dual_touch_top_to_neopixel_calibration is None:
        dual_touch_top_to_neopixel_calibration = self.dual_touch_top_to_neopixel_calibration
    if dual_touch_bot_to_neopixel_calibration is None:
        dual_touch_bot_to_neopixel_calibration = self.dual_touch_bot_to_neopixel_calibration
    buttons.metal_button.color = (1, 0, 1)
    buttons.set_green_button_lights(0, 0, 0, 0)
    # BUG FIX: the original never substituted the ribbon name into the %s placeholder.
    display.set_text("Smooth demo for ribbon %s\nPress metal to exit" % self.name)
    # This is a show-offy demo lol. Try miscalibrating it such that a tiny
    # vibrato makes it move from one side of the lightwave to the other...
    def mean(values):
        values = list(values)
        return sum(values) / len(values)
    class SuperSmooth:
        # A linear module created from the original code of this demo.
        # When DISCRETE is True, it's so sensitive that it can recognize
        # individual ADS readings without noise when the finger is still.
        # Used to smooth ADS readings.
        def __init__(self):
            self.DISCRETE = True
            self.N = 10    # moving-average window length
            self.V = []    # rolling window of raw values
            self.tet2 = Tether(1)
            self.tether = SoftTether(size=5)
            self.value = None
        def __call__(self, raw_value):
            self.V.append(raw_value)
            while len(self.V) > self.N:
                del self.V[0]
            smoothed = self.tether(mean(self.V))
            if self.DISCRETE:
                smoothed = self.tet2(int(smoothed))
            self.value = smoothed
            return smoothed
        def clear(self):
            self.V.clear()
            self.tether.value = None
    super_smooth_single  = SuperSmooth()
    super_smooth_dual_top = SuperSmooth()
    super_smooth_dual_bot = SuperSmooth()
    while not buttons.metal_press_viewer.value:
        single = self.single_touch_reading()
        if single.gate:
            dual = self.dual_touch_reading()
            val_top = dual_touch_top_to_neopixel_calibration(super_smooth_dual_top(dual.raw_a))
            val_bot = dual_touch_bot_to_neopixel_calibration(super_smooth_dual_bot(dual.raw_b))
            val = single_touch_to_neopixel_calibration(super_smooth_single(single.raw_value))
            neopixels.draw_all_off()
            neopixels.draw_dot(floor(val_top), 0, 30, 15)
            neopixels.draw_dot(floor(val_bot), 15, 30, 0)
            neopixels.draw_dot(floor(val), 64, 0, 128)
            neopixels.refresh()
        else:
            # Finger lifted: reset the smoothers so the next touch starts clean.
            super_smooth_single.clear()
            super_smooth_dual_top.clear()
            super_smooth_dual_bot.clear()
            neopixels.turn_off()
class NoiseFilter:
    # This is a LinearModule: a chain of smoothing stages applied in order
    # (moving average -> soft tether -> tether -> moving median).
    # It should be cleared whenever the gate is off, so stale state from the
    # previous touch doesn't bleed into the next one.
    def __init__(self,moving_average_length=10,
                 soft_tether_size         =5,
                 tether_size              =1,
                 moving_median_length     =1):
        self.moving_average=MovingAverage(moving_average_length)
        self.soft_tether=SoftTether(size=soft_tether_size)
        self.tether=Tether(size=tether_size)
        self.moving_median=MovingMedian(moving_median_length)
    def __call__(self,value):
        # Pipe the raw value through each stage; each further stabilizes it.
        value=self.moving_average(value)
        value=self.soft_tether   (value)
        value=self.tether        (value)
        value=self.moving_median (value)
        return value
    def clear(self):
        # Reset every stage (call this whenever the touch gate goes off).
        self.soft_tether   .clear()
        self.tether        .clear()
        self.moving_average.clear()
        self.moving_median .clear()
    def copy(self):
        #Create a duplicate filter with the same parameters
        # NOTE(review): moving_median_length is NOT forwarded here, so the copy
        # always gets the default (1). Looks like an oversight whenever this
        # filter was constructed with moving_median_length != 1 — confirm.
        return NoiseFilter(self.moving_average.length,self.soft_tether.size,self.tether.size)
class SingleTouchReading:
    """One single-touch position sample, read twice (with the pull pin low then
    high) through the ADS1115. The gap between the two reads detects whether
    the ribbon is being touched at all."""
    # BUG FIX: 'ribbon' added to __slots__ — __init__ assigns self.ribbon, and
    # with __slots__ (and no __dict__) that assignment raises AttributeError
    # under CPython. (MicroPython/CircuitPython ignore __slots__, which is why
    # this went unnoticed on-device.)
    __slots__=['ribbon','gate','raw_lower','raw_upper','raw_gap','raw_value']
    GATE_THRESHOLD=500 #This needs to be calibrated after observing the raw_gap when touching and not touching the ribbon. You can do this automatically with some fancy algorithm, or you can just look at the serial monitor while printing reading.raw_gap over and over again
    def __init__(self,ribbon):
        self.ribbon=ribbon
        self.read_raw_lower()
        self.read_raw_upper()
        self.process_readings()
    def prepare_to_read(self):
        # Route the analog mux to the single-touch configuration and set the
        # ADS1115 into single-shot mode at the single-touch gain.
        activate_single_touch_transistors()
        self.ribbon.ads.mode=ADS.Mode.SINGLE
        self.ribbon.ads.gain=ads_gain_single
    def read_raw_lower(self):
        # Sample with the pull pin pulled low.
        single_pull.value=False
        self.prepare_to_read()
        try:
            self.raw_lower=self.ribbon.ads_single.value
        except OSError as exception:
            # I2C hiccups surface as OSError; re-raise as the project's error type.
            raise I2CError(exception)
    def read_raw_upper(self):
        # Sample with the pull pin pulled high.
        single_pull.value=True
        self.prepare_to_read()
        try:
            self.raw_upper=self.ribbon.ads_single.value
        except OSError as exception:
            raise I2CError(exception)
    def process_readings(self):
        # A touch collapses the gap between the pulled-low and pulled-high
        # samples, so a small gap means the ribbon is pressed (gate True).
        self.raw_gap=abs(self.raw_upper-self.raw_lower)
        self.gate=self.raw_gap<self.GATE_THRESHOLD
        self.raw_value=(self.raw_upper+self.raw_lower)/2
class ContinuousSingleTouchReading(SingleTouchReading):
    #Should be similar to SingleTouchReading, but much faster when not using DualTouchReading
    #WARNING AND TODO: This class isn't currently doing enough to flush out anything. Perhaps continuous can use the CheapSingleTouchReading's gate, and a single non-wobbling single_pull value
    # BUG FIX: prepare_to_read was a @staticmethod that referenced `self`
    # (a guaranteed NameError when called) and the module-level `ads` instead of
    # this ribbon's ADS. Made it an instance method, consistent with the parent.
    def prepare_to_read(self):
        activate_single_touch_transistors()
        self.ribbon.ads.mode=ADS.Mode.CONTINUOUS
        self.ribbon.ads.gain=ads_gain_single
        #Flush out the current reading of the ADC, in-case we changed single_pull in the middle of the ADS's reading (which happens 99% of the time if we don't do this lol - making detecting the gate practically useless)
        self.ribbon.ads_single.value
class CheapSingleTouchReading(SingleTouchReading):
    """Single-touch reading taken with the Teensy's internal ADC (up to ~6000
    reads/sec) instead of the ADS1115. Faster but noisier."""
    #TODO: The Teensy's internal ADC is wonked. Between around raw values 30000 and 35000, it jumps (whereas the ADS1115 doesn't jump).
    #      Calibration with respect to the ADS1115's non-cheap single touch should mitigate this problem
    #      Even though the raw range is the same for both analog_in and ads_single, we need a larger GATE_THRESHOLD for CheapSingleTouchReading because of this flaw in Teensy's ADC.
    #TODO: Implement a variation of the SingleTouchReading class called quick-gate check via the Teensy's internal ADC to save a bit of time and get more accurate results on the dual touch readings (because then we can check both upper and lower both before and after the dual readings which means less spikes)
    #GATE_THRESHOLD is proportional to a threshold of the voltage gap between LOW and HIGH
    #When GATE_THRESHOLD is small, there are less unwanted jumps when barely pressing the ribbon. But if its too small, it won't register touches.
    GATE_THRESHOLD=1500 #This was measured to be a good value for most of the ribbon
    GATE_THRESHOLD=4000 #But, the ribbon has a kink in the middle that jumps a lot voltage over the space of a milimeter. (This second assignment deliberately overrides the one above; 4000 is the effective value.)
    def read_raw_lower(self):
        # Note: statement order differs from the parent on purpose — here the
        # transistors/ADS are prepared first, then the pull pin is set, then the
        # Teensy pin is sampled directly.
        self.prepare_to_read()
        single_pull.value=False
        self.raw_lower=self.ribbon.rib_mid.value
    def read_raw_upper(self):
        self.prepare_to_read()
        single_pull.value=True
        self.raw_upper=self.ribbon.rib_mid.value
class DualTouchReading:
    """Raw two-point touch sample: one ADS1115 read from the top end of the
    ribbon (raw_a) and one from the bottom end (raw_b)."""
    # BUG FIX: 'ribbon' added to __slots__ — __init__ assigns self.ribbon, and
    # with __slots__ (and no __dict__) that assignment raises AttributeError
    # under CPython.
    __slots__ = ['ribbon','raw_a','raw_b']
    def __init__(self,ribbon):
        self.ribbon=ribbon
        self.prepare_to_read()
        try:
            self.raw_a=self.ribbon.ads_dual_top.value
            self.raw_b=self.ribbon.ads_dual_bot.value
        except OSError as exception:
            # I2C hiccups surface as OSError; re-raise as the project's error type.
            raise I2CError(exception)
    def prepare_to_read(self):
        # Route the analog mux to the dual-touch configuration at the dual gain.
        activate_dual_touch_transistors()
        self.ribbon.ads.gain=ads_gain_dual
class ProcessedDualTouchReading:
    """Gate-checked dual-touch sample, calibrated into neopixel space.
    If self.gate is False, .bot/.top/.mid/.old/.new were never measured and
    must not be read. With two fingers on the ribbon, .top should sit above
    .bot after calibration."""
    # BUG FIX: 'ribbon' added to __slots__ — __init__ assigns self.ribbon, and
    # with __slots__ (and no __dict__) that assignment raises AttributeError
    # under CPython.
    __slots__=['ribbon','gate','bot','top','mid','num_fingers','old','new']
    DELTA_THRESHOLD=-4 # A distance, measured in neopixel widths, that the two dual touches can be apart from one another before registering as not being touched. (This is because, as it turns out, it can sometimes take more than one sample for dual touch values to go all the way back to the top after releasing your finger from the ribbon)
    #You want to calibrate DELTA_THRESHOLD such that it's high enough to keep good readings once you release your finger, but low enough that it doesn't require pressing down too hard to activate.
    #DELTA_THRESHOLD can be a negative value.
    #DELTA_THRESHOLD might need to be changed if you calibrate with a pencil eraser instead of your fingertip, because the pencil eraser is a narrower touch area etc.
    #You should always calibrate using your finger for this reason...
    TWO_TOUCH_THRESHOLD=2 #A distance, measured in neopixel widths, that the dual readings must be apart from each other to register as two touches
    TWO_TOUCH_THRESHOLD_SLACK=.05 #A bit of hysteresis used here...like a tether. Basically, to prevent flickering on the boundary, to switch between two touch and one touch you must move this much distance.
    def __init__(self,ribbon,blink=False):
        #If self.gate is False, your code shouldn't try to check for a .bot, .top, or .middle value - as it was never measured
        #If your fingers are pressing the ribbon in two different places, after calibration the 'top' value should be above the 'bot' value
        self.ribbon=ribbon
        def clear_filters():
            ribbon.cheap_single_filter.clear()
            ribbon.dual_bot_filter.clear()
            ribbon.dual_top_filter.clear()
        previous_gate=ribbon.previous_gate
        single_before=ribbon.processed_cheap_single_touch_reading()
        if not single_before.gate:
            #Don't waste time with the dual touch reading if one of the gates is False
            self.gate=False
            clear_filters()
            return
        with neopixels.TemporarilyTurnedOff() if blink else EmptyContext():
            dual_reading=ribbon.dual_touch_reading()
            single_after=ribbon.cheap_single_touch_reading()
        if not single_after.gate:
            self.gate=False
            clear_filters()
            return
        if not previous_gate:
            clear_filters()
        self.gate=True #single_before.gate and single_after.gate
        #TODO: Lower the DELTA_THRESHOLD and use self.middle whenever it gets too crazy; that way we can have maximum sensitivity and never miss a sample...
        raw_mid=(single_before.raw_value+single_after.raw_value)/2
        raw_top=dual_reading.raw_a
        raw_bot=dual_reading.raw_b
        top=raw_top
        bot=raw_bot
        mid=raw_mid
        # Map raw ADC values into neopixel coordinates.
        top=ribbon.dual_touch_top_to_neopixel_calibration(top)
        bot=ribbon.dual_touch_bot_to_neopixel_calibration(bot)
        mid=ribbon.cheap_single_touch_to_neopixel_calibration(mid)
        mid=ribbon.cheap_single_filter(mid)
        #I made a mistake on the lightboard...one of the resistors is too large or small (probably resistor tolerance issues)
        #As a result, one of the ribbons' dual touches doesn't work on the far ends of the ribbon
        #When this happens, the ADS's reading saturates to 32767 (with the current gain)
        #Instead of decreasing resolution by turning down the gain, or leaving a touch area unuseable, I'll just do this:
        #Note: Another valid solution is turning down the ADS1115's gain. This will solve the problem but decrease resolution...
        if int(raw_top)==32767: top=mid
        if int(raw_bot)==32767: bot=mid
        delta=top-bot
        # Hysteresis state machine for the one-vs-two-finger decision.
        # old_num_fingers=ribbon.dual_num_fingers
        # changed_num_fingers=False
        if delta<=self.DELTA_THRESHOLD:
            ribbon.dual_num_fingers=1
        elif not previous_gate:
            ribbon.dual_num_fingers = 2 if delta>self.TWO_TOUCH_THRESHOLD else 1
        elif ribbon.dual_num_fingers == 1 and delta>self.TWO_TOUCH_THRESHOLD+self.TWO_TOUCH_THRESHOLD_SLACK:
            ribbon.dual_num_fingers = 2
        elif ribbon.dual_num_fingers == 2 and delta<self.TWO_TOUCH_THRESHOLD-self.TWO_TOUCH_THRESHOLD_SLACK:
            ribbon.dual_num_fingers = 1
        self.num_fingers=ribbon.dual_num_fingers
        # if changed_num_fingers:
        #     clear_filters()
        if self.num_fingers==1:
            #Even if the two-touches can't be used, we can still use the single cheap touch value
            #Originally, this set gate to False. Now it doesn't.
            bot=top=mid
        elif bot>top:
            #The only time self.bot>self.top is when you're barely pressing on the ribbon at all...
            #...we can average these two values out to get a single, more reasonable value
            bot=top=(bot+top)/2
        #The older and newer dual touch positions. Only different when num_fingers>1
        if not hasattr(ribbon,'previous_dual_old'):
            ribbon.previous_dual_old=mid
        old,new=sorted([bot,top],key=lambda pos:abs(pos-ribbon.previous_dual_old))
        self.top=ribbon.dual_top_filter(top)
        self.bot=ribbon.dual_bot_filter(bot)
        self.mid=mid
        self.old=old
        self.new=new
        ribbon.previous_dual_old=old
class ProcessedSingleTouchReading:
    """Gate-checked single-touch reading mapped into neopixel space.
    .raw_value and .value exist only when .gate is True (same contract as
    the other Processed* readings)."""
    def __init__(self,ribbon,blink=False):
        self.ribbon=ribbon
        # The cheap gate pre-check is only needed when the ribbon was not
        # already pressed last sample (short-circuit keeps the fast path fast).
        if ribbon.previous_gate or ribbon.cheap_single_touch_reading().gate:
            context=neopixels.TemporarilyTurnedOff() if blink else EmptyContext()
            with context:
                reading=ribbon.single_touch_reading()
            self.gate=reading.gate
        else:
            self.gate=False
        if self.gate:
            self.raw_value=reading.raw_value
            self.value=ribbon.single_touch_to_neopixel_calibration(self.raw_value)
class ProcessedCheapSingleTouchReading:
    """Gate-checked cheap (Teensy-ADC) single-touch reading, calibrated and
    noise-filtered into neopixel space. .raw_value/.value exist only when
    .gate is True."""
    def __init__(self,ribbon,blink=False):
        self.ribbon=ribbon
        context=neopixels.TemporarilyTurnedOff() if blink else EmptyContext()
        with context:
            if not ribbon.previous_gate:
                # The first sample after an idle period can spike for some
                # reason, so take and discard one reading.
                ribbon.cheap_single_touch_reading()
            reading=ribbon.cheap_single_touch_reading()
            self.gate=reading.gate
            if not self.gate:
                ribbon.cheap_single_filter.clear()
            else:
                self.raw_value=reading.raw_value
                calibrated=ribbon.cheap_single_touch_to_neopixel_calibration(self.raw_value)
                self.value=ribbon.cheap_single_filter(calibrated)
def test_ribbon_raw_uart(ribbon):
    """Stream raw (uncalibrated) cheap/single/dual ribbon values over serial so
    they can be watched in an Arduino-style plotter. Exits on metal button."""
    import lightboard.buttons as buttons
    import lightboard.display as display
    display.set_text('Running raw uart test\nPress metal button\nto end this test\n\nThe green buttons show\ncheap_gate and single_gate\n(They\'re just for display)')
    buttons.set_green_button_lights(0,0,0,0)
    buttons.metal_button.color=(255,0,0)
    while True:
        cheap_reading =ribbon.cheap_single_touch_reading()
        single_reading=ribbon.single_touch_reading()
        dual_reading  =ribbon.dual_touch_reading()
        line='%s %i %i %.5f %.5f %.5f %.5f'%(
            ribbon.name,
            int(cheap_reading.gate),
            int(single_reading.gate),
            cheap_reading.raw_value,
            single_reading.raw_value,
            dual_reading.raw_a,
            dual_reading.raw_b,
        )
        print(line)
        # The first two green buttons mirror the two gates (display only).
        buttons.set_green_button_lights(cheap_reading.gate,single_reading.gate,0,0)
        if buttons.metal_press_viewer.value:
            buttons.metal_button.color=(0,0,0)
            display.set_text('Running raw uart test:\nDone!')
            break
def test_ribbon_dual_touch(ribbon):
    """Visual dual-touch demo: draws the top/bot/mid positions on the neopixel
    strip whenever the gate is on. Exits when the metal button is pressed."""
    import lightboard.buttons as buttons
    import lightboard.display as display
    display.set_text('Running dual-touch test on\nRibbon %s\n\nWhen yellow dot, one touch\nWhen white dot, two touches\n\nPress metal to exit'%ribbon.name)
    buttons.set_green_button_lights(0,0,0,0)
    buttons.metal_button.color=(255,0,0)
    while not buttons.metal_press_viewer.value:
        reading=ribbon.processed_dual_touch_reading()
        if reading.gate:
            neopixels.draw_all_off()
            neopixels.draw_dot(reading.top, 64,0,128)
            neopixels.draw_dot(reading.bot, 128,0,64)
            # The mid dot's blue channel encodes the finger count (white = two).
            neopixels.draw_dot(reading.mid, 128,128,128*(reading.num_fingers-1))
            neopixels.refresh()
    buttons.metal_button.color=(0,0,0)
    display.set_text('test_ribbon_dual_touch: Done!')
def show_calibration_menu():
    """Show the on-device menu of calibration routines and demos for both
    ribbons, delegating selection/dispatch to the widgets subsystem."""
    import lightboard.widgets as widgets
    # Pairs preserve the on-screen ordering of the menu entries.
    entries=[
        ('Calibrate Rib A'  , ribbon_a.run_calibration),
        ('Calibrate Rib B'  , ribbon_b.run_calibration),
        ('Smooth Demo A'    , ribbon_a.test_smooth_demo),
        ('Smooth Demo B'    , ribbon_b.test_smooth_demo),
        ('Raw UART Demo A'  , lambda: test_ribbon_raw_uart(ribbon_a)),
        ('Raw UART Demo B'  , lambda: test_ribbon_raw_uart(ribbon_b)),
        ('Dual Touch Demo A', lambda: test_ribbon_dual_touch(ribbon_a)),
        ('Dual Touch Demo B', lambda: test_ribbon_dual_touch(ribbon_b)),
    ]
    widgets.run_select_subroutine(OrderedDict(entries))
# Module-level ribbon singletons used by the menu/demo functions above: one per
# physical ribbon ('a' and 'b'), each wired to its own ADC channels.
# NOTE(review): argument order appears to be (name, teensy mid-pin analog input,
# ADS1115, single-touch channel, dual-top channel, dual-bottom channel) —
# confirm against Ribbon.__init__, which is defined elsewhere in this file.
ribbon_a=Ribbon('a',rib_a_mid,ads_a,ads_a_single,ads_a_dual_top,ads_a_dual_b)
ribbon_b=Ribbon('b',rib_b_mid,ads_b,ads_b_single,ads_b_dual_top,ads_b_dual_b)
|
6,797 | 2e744c0cbddf64a9c538c9f33fa19ff78c515012 | """
Stores custom FASTA sequences under a uuid in the database.
Part of the tables used for custom jobs.
"""
import uuid
from pred.webserver.errors import ClientException, ErrorType, raise_on_too_big_uploaded_data
from pred.queries.dbutil import update_database, read_database
from Bio import SeqIO
from io import StringIO
class SequenceList(object):
    """
    CRUD for managing FASTA file contents in the database.
    """
    def __init__(self, seq_uuid):
        """
        Setup sequence list with primary key seq_uuid.
        :param seq_uuid: str: uuid that uniquely represents this list.
        """
        if not seq_uuid:
            raise ValueError("SequenceList uuid must have a value yours:'{}'.".format(seq_uuid))
        self.seq_uuid = seq_uuid
        # Filled in by load() or by the caller before insert().
        self.content = None
        self.created = None
        self.title = None

    def insert(self, db):
        """
        Save self.content to the database under self.seq_uuid.
        Requires self.content and self.title to be set; raises ValueError otherwise.
        :param db: database connection
        """
        if not self.content:
            raise ValueError("SequenceList content property must be filled in before calling save.")
        if not self.title:
            raise ValueError("SequenceList title property must be filled in before calling save.")
        # Parse/normalize the FASTA up front so we also store one row per sequence.
        seq_item_list = SequenceListItems(self.content)
        cur = db.cursor()
        self._insert_data(cur, seq_item_list, self.title)
        cur.close()
        db.commit()

    def _insert_data(self, cur, item_list, title):
        # One parent row plus one child row per parsed sequence (parameterized SQL).
        cur.execute("insert into sequence_list(id, data, title) values(%s, %s, %s)",
                    [self.seq_uuid, item_list.data, title])
        for item in item_list.items:
            cur.execute("insert into sequence_list_item(seq_id, idx, name, sequence) values(%s, %s, %s, %s)",
                        [self.seq_uuid, item['idx'], item['name'], item['sequence']])

    def load(self, db):
        """
        Load self.content from the database based on self.seq_uuid.
        :param db: database connection
        :raises KeyError: when no row exists for self.seq_uuid
        """
        rows = read_database(db, "select data, created, title from sequence_list where id = %s", [self.seq_uuid])
        if not rows:
            raise KeyError("Unable to find sequence for {}".format(self.seq_uuid))
        first_row = rows[0]
        self.content = first_row[0]
        self.created = first_row[1]
        self.title = first_row[2]

    @staticmethod
    def create_with_content_and_title(db, content, title):
        """
        Saves content into the database under a new uuid.
        :param db: database connection
        :param content: str: FASTA file data to save in the database
        :param title: str: user-facing title for this list
        :return: str: new uuid created for this content
        """
        sequence_list = SequenceList(str(uuid.uuid1()))
        sequence_list.content = content
        sequence_list.title = title
        sequence_list.insert(db)
        return sequence_list.seq_uuid

    @staticmethod
    def read_list(db, seq_uuid):
        """
        Lookup the content from the database via the seq_uuid provided.
        :param db: database connection
        :param seq_uuid: str: uuid to lookup
        :return: SequenceList: loaded list associated with the seq_uuid
        """
        sequence_list = SequenceList(seq_uuid)
        sequence_list.load(db)
        return sequence_list

    @staticmethod
    def delete_old_and_unattached(cur, hours):
        """
        Delete sequence lists (and their items) older than `hours` that no job
        references.
        :param cur: database cursor
        :param hours: int: age threshold in hours (interpolated into the SQL,
                     so it must be a trusted integer, never user input)
        :return: [str]: ids of the sequence lists that were deleted
        """
        result = []
        select_sql = "select sequence_list.id from sequence_list " \
                     " left outer join job on sequence_list.id = job.seq_id " \
                     " where job.id is null " \
                     " and CURRENT_TIMESTAMP - sequence_list.created > interval '{} hours'".format(hours)
        cur.execute(select_sql, [])
        for row in cur.fetchall():
            seq_id = row[0]
            cur.execute("delete from sequence_list_item where seq_id = %s", [seq_id])
            cur.execute("delete from sequence_list where id = %s", [seq_id])
            # BUG FIX: `result` was declared and returned but never populated;
            # collect the deleted ids so callers can see what was removed.
            result.append(seq_id)
        return result
class SequenceListItems(object):
    """
    Record per sequence name in SequenceList.
    Used to lookup sequence for results.
    """
    def __init__(self, data):
        # Reject oversized uploads before doing any parsing work.
        raise_on_too_big_uploaded_data(data)
        self.data = SequenceListItems.make_fasta(data.strip())
        self.items = SequenceListItems.find_sequence_items(self.data)

    @staticmethod
    def make_fasta(data):
        """
        Convert string to FASTA if necessary.
        :param data: str: input value either FASTA or newline separated sequences
        :return: str: FASTA data
        """
        if data.startswith(">"):
            return data.strip()
        # Plain newline-separated sequences: synthesize ">seqN" headers.
        numbered = []
        counter = 1
        for line in data.split('\n'):
            if line:
                numbered.append(">seq{}\n{}\n".format(counter, line))
                counter += 1
        return "".join(numbered).strip()

    @staticmethod
    def find_sequence_items(data):
        """
        Parse FASTA data and return a list of {idx, name, sequence}.
        :param data: str: FASTA data to parse
        :return: [dict]: sequences in the FASTA data (idx is 1-based)
        """
        results = [
            {'idx': position, 'name': record.name, 'sequence': str(record.seq)}
            for position, record in enumerate(SeqIO.parse(StringIO(data), 'fasta'), start=1)
        ]
        SequenceListItems.verify_unique_names(results)
        return results

    @staticmethod
    def verify_unique_names(items):
        """
        Make sure that we don't have any duplicate names in the list.
        Raises ClientException if the names are duplicated.
        :param items: [{}]: list of dictionaries with name property to check
        """
        if len({item['name'] for item in items}) != len(items):
            raise ClientException("Error: Duplicate sequence names found.", ErrorType.INVALID_SEQUENCE_DATA)
|
6,798 | 5d8715dd02feff4e13919858051abeb5b6828011 | # Imports
import numpy as np
from ctf.functions2d.function2d import Function2D
# Problem
class StyblinskiTang(Function2D):
    """ Styblinski-Tang Function: f(x) = 0.5 * sum(x_i^4 - 16 x_i^2 + 5 x_i). """

    def __init__(self):
        """ Constructor. """
        # Known global minimum location/value and the standard search domain.
        self.min = np.array([-2.903534, -2.903534])
        self.value = -39.16599*2.0
        self.domain = np.array([[-5.0, 5.0], [-5.0, 5.0]])
        self.n = 2
        self.smooth = True
        self.info = [True, True, True]
        # LaTeX metadata used by plotting/reporting utilities.
        self.latex_name = "Styblinski-Tang Function"
        self.latex_type = "Other"
        self.latex_cost = r'\[ f(\mathbf{x}) = \frac{1}{2} \sum_{i=0}^{d-1} (x_i^4 - 16 x_i^2 + 5 x_i) \]'
        self.latex_desc = "The local minima are separated by a local maximum. There is only a single global minimum."

    def cost(self, x):
        """ Return the cost at x (axis 0 indexes the two coordinates). """
        x0, x1 = x[0], x[1]
        return 0.5*(x0**4.0 - 16*x0**2.0 + 5.0*x0 + x1**4.0 - 16*x1**2.0 + 5.0*x1)

    def grad(self, x):
        """ Return the gradient of the cost at x. """
        g = np.zeros(x.shape)
        # The cost is separable, so each component depends on one coordinate.
        for axis in (0, 1):
            g[axis] = -16.0*x[axis]**1.0 + 2.0*x[axis]**3.0 + 2.5
        return g

    def hess(self, x):
        """ Return the (diagonal, symmetric) Hessian of the cost at x. """
        h = np.zeros((2, 2) + x.shape[1:])
        h[0][0] = 6.0*x[0]**2.0 - 16.0
        h[1][1] = 6.0*x[1]**2.0 - 16.0
        # Off-diagonal terms are zero because the cost is separable in x0, x1.
        return h
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.classify import NaiveBayesClassifier
from nltk.probability import FreqDist
import csv

# Naive Bayes troll-comment classifier trained from trolls.csv
# (column 0 = comment text, column 1 = label).

# Characters stripped from comments before splitting into words.
illegal_chars = [
    '.', ',', '@', "'", '+', '-', '*',
]

# First pass: concatenate every comment so we can find the most frequent
# words across the whole corpus. (Context manager replaces the open/close
# pair, which previously leaked the second file handle.)
paragraph = ''
with open('trolls.csv', 'r') as f:
    for row in csv.reader(f):
        paragraph += row[0]

all_words = word_tokenize(paragraph)
word_frequencies = FreqDist(all_words)
most_common_words = list(word_frequencies.most_common(100))
print('most commons below...')
print(most_common_words)
most_cm_1 = [word for word, count in most_common_words]

stopWords = stopwords.words('english')
psObject = PorterStemmer()  # NOTE: kept from the original, but never used below


def extract_features(words):
    # BUG FIX: the original used this feature shape only for TRAINING and fed
    # the classifier a different shape ({word: True}) at prediction time.
    # Training and prediction must share one feature extractor.
    return {w.lower(): (w in most_cm_1) for w in words}


# Second pass: build one (features, label) pair per comment.
sentences = []
remarks = []
with open('trolls.csv', 'r') as f:
    for row in csv.reader(f):
        # Strip punctuation (simplifies the original's convoluted counter logic).
        text = row[0]
        for ch in illegal_chars:
            text = text.replace(ch, '')
        # Drop stopwords before feature extraction.
        content_words = [w for w in text.split(' ') if w and w not in stopWords]
        sentences.append([extract_features(content_words), row[1]])
        remarks.append(row[1])

print(remarks)
print(sentences)

classifier = NaiveBayesClassifier.train(sentences)
inputs = input('Enter a comment ')
# Use the same feature extractor as training (see BUG FIX above).
entry = extract_features(inputs.split(' '))
print(classifier.classify(entry))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.